hip_filename
stringlengths 5
84
| hip_content
stringlengths 79
9.69M
| cuda_filename
stringlengths 4
83
| cuda_content
stringlengths 19
9.69M
|
|---|---|---|---|
2d8acbd319ec8e4c8d05dc6f3e49b39f74abd3b3.hip
|
// !!! This is a file automatically generated by hipify!!!
#include <opencv2/core/core.hpp>
#include <opencv2/highgui/highgui.hpp>
#include <opencv2/imgproc/imgproc.hpp>
#include <iostream>
#include <string.h>
#include <stdio.h>
#include <time.h>
#include <math.h>
#include <algorithm>
#include <helper_cuda.h>
#include <helper_functions.h>
#include <hip/hip_runtime.h>
#include <rocblas.h>
#define CUBLAS_ERROR_CHECK(sdata) if(HIPBLAS_STATUS_SUCCESS!=sdata){printf("ERROR at:%s:%d\n",__FILE__,__LINE__);exit(-1);}
using namespace cv;
using namespace std;
#define nbins 9
#define nH 4
#define nW 3
#define nC 2
#define nB 6
#define T 12.0f
#define PI 3.141593
//feature
float feature[nB * nbins];
extern "C"
//HOG without Gamma and gaussian
//HOG without Gamma and gaussian.
//Computes a 9-bin gradient-orientation histogram per cell on an nH x nW cell
//grid, then merges each 2x2 cell neighborhood into one of nB = (nH-1)*(nW-1)
//normalized block histograms written to block_hist.
//Launch: 1-D grid, 1-D blocks; blockDim.x must be >= nH*nW*nbins (108) so the
//shared staging histogram can be zeroed/flushed with one thread per slot.
//NOTE(review): the final merge reads cell_hist written by other blocks with no
//grid-wide barrier; results are only reliable with gridDim.x == 1 -- confirm.
__global__ void
calcHOG(float *d_fgray, uchar *gray, int *cell_hist, float *block_hist, int row, int col)
{
    // Per-block staging histogram in shared memory to cut global atomics.
    __shared__ int tmp[nH*nW*nbins];
    if (threadIdx.x < nH*nW*nbins)
        tmp[threadIdx.x] = 0;
    __syncthreads();
    int i = blockDim.x * blockIdx.x + threadIdx.x;
    int stride = blockDim.x * gridDim.x;
    int len = row * col;
    // Truncate the image to the region evenly covered by the cell grid.
    int row2 = row / nH * nH;
    int col2 = col / nW * nW;
    int Hstep = row2 / nH;
    int Wstep = col2 / nW;
    // Clear the global cell histogram (grid-stride).
    while (i < nH*nW*nbins)
    {
        cell_hist[i] = 0;
        i += stride;
    }
    __syncthreads();
    // Convert the 8-bit image to float for the gradient filter.
    i = blockDim.x * blockIdx.x + threadIdx.x;
    while (i < len)
    {
        d_fgray[i] = (float)gray[i];
        i += stride;
    }
    __syncthreads();
    // Central-difference gradient ([-1,0,1] in x and y); vote the gradient
    // magnitude into the orientation bin of the owning cell.
    i = blockDim.x * blockIdx.x + threadIdx.x;
    while (i < len)
    {
        int r = i / col;
        int c = i % col;
        // Pixels beyond row2/col2 would map to cell row nH / column nW
        // (r/Hstep overflows when row % nH != 0), corrupting shared memory;
        // skip them entirely.
        if (r < row2 && c < col2)
        {
            float tmpx = 0.0f;
            float tmpy = 0.0f;
            if ( r > 0 && r < row-1 && c > 0 && c < col-1 )
            {
                tmpx = d_fgray[i+1] - d_fgray[i-1];
                tmpy = d_fgray[i+col]-d_fgray[i-col];
            }
            // A zero gradient (borders, flat areas) yields 0/0 = NaN and an
            // undefined bin index; such pixels contribute magnitude 0 anyway.
            if (tmpx != 0.0f || tmpy != 0.0f)
            {
                float t_theta = atanf( tmpy/tmpx ) / PI * 180.0f + 180.0f;
                int ind = ( (int)t_theta / (180 / nbins) ) % nbins;
                atomicAdd( &tmp[ (r/Hstep*nW + c/Wstep) * nbins + ind ], (int)sqrtf( tmpx*tmpx + tmpy*tmpy ) );
            }
        }
        i += stride;
    }
    __syncthreads();
    // Flush this block's staging histogram into the global cell histogram.
    if (threadIdx.x < nH*nW*nbins)
        atomicAdd( &cell_hist[threadIdx.x], tmp[threadIdx.x] );
    __syncthreads();
    // Merge each 2x2 cell neighborhood into one block descriptor, normalized
    // by the pixel count.
    i = blockDim.x * blockIdx.x + threadIdx.x;
    while (i < nbins*nH*nW)
    {
        int ind = i % nbins;
        int ti = i / nbins;
        int ir = ti / nW;
        int ic = ti % nW;
        if (ir < nH -1 && ic < nW -1)
        {
            // block_hist is laid out on an (nH-1) x (nW-1) grid, but
            // cell_hist is laid out on the full nH x nW grid -- the original
            // code reused the block-layout index for cell_hist, summing the
            // wrong cells whenever ir > 0.
            int bindex = (ir*(nW-1)+ic) * nbins + ind;
            int cindex = (ir*nW+ic) * nbins + ind;
            block_hist[ bindex ] = (float)( cell_hist[ cindex ] + cell_hist[ cindex + nbins] +
                cell_hist[ cindex + nW*nbins ] + cell_hist[ cindex + (nW+1)*nbins ] )
                / float(len);
        }
        i += stride;
    }
}
//TODO:?different from CPU versions
// Print the nB*nbins feature values, space-separated, on a single line.
void print_feature()
{
    const int total = nB * nbins;
    int idx = 0;
    while (idx < total)
    {
        printf("%f ", feature[idx]);
        ++idx;
    }
    printf("\n");
}
// Validate the command line: argv[1] must exist and name a readable file.
// Prints a usage/diagnostic message and exits with -1 on failure.
void check(int argc, char** argv, char *name)
{
    const int haveArg = (argc >= 2);
    if (!haveArg)
    {
        printf("ERROR!\n");
        printf("Usage: %s path_of_a_image\n", name);
        exit(-1);
    }
    const int readable = (access(argv[1], R_OK | F_OK) != -1);
    if (!readable)
    {
        printf("ERROR!\n");
        printf("The file is not exist or can't be read!\n");
        exit(-1);
    }
}
//Entry point: load a grayscale image, run the HOG kernel on device 0, and
//print the resulting nB*nbins block-histogram feature vector.
int main(int argc, char** argv)
{
char *name = "shape_hog_cuda";
check(argc, argv, name);
char *path = argv[1];
//read img as 8-bit grayscale
Mat image;
//image = imread(path, CV_LOAD_IMAGE_COLOR);
image = imread(path, CV_LOAD_IMAGE_GRAYSCALE);
if( !image.data )
{
printf("ERROR!\n");
printf("Can't read the file or it's not a image.\n");
exit(-1);
}
//init host-side feature accumulator
memset(feature, 0, nB * nbins * sizeof(float));
//init graphic card
int dev = 0;
checkCudaErrors(hipSetDevice(dev));
hipDeviceProp_t deviceProp;
checkCudaErrors(hipGetDeviceProperties(&deviceProp, dev));
//con-currency size: 512 threads/block, 2 blocks per multiprocessor
int threadPerBlock = 2*256;
int blockPerGrid = 2*deviceProp.multiProcessorCount;
int len_img;
//NOTE(review): treating row 0's pointer as the whole pixel buffer assumes
//image.isContinuous() -- true for imread output, but worth confirming.
uchar *gray = image.ptr<uchar>(0);
int row = image.rows;
int col = image.cols;
len_img = row * col;
//alloc memory on graphic card
uchar *d_gray = NULL;
float *d_fgray = NULL;
int *d_cellhist = NULL;
float *d_blockhist = NULL;
checkCudaErrors(hipMalloc((void**)&d_gray, len_img * sizeof(uchar)));
checkCudaErrors(hipMalloc((void**)&d_fgray, len_img * sizeof(float)));
checkCudaErrors(hipMalloc((void**)&d_cellhist, nH*nW*nbins * sizeof(int)));
checkCudaErrors(hipMalloc((void**)&d_blockhist, nB*nbins* sizeof(float)));
//create cublas handle -- NOTE(review): the handle is never used between
//create and destroy; it looks like leftover scaffolding.
hipblasHandle_t handlet;
hipblasCreate(&handlet);
checkCudaErrors(hipMemcpy(d_gray, gray, len_img * sizeof(uchar), hipMemcpyHostToDevice));
hipLaunchKernelGGL(( calcHOG), dim3(blockPerGrid), dim3(threadPerBlock), 0, 0, d_fgray, d_gray, d_cellhist, d_blockhist, row, col);
// check if kernel execution generated an error
getLastCudaError("Kernel execution failed");
//the blocking copy doubles as the synchronization point before reading results
checkCudaErrors(hipMemcpy(feature, d_blockhist, nB*nbins*sizeof(float), hipMemcpyDeviceToHost));
//print
print_feature();
//free device allocations
hipFree(d_gray);
hipFree(d_fgray);
hipFree(d_cellhist);
hipFree(d_blockhist);
hipblasDestroy(handlet);
return 0;
}
|
2d8acbd319ec8e4c8d05dc6f3e49b39f74abd3b3.cu
|
#include <opencv2/core/core.hpp>
#include <opencv2/highgui/highgui.hpp>
#include <opencv2/imgproc/imgproc.hpp>
#include <iostream>
#include <string.h>
#include <stdio.h>
#include <time.h>
#include <math.h>
#include <algorithm>
#include <helper_cuda.h>
#include <helper_functions.h>
#include <cuda_runtime.h>
#include <cublas_v2.h>
#define CUBLAS_ERROR_CHECK(sdata) if(CUBLAS_STATUS_SUCCESS!=sdata){printf("ERROR at:%s:%d\n",__FILE__,__LINE__);exit(-1);}
using namespace cv;
using namespace std;
#define nbins 9
#define nH 4
#define nW 3
#define nC 2
#define nB 6
#define T 12.0f
#define PI 3.141593
//feature
float feature[nB * nbins];
extern "C"
//HOG without Gamma and gaussian
//HOG without Gamma and gaussian.
//Computes a 9-bin gradient-orientation histogram per cell on an nH x nW cell
//grid, then merges each 2x2 cell neighborhood into one of nB = (nH-1)*(nW-1)
//normalized block histograms written to block_hist.
//Launch: 1-D grid, 1-D blocks; blockDim.x must be >= nH*nW*nbins (108) so the
//shared staging histogram can be zeroed/flushed with one thread per slot.
//NOTE(review): the final merge reads cell_hist written by other blocks with no
//grid-wide barrier; results are only reliable with gridDim.x == 1 -- confirm.
__global__ void
calcHOG(float *d_fgray, uchar *gray, int *cell_hist, float *block_hist, int row, int col)
{
    // Per-block staging histogram in shared memory to cut global atomics.
    __shared__ int tmp[nH*nW*nbins];
    if (threadIdx.x < nH*nW*nbins)
        tmp[threadIdx.x] = 0;
    __syncthreads();
    int i = blockDim.x * blockIdx.x + threadIdx.x;
    int stride = blockDim.x * gridDim.x;
    int len = row * col;
    // Truncate the image to the region evenly covered by the cell grid.
    int row2 = row / nH * nH;
    int col2 = col / nW * nW;
    int Hstep = row2 / nH;
    int Wstep = col2 / nW;
    // Clear the global cell histogram (grid-stride).
    while (i < nH*nW*nbins)
    {
        cell_hist[i] = 0;
        i += stride;
    }
    __syncthreads();
    // Convert the 8-bit image to float for the gradient filter.
    i = blockDim.x * blockIdx.x + threadIdx.x;
    while (i < len)
    {
        d_fgray[i] = (float)gray[i];
        i += stride;
    }
    __syncthreads();
    // Setting local variables here avoids separate Gx/Gy/Mag buffers.
    // Central-difference gradient ([-1,0,1] in x and y); vote the gradient
    // magnitude into the orientation bin of the owning cell.
    i = blockDim.x * blockIdx.x + threadIdx.x;
    while (i < len)
    {
        int r = i / col;
        int c = i % col;
        // Pixels beyond row2/col2 would map to cell row nH / column nW
        // (r/Hstep overflows when row % nH != 0), corrupting shared memory;
        // skip them entirely.
        if (r < row2 && c < col2)
        {
            float tmpx = 0.0f;
            float tmpy = 0.0f;
            if ( r > 0 && r < row-1 && c > 0 && c < col-1 )
            {
                tmpx = d_fgray[i+1] - d_fgray[i-1];
                tmpy = d_fgray[i+col]-d_fgray[i-col];
            }
            // A zero gradient (borders, flat areas) yields 0/0 = NaN and an
            // undefined bin index; such pixels contribute magnitude 0 anyway.
            if (tmpx != 0.0f || tmpy != 0.0f)
            {
                float t_theta = atanf( tmpy/tmpx ) / PI * 180.0f + 180.0f;
                int ind = ( (int)t_theta / (180 / nbins) ) % nbins;
                atomicAdd( &tmp[ (r/Hstep*nW + c/Wstep) * nbins + ind ], (int)sqrtf( tmpx*tmpx + tmpy*tmpy ) );
            }
        }
        i += stride;
    }
    __syncthreads();
    // Flush this block's staging histogram into the global cell histogram.
    if (threadIdx.x < nH*nW*nbins)
        atomicAdd( &cell_hist[threadIdx.x], tmp[threadIdx.x] );
    __syncthreads();
    // Merge each 2x2 cell neighborhood into one block descriptor, normalized
    // by the pixel count.
    i = blockDim.x * blockIdx.x + threadIdx.x;
    while (i < nbins*nH*nW)
    {
        int ind = i % nbins;
        int ti = i / nbins;
        int ir = ti / nW;
        int ic = ti % nW;
        if (ir < nH -1 && ic < nW -1)
        {
            // block_hist is laid out on an (nH-1) x (nW-1) grid, but
            // cell_hist is laid out on the full nH x nW grid -- the original
            // code reused the block-layout index for cell_hist, summing the
            // wrong cells whenever ir > 0.
            int bindex = (ir*(nW-1)+ic) * nbins + ind;
            int cindex = (ir*nW+ic) * nbins + ind;
            block_hist[ bindex ] = (float)( cell_hist[ cindex ] + cell_hist[ cindex + nbins] +
                cell_hist[ cindex + nW*nbins ] + cell_hist[ cindex + (nW+1)*nbins ] )
                / float(len);
        }
        i += stride;
    }
}
//TODO:?different from CPU versions
// Print the nB*nbins feature values, space-separated, on a single line.
void print_feature()
{
    const int total = nB * nbins;
    int idx = 0;
    while (idx < total)
    {
        printf("%f ", feature[idx]);
        ++idx;
    }
    printf("\n");
}
// Validate the command line: argv[1] must exist and name a readable file.
// Prints a usage/diagnostic message and exits with -1 on failure.
void check(int argc, char** argv, char *name)
{
    const int haveArg = (argc >= 2);
    if (!haveArg)
    {
        printf("ERROR!\n");
        printf("Usage: %s path_of_a_image\n", name);
        exit(-1);
    }
    const int readable = (access(argv[1], R_OK | F_OK) != -1);
    if (!readable)
    {
        printf("ERROR!\n");
        printf("The file is not exist or can't be read!\n");
        exit(-1);
    }
}
//Entry point: load a grayscale image, run the HOG kernel on device 0, and
//print the resulting nB*nbins block-histogram feature vector.
int main(int argc, char** argv)
{
char *name = "shape_hog_cuda";
check(argc, argv, name);
char *path = argv[1];
//read img as 8-bit grayscale
Mat image;
//image = imread(path, CV_LOAD_IMAGE_COLOR);
image = imread(path, CV_LOAD_IMAGE_GRAYSCALE);
if( !image.data )
{
printf("ERROR!\n");
printf("Can't read the file or it's not a image.\n");
exit(-1);
}
//init host-side feature accumulator
memset(feature, 0, nB * nbins * sizeof(float));
//init graphic card
int dev = 0;
checkCudaErrors(cudaSetDevice(dev));
cudaDeviceProp deviceProp;
checkCudaErrors(cudaGetDeviceProperties(&deviceProp, dev));
//con-currency size: 512 threads/block, 2 blocks per multiprocessor
int threadPerBlock = 2*256;
int blockPerGrid = 2*deviceProp.multiProcessorCount;
int len_img;
//NOTE(review): treating row 0's pointer as the whole pixel buffer assumes
//image.isContinuous() -- true for imread output, but worth confirming.
uchar *gray = image.ptr<uchar>(0);
int row = image.rows;
int col = image.cols;
len_img = row * col;
//alloc memory on graphic card
uchar *d_gray = NULL;
float *d_fgray = NULL;
int *d_cellhist = NULL;
float *d_blockhist = NULL;
checkCudaErrors(cudaMalloc((void**)&d_gray, len_img * sizeof(uchar)));
checkCudaErrors(cudaMalloc((void**)&d_fgray, len_img * sizeof(float)));
checkCudaErrors(cudaMalloc((void**)&d_cellhist, nH*nW*nbins * sizeof(int)));
checkCudaErrors(cudaMalloc((void**)&d_blockhist, nB*nbins* sizeof(float)));
//create cublas handle -- NOTE(review): the handle is never used between
//create and destroy; it looks like leftover scaffolding.
cublasHandle_t handlet;
cublasCreate(&handlet);
checkCudaErrors(cudaMemcpy(d_gray, gray, len_img * sizeof(uchar), cudaMemcpyHostToDevice));
calcHOG<<<blockPerGrid, threadPerBlock, 0>>>(d_fgray, d_gray, d_cellhist, d_blockhist, row, col);
// check if kernel execution generated an error
getLastCudaError("Kernel execution failed");
//the blocking copy doubles as the synchronization point before reading results
checkCudaErrors(cudaMemcpy(feature, d_blockhist, nB*nbins*sizeof(float), cudaMemcpyDeviceToHost));
//print
print_feature();
//free device allocations
cudaFree(d_gray);
cudaFree(d_fgray);
cudaFree(d_cellhist);
cudaFree(d_blockhist);
cublasDestroy(handlet);
return 0;
}
|
b777a828aff341d91800671313e5e32ace677510.hip
|
// !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
#include "includes.h"
// Elementwise in-place add: a[i] += b[i], one element per thread.
// Only threadIdx.x is used, so every block touches the same indices; there is
// also no bounds guard -- caller must launch a single block with exactly as
// many threads as there are elements.
__global__ void hello(char *a, int *b) { a[threadIdx.x] += b[threadIdx.x]; }
|
b777a828aff341d91800671313e5e32ace677510.cu
|
#include "includes.h"
// Elementwise in-place add: a[i] += b[i], one element per thread.
// Only threadIdx.x is used, so every block touches the same indices; there is
// also no bounds guard -- caller must launch a single block with exactly as
// many threads as there are elements.
__global__ void hello(char *a, int *b) { a[threadIdx.x] += b[threadIdx.x]; }
|
9197d20f4425435def721481c632152eb8ff1657.hip
|
// !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
#include <ATen/native/TensorAdvancedIndexing.h>
#include <ATen/native/IndexingUtils.h>
#include <ATen/ATen.h>
#include <ATen/NativeFunctions.h>
#include <ATen/ExpandUtils.h>
#include <ATen/MemoryOverlap.h>
#include <ATen/native/TensorIterator.h>
#include <ATen/AccumulateType.h>
#include <ATen/hip/detail/IndexUtils.cuh>
#include <ATen/hip/HIPUtils.h>
#include <THH/THHDeviceUtils.cuh>
#include <THH/THHGeneral.h>
#include <THH/THHTensorSort.cuh>
#include <ATen/hip/HIPContext.h>
#include <THH/THHThrustAllocator.cuh>
#include <thrust/execution_policy.h>
#include <thrust/sort.h>
#include <thrust/transform.h>
#include <THH/THHAtomics.cuh>
#include <hipcub/hipcub.hpp>
#include <c10/macros/Macros.h>
namespace {
// Accumulates grad_output rows into grad_weight rows selected by indices.
// The indices arrive pre-sorted (sorted_indices) with orig_indices mapping
// each sorted slot back to its original position, so one warp can own an
// entire run of duplicate destination indices and accumulate it serially --
// no atomics are needed in grad_weight.
template <typename scalar_t, int SZ>
__global__ void indexing_backward_kernel(
int64_t* sorted_indices, int64_t* indices, scalar_t* grad_output, scalar_t* grad_weight,
int64_t numel, int64_t stride, int64_t stride_before, int64_t outer_dim) {
//numel is total number of flattened indices, not expanded to dimensions that are not indexed.
//stride is the cumulative size of the not-indexed last dimensions
//stride_before is the stride of the dimension immediately preceding first indexed dimension
//if indexing starts from the 0th dimension, stride_before does not matter because blockIdx.z will be 0 in this case
//outer_dim is number of elements in the first unindexed dimensions
using accscalar_t = at::acc_type<scalar_t, true>;
// Each warp is responsible for an input into the LookupTable.
// If the preceding input has the same destination index as this input, then the warp
// exits immediately. The warp also processes subsequent inputs with the
// same value.
//
// Input Warp
// 1 <warp 1>
// 1 <warp 1> (<warp 2> exits without doing any work)
// 5 <warp 3>
// 8 <warp 4>
// Number of values processed by each thread (grain size)
// z walks the unindexed leading dimensions; grid-stride over gridDim.z.
for (int64_t z = blockIdx.z; z < outer_dim; z += gridDim.z){
int64_t idx = blockIdx.x * blockDim.y + threadIdx.y;
// Only the first occurrence of each sorted index value starts work; its
// warp then consumes the whole run of duplicates in the do-while below.
if (idx < numel
&& (idx == 0 || sorted_indices[idx] != sorted_indices[idx - 1])){
do {
// Each thread handles SZ features spaced C10_WARP_SIZE apart.
int64_t start_feature = threadIdx.x + blockIdx.y * blockDim.x * SZ;
const int64_t weight_row = ((int64_t) sorted_indices[idx]) * stride + z * stride_before;
const int64_t grad_row = ((int64_t) indices[idx]) * stride + z * numel * stride;
const accscalar_t scale = (accscalar_t)1.0;
accscalar_t gradient[SZ];
accscalar_t weight[SZ];
while (start_feature < stride) {
// Load SZ gradient/weight pairs, accumulate in accscalar_t (higher
// precision for half/bfloat16), then store back.
#pragma unroll
for (int ii = 0; ii < SZ; ii++) {
int64_t feature_dim = start_feature + ii * C10_WARP_SIZE;
if (feature_dim < stride) {
gradient[ii] = static_cast<accscalar_t>(grad_output[grad_row + feature_dim]);
weight[ii] = static_cast<accscalar_t>(grad_weight[weight_row + feature_dim]);
}
}
#pragma unroll
for (int ii = 0; ii < SZ; ii++) {
weight[ii] += gradient[ii] * scale;
}
#pragma unroll
for (int ii = 0; ii < SZ; ii++) {
int64_t feature_dim = start_feature + ii * C10_WARP_SIZE;
if (feature_dim < stride) {
grad_weight[weight_row + feature_dim] = static_cast<scalar_t>(weight[ii]);
}
}
start_feature += gridDim.y * blockDim.x * SZ;
}
idx++;
// Keep consuming while the next sorted index is the same destination.
} while (idx < numel && sorted_indices[idx] == sorted_indices[idx - 1]);
}
}
}
}
namespace at { namespace native {
// Optionally bounds-check `index` against [-dim_size, dim_size), then wrap
// negative entries into [0, dim_size) via remainder. Range checking is
// skipped in backward: forward already errored on out-of-bounds indices.
static Tensor wrapIndexOnce(const Tensor & index, int64_t dim, int64_t dim_size, bool check_range=true) {
  if (check_range && index.numel() != 0) {
    const auto hi = index.max().item<int64_t>();
    const auto lo = index.min().item<int64_t>();
    if (hi >= dim_size) {
      TORCH_CHECK_INDEX(false, "index ", hi, " is out of bounds for dimension ", dim, " with size ", dim_size);
    }
    if (lo < -dim_size) {
      TORCH_CHECK_INDEX(false, "index ", lo, " is out of bounds for dimension ", dim, " with size ", dim_size);
    }
  }
  return index.remainder(dim_size);
}
// Stride of every dimension as if `tensor` were contiguous:
// stride[d] = product of sizes[d+1..], with the last stride equal to 1.
static std::vector<int64_t> computeLinearStride(const Tensor & tensor) {
  auto sizes = tensor.sizes();
  const auto ndim = tensor.dim();
  std::vector<int64_t> stride(ndim);
  int64_t running = 1;
  for (auto d = ndim - 1; d >= 0; --d) {
    stride[d] = running;
    running *= sizes[d];
  }
  return stride;
}
// Collapse a multi-tensor advanced-indexing operation into one flat index
// tensor over src's contiguous layout. Returns (linearIndex, nElemBefore,
// strideBefore, nElemAfter): element counts of the unindexed dimensions
// before/after the indexed block, and the stride of the dimension just
// before the first indexed one.
static std::tuple<Tensor, int64_t, int64_t, int64_t>
computeLinearIndex(const Tensor & src, TensorList indices, bool check_range) {
auto strides = computeLinearStride(src);
const auto& backend = src.type().backend();
// Compute the linear index by multiplying the indexing tensors by the
// stride and summing them. All the indexing tensors have the same shape at
// this point. We also compute the number of dimensions before and after that
// are not being index.
Tensor linearIndex;
int64_t emptyBefore = 0, emptyAfter = 0, nElemBefore = 1, nElemAfter = 1, strideBefore =0;
for (auto i = decltype(src.dim()){0}; i < src.dim(); i++) {
if (indices[i].defined()) {
// Cast index to the longType matching src's backend
// This allows us to support ie indexing a cuda tensor with a cpu tensor
Tensor index = (wrapIndexOnce(indices[i], i, src.size(i), check_range) * strides[i]).toBackend(backend);
if (linearIndex.defined()) {
linearIndex += index;
} else {
// First indexed dimension: remember the stride that precedes it so the
// kernel can step across leading unindexed dimensions.
linearIndex = index;
if (i>0) {
strideBefore = src.stride(i-1); // stride after undefined dimensions
}
}
} else if (linearIndex.defined()) {
// Unindexed dimension after the indexed block.
emptyAfter++;
nElemAfter *= src.size(i);
} else {
// Unindexed dimension before the indexed block.
emptyBefore++;
nElemBefore *= src.size(i);
}
}
return std::make_tuple(std::move(linearIndex), nElemBefore, strideBefore, nElemAfter);
}
// Normalize an advanced-indexing argument list (masks, broadcasting, missing
// dims, non-adjacent index tensors) and flatten it with computeLinearIndex.
// Returns (linearIndex, possibly-transposed self, nElemBefore, strideBefore,
// nElemAfter, inversePerm) where inversePerm undoes any transpose applied.
static std::tuple<Tensor, Tensor, int64_t, int64_t, int64_t, std::vector<int64_t>> makeLinearIndex(Tensor self, TensorList orig, bool check_range) {
checkIndexTensorTypes(orig);
// first expand BoolTensor (masks) or ByteTensor (masks) into 1 or more LongTensors
auto indices = expandTensors(self, orig);
// next broadcast all index tensors together
indices = expand_outplace(indices);
// add missing null Tensors so that it matches self.dim()
while (indices.size() < (size_t)self.dim()) {
indices.emplace_back();
}
// if the non-null indices are not all adjacent, transpose self and indices
// together so that they're adjacent at the front
std::vector<int64_t> inversePerm;
if (!hasContiguousSubspace(indices)) {
std::tie(self, indices, inversePerm) = transposeToFrontAndInvPerm(self, indices);
}
int64_t nElemBefore, strideBefore, nElemAfter;
Tensor linearIndex;
std::tie(linearIndex, nElemBefore, strideBefore, nElemAfter) = computeLinearIndex(self, indices, check_range);
return std::make_tuple(linearIndex, self, nElemBefore, strideBefore, nElemAfter, inversePerm);
}
namespace {
// index_put_ with accumulate=true: self[indices] += value.
// Flattens the indices, sorts them so duplicate destinations are adjacent,
// then launches indexing_backward_kernel, which accumulates each run of
// duplicates serially within one warp (no atomics).
void index_put_accum_kernel(Tensor & self, TensorList indices, const Tensor & value, bool unsafe) {
if (indices.size() > (size_t)self.dim()) {
TORCH_CHECK_INDEX(false, "too many indices for tensor of dimension ", self.dim(), " (got ", indices.size(), ")");
}
auto value_ = value.contiguous();
Tensor linearIndex, expandedValue, src;
int64_t nElemBefore, strideBefore, sliceSize;
std::vector<int64_t> inversePerm;
// `unsafe` skips range checking; inversePerm undoes any transpose applied.
std::tie(linearIndex, src, nElemBefore, strideBefore, sliceSize, inversePerm) = makeLinearIndex(self, indices, !unsafe);
int64_t num_indices = linearIndex.numel();
if (num_indices > 0 && sliceSize > 0) {
const bool permuted = !src.is_contiguous();
auto src_ = permuted ? src.contiguous() : src;
linearIndex = linearIndex.reshape(-1);
auto sorted_indices = at::empty_like(linearIndex, LEGACY_CONTIGUOUS_MEMORY_FORMAT);
auto orig_indices = at::empty_like(linearIndex, LEGACY_CONTIGUOUS_MEMORY_FORMAT);
using device_ptr = thrust::device_ptr<int64_t>;
const hipStream_t stream = at::hip::getCurrentHIPStreamMasqueradingAsCUDA();
// Convert element offsets into slice (row) numbers.
linearIndex.floor_divide_(sliceSize);
{
sorted_indices.copy_(linearIndex);
auto allocator = THCThrustAllocator(globalContext().lazyInitCUDA());
auto policy = thrust::hip::par(allocator).on(stream);
// Fill sortedOrigIndices with sequential indices
const auto count_iter = thrust::counting_iterator<int64_t>(0);
auto orig_data = device_ptr(orig_indices.data_ptr<int64_t>());
thrust::copy(policy, count_iter, count_iter + num_indices, orig_data);
// Sort the inputs into sorted with the corresponding indices; we
// don't need a stable or multidimensional sort, so just use Thrust
// directly
// Sort; a stable sort is not required
// NB - not passing comparator causes thrust to use radix sort, and it hurts perf A LOT, at least for medium (few K) sized indices
auto sorted_data = device_ptr(sorted_indices.data_ptr<int64_t>());
thrust::sort_by_key(policy, sorted_data, sorted_data + num_indices, orig_data, ThrustLTOp<int64_t>());
}
TORCH_INTERNAL_ASSERT(linearIndex.numel()*sliceSize*nElemBefore == value.numel(), "number of flattened indices did not match number of elements in the value tensor", linearIndex.numel()*sliceSize*nElemBefore, value.numel());
const int UNROLL = 4;
const int indices_per_block = 4;
// grid.x walks index runs, grid.y walks the feature dimension, grid.z the
// leading unindexed dimensions (clamped to device limits).
dim3 grid(THCCeilDiv(num_indices, (int64_t) indices_per_block),
std::min<int>(at::cuda::getCurrentDeviceProperties()->maxGridSize[1], THCCeilDiv(sliceSize, (int64_t) (C10_WARP_SIZE*UNROLL))),
::min(std::max<int>(1,nElemBefore), at::cuda::getCurrentDeviceProperties()->maxGridSize[2]));
dim3 block(C10_WARP_SIZE, indices_per_block);
AT_DISPATCH_ALL_TYPES_AND3(at::ScalarType::Half, at::ScalarType::Bool, at::ScalarType::BFloat16,
value_.scalar_type(), "indexing_backward", [&] {
AT_SKIP_BFLOAT16_IF_NOT_ROCM(scalar_t, "indexing_backward", [&] {
hipLaunchKernelGGL(( indexing_backward_kernel<scalar_t, UNROLL>), dim3(grid), dim3(block), 0, stream,
sorted_indices.data_ptr<int64_t>(),
orig_indices.data_ptr<int64_t>(),
value_.data_ptr<scalar_t>(),
src_.data_ptr<scalar_t>(),
num_indices,
sliceSize,
strideBefore,
nElemBefore);
});
});
AT_CUDA_CHECK(hipGetLastError());
// If we had to densify a permuted src, copy the accumulated result back.
if (permuted)
self.copy_(src_.permute(inversePerm));
}
}
REGISTER_CUDA_DISPATCH(index_put_accum_stub, &index_put_accum_kernel);
} //anonymous
// Check tensor dimensions for index operations, and return the slice size.
// Check tensor dimensions for index operations, and return the slice size.
// A "slice" is dst (resp. src) with dimension `dim` removed; both slice
// sizes must match so each index copies a like-sized chunk.
static ptrdiff_t getSliceSize(const Tensor & dst,
int dim,
const Tensor & index,
const Tensor & src)
{
int dstDims = dst.dim();
int srcDims = src.dim();
TORCH_CHECK(index.dim() <= 1, "Index must be vector or scalar");
ptrdiff_t dstSliceSize = 1;
TORCH_CHECK(dim >= 0 && dim < dstDims, "Indexing dim ", dim, " is out of bounds");
for (int d = 0; d < dstDims; d++) {
if (d != dim) {
dstSliceSize *= dst.size(d);
}
}
TORCH_CHECK(dim < srcDims, "Indexing dim ", dim, " is out of bounds");
TORCH_CHECK(index.numel() == src.size(dim),
"length of src.size[dim] is not equal to length of indices");
ptrdiff_t srcSliceSize = 1;
bool mismatch = false;
if (dstDims != srcDims) mismatch = true;
for (int d = 0; d < srcDims; d++) {
if (d != dim) {
srcSliceSize *= src.size(d);
if (!mismatch && dst.size(d) != src.size(d)) mismatch = true;
}
}
// TORCH_CHECK streams its arguments; the old printf-style "(%ld vs %ld)"
// placeholders were emitted literally and never showed the actual sizes.
TORCH_CHECK(dstSliceSize == srcSliceSize,
"Source/destination tensor have different slice sizes (",
dstSliceSize, " vs ", srcSliceSize, ")");
if (mismatch) {
TORCH_WARN_ONCE(
"Warning: source/destination slices have same size but different "
"shape for an index operation. This behavior is deprecated.\n");
}
return dstSliceSize;
}
// We prefer this kernel to avoid reloading index points if the number
// of indices is a small number.
// This kernel in fact works for all choices of problem size, but if
// the number of indices chosen is large, then the
// indexAddLargeIndex kernel is a better choice to increase
// parallelism.
// dst[..., indices[i], ...] += alpha * src[..., i, ...] for every i, tuned
// for a small index count: each index is loaded once (in the outer loop)
// and the threads grid-stride over the slice elements.
template <typename T, typename IndicesType, typename IndexType, int DstDim, int SrcDim, int IdxDim>
__global__ void indexAddSmallIndex(cuda::detail::TensorInfo<T, IndexType> dst,
cuda::detail::TensorInfo<T, IndexType> src,
cuda::detail::TensorInfo<IndicesType, IndexType> indices,
int dstAddDim,
int srcAddDim,
IndexType innerSize,
int64_t dstAddDimSize,
T alpha) {
// In order to avoid reloading the index that we are copying, load
// it once to handle all of the points that are being selected, so
// it can be reused as much as possible. This kernel is chosen when
// this is a good choice (small number of chosen indices), since
// re-accessing indices in addition to src elements can be slow.
for (IndexType srcIndex = 0; srcIndex < indices.sizes[0]; ++srcIndex) {
// Lua indices begin at 1
IndexType dstIndex =
indices.data[cuda::detail::IndexToOffset<IndicesType, IndexType, IdxDim>::get(srcIndex, indices)];
CUDA_KERNEL_ASSERT(dstIndex < dstAddDimSize);
// We stride over the output ignoring the indexed dimension
// (innerSize), whose offset calculation is handled differently
for (IndexType linearIndex = blockIdx.x * blockDim.x + threadIdx.x;
linearIndex < innerSize;
linearIndex += gridDim.x * blockDim.x) {
IndexType dstOffset =
cuda::detail::IndexToOffset<T, IndexType, DstDim>::get(linearIndex, dst);
dstOffset += dstIndex * dst.strides[dstAddDim];
IndexType srcOffset =
cuda::detail::IndexToOffset<T, IndexType, SrcDim>::get(linearIndex, src);
srcOffset += srcIndex * src.strides[srcAddDim];
// Atomic: several indices may target the same dst slice.
gpuAtomicAddNoReturn(&dst.data[dstOffset], alpha * src.data[srcOffset]);
}
}
}
// We prefer this kernel to balance parallelism across index points,
// if there are a large number of indices.
// This kernel in fact works for all choices of problem size, but if
// the number of indices chosen is small, then the
// indexAddSmallIndex kernel is a better choice to reduce memory
// accesses.
// dst[..., indices[i], ...] += alpha * src[..., i, ...] for every i, tuned
// for a large index count: a single grid-stride loop covers the full
// (index x slice) space, with IndexIsMajor choosing which factor varies
// slowest for better locality.
template <typename T, typename IndicesType, typename IndexType, int DstDim, int SrcDim, int IdxDim,
bool IndexIsMajor>
__global__ void indexAddLargeIndex(cuda::detail::TensorInfo<T, IndexType> dst,
cuda::detail::TensorInfo<T, IndexType> src,
cuda::detail::TensorInfo<IndicesType, IndexType> indices,
int dstAddDim,
int srcAddDim,
IndexType totalSize,
IndexType innerSize,
int64_t dstAddDimSize,
T alpha) {
// We stride over the output including the indexed dimension
// (totalSize), and calculate the destination index point based on that
for (IndexType linearIndex = blockIdx.x * blockDim.x + threadIdx.x;
linearIndex < totalSize;
linearIndex += gridDim.x * blockDim.x) {
// Split the flat position into (index number, element within slice).
IndexType srcIndex, elementInSlice;
if (IndexIsMajor) {
srcIndex = linearIndex / innerSize;
elementInSlice = linearIndex % innerSize;
}
else {
elementInSlice = linearIndex / innerSize;
srcIndex = linearIndex % innerSize;
}
// Lua indices begin at 1
IndexType dstIndex =
indices.data[cuda::detail::IndexToOffset<IndicesType, IndexType, IdxDim>::get(srcIndex, indices)];
CUDA_KERNEL_ASSERT(dstIndex < dstAddDimSize);
IndexType dstOffset =
cuda::detail::IndexToOffset<T, IndexType, DstDim>::get(elementInSlice, dst);
dstOffset += dstIndex * dst.strides[dstAddDim];
IndexType srcOffset =
cuda::detail::IndexToOffset<T, IndexType, SrcDim>::get(elementInSlice, src);
srcOffset += srcIndex * src.strides[srcAddDim];
// Atomic: several indices may target the same dst slice.
gpuAtomicAddNoReturn(&dst.data[dstOffset], alpha * src.data[srcOffset]);
}
}
// Compare the stride between adjacent slices (sliceStride) with strides in the
// other dimensions (i.e., strides *inside* each slice).
//
// - Returns true if some dimension inside the slice has lower stride than
// sliceStride. The simplest example is a 2-D contiguous tensor with sliceDim
// == 0 (that is, each slice is a row).
//
// In this case, we choose the CUDA kernel that processes the data in
// "index-major order". For example, if thread count equals slice size, then
// all threads process slice #0 in lockstep, and then slice #1, and so on.
//
// - Otherwise (i.e., sliceStride has the lowest value), this function returns
// false. The simplest example is a 2-D contiguous tensor with sliceDim == 1
// (each slice is a column).
//
// In this case, we choose the CUDA kernel that processes the data in
// "elementInSlice-major order". For example, each thread can process element
// #0 of every slice, and then element #1 of every slice, and so on.
// Decide whether the index-add kernel should run "index-major".
// Returns true when some in-slice dimension is packed more tightly than the
// stride between adjacent slices, i.e. consecutive elements inside a slice
// are closer in memory than consecutive slices are.
template <typename scalar_t>
bool indexShouldBeMajor(cuda::detail::TensorInfo<scalar_t, unsigned int> &info,
                    int sliceDim)
{
  // Stride between element #0 of slice k and element #0 of slice k+1.
  const unsigned int strideBetweenSlices = info.strides[sliceDim];
  bool foundTighterDim = false;
  for (int d = 0; d < info.dims && !foundTighterDim; ++d) {
    const bool inSlice = (d != sliceDim);
    const bool nonTrivial = (info.sizes[d] > 1);
    if (inSlice && nonTrivial && info.strides[d] < strideBetweenSlices) {
      foundTighterDim = true;
    }
  }
  return foundTighterDim;
}
// In-place self[..., index[i], ...] += alpha * source[..., i, ...].
// Validates arguments, then dispatches to indexAddSmallIndex (few indices,
// index loaded once) or indexAddLargeIndex (many indices, flat grid-stride),
// using 32-bit offset math when every tensor permits it.
Tensor& index_add_cuda_(Tensor & self, int64_t dim, const Tensor & index, const Tensor & source, const at::Scalar alpha) {
// See Note [Writing Nondeterministic Operations]
// Nondeterministic because of atomicAdd usage
globalContext().alertNotDeterministic("index_add_cuda_");
dim = maybe_wrap_dim(dim, self.dim());
TensorArg self_arg{self, "self", 1}, index_arg{index, "index", 3}, source_arg{source, "source", 4};
checkAllSameGPU("index_add", {self_arg, index_arg, source_arg});
TORCH_CHECK_INDEX(index.dim() <= 1, "index_add_(): Index is supposed to be a vector");
TORCH_CHECK(index.scalar_type() == ScalarType::Long || index.scalar_type() == ScalarType::Int, "index_add_(): Expected dtype int32/int64 for index");
TORCH_CHECK(self.scalar_type() == source.scalar_type(),
"index_add_(): self and source must have the same scalar type");
TORCH_CHECK(dim == 0 || dim < source.dim(),
"index_add_(): Indexing dim ", dim, " is out of bounds of tensor");
TORCH_CHECK(index.numel() == (source.dim() == 0 ? 1 : source.size(dim)),
"index_add_(): Number of indices should be equal to self.size(dim)");
// Scalars are treated as 1-d tensor
Tensor self_ = (self.dim() == 0) ? self.view(1) : self;
Tensor source_ = (source.dim() == 0) ? source.view(1) : source;
TORCH_CHECK(self.dim() <= MAX_CUTORCH_DIMS, CUTORCH_DIM_WARNING);
TORCH_CHECK(source.dim() <= MAX_CUTORCH_DIMS, CUTORCH_DIM_WARNING);
TORCH_CHECK(index.dim() <= MAX_CUTORCH_DIMS, CUTORCH_DIM_WARNING);
at::assert_no_internal_overlap(self);
at::assert_no_partial_overlap(self, index);
at::assert_no_partial_overlap(self, source);
// The `source` is partitioned into two parts:
// -the size of each slice we are indexing, which is the
// total size of the tensor ignoring dimension `dim`;
// -the number of index we are choosing, which is the total size
// of the tensor `index`.
ptrdiff_t sliceSize = getSliceSize(self_, dim, index, source_);
ptrdiff_t sourceTotalSize = source.numel();
int64_t selfAddDimSize = self_.size(dim);
ptrdiff_t numIndex = index.numel();
// Nothing to add when slices are empty.
if (sliceSize == 0) {
return self;
}
const hipStream_t stream = at::hip::getCurrentHIPStreamMasqueradingAsCUDA();
bool indContig = index.is_contiguous();
int mpc = at::cuda::getCurrentDeviceProperties()->multiProcessorCount;
// Launch helpers; expanded inside the dispatch lambdas below where the
// selfInfo/sourceInfo/indexInfo locals are in scope.
#define SMALL_INDEX(TENSOR_TYPE, INDICES_TYPE, TYPE, SELF_DIM, SOURCE_DIM, IDX_DIM) \
hipLaunchKernelGGL(( indexAddSmallIndex<TENSOR_TYPE, INDICES_TYPE, TYPE, SELF_DIM, SOURCE_DIM, IDX_DIM>) \
, dim3(smallIndexGrid), dim3(smallIndexBlock), 0, stream, \
selfInfo, sourceInfo, indexInfo, \
selfAddDim, sourceAddDim, sliceSize, selfAddDimSize, alpha_scalar);
#define LARGE_INDEX(TENSOR_TYPE, INDICES_TYPE, TYPE, \
SELF_DIM, SOURCE_DIM, IDX_DIM, IDX_IS_MAJOR) \
hipLaunchKernelGGL(( indexAddLargeIndex<TENSOR_TYPE, INDICES_TYPE, TYPE, \
SELF_DIM, SOURCE_DIM, IDX_DIM, IDX_IS_MAJOR>) \
, dim3(largeIndexGrid), dim3(largeIndexBlock), 0, stream, \
selfInfo, sourceInfo, indexInfo, \
selfAddDim, sourceAddDim, sourceTotalSize, \
(IDX_IS_MAJOR) ? sliceSize : numIndex, \
selfAddDimSize, alpha_scalar);
// 128-thread blocks, capped at 8 blocks per multiprocessor.
dim3 smallIndexGrid(::min(THCCeilDiv(sliceSize, (ptrdiff_t)128), (ptrdiff_t)(mpc * 8)));
dim3 smallIndexBlock(::min(sliceSize, (ptrdiff_t)128));
dim3 largeIndexGrid(::min(THCCeilDiv(sourceTotalSize, (ptrdiff_t)128), (ptrdiff_t)(mpc * 8)));
dim3 largeIndexBlock(::min(sourceTotalSize, (ptrdiff_t)128));
if (cuda::detail::canUse32BitIndexMath(self) &&
cuda::detail::canUse32BitIndexMath(source) &&
cuda::detail::canUse32BitIndexMath(index)) {
AT_DISPATCH_ALL_TYPES_AND2(at::ScalarType::Half, at::ScalarType::BFloat16, self.scalar_type(), "index_add", [&] {
AT_SKIP_BFLOAT16_IF_NOT_ROCM(scalar_t, "index_add", [&] {
cuda::detail::TensorInfo<scalar_t, unsigned int> selfInfo =
cuda::detail::getTensorInfo<scalar_t, unsigned int>(self_);
int selfAddDim = selfInfo.collapseDims(dim);
selfInfo.reduceDim(selfAddDim);
AT_DISPATCH_INDEX_TYPES(index.scalar_type(), "index_add_cuda_", [&] () {
auto sourceInfo =
cuda::detail::getTensorInfo<scalar_t, unsigned int>(source_);
int sourceAddDim = sourceInfo.collapseDims(dim);
sourceInfo.reduceDim(sourceAddDim);
auto indexInfo =
cuda::detail::getTensorInfo<index_t, unsigned int>(index);
indexInfo.collapseDims();
auto alpha_scalar = alpha.to<scalar_t>();
// A reasonable choice for when to have each thread iterate over
// index to choose
if (numIndex <= 16) {
// Specialized dims (1/2/3) let IndexToOffset skip the generic
// per-dimension loop; -1/-2 are the generic/contiguous fallbacks.
if (selfInfo.dims == 1 && sourceInfo.dims == 1 && indContig) {
SMALL_INDEX(scalar_t, index_t, unsigned int, 1, 1, -2);
} else if (selfInfo.dims == 2 && sourceInfo.dims == 2 && indContig) {
SMALL_INDEX(scalar_t, index_t, unsigned int, 2, 2, -2);
} else if (selfInfo.dims == 3 && sourceInfo.dims == 3 && indContig) {
SMALL_INDEX(scalar_t, index_t, unsigned int, 3, 3, -2);
} else {
SMALL_INDEX(scalar_t, index_t, unsigned int, -1, -1, -1);
}
} else {
bool indexIsMajor = indexShouldBeMajor(selfInfo, selfAddDim);
if (selfInfo.dims == 1 && sourceInfo.dims == 1 && indContig) {
LARGE_INDEX(scalar_t, index_t, unsigned int, 1, 1, -2, true);
} else if (selfInfo.dims == 2 && sourceInfo.dims == 2 && indContig) {
if (indexIsMajor) {
LARGE_INDEX(scalar_t, index_t, unsigned int, 2, 2, -2, true);
} else {
LARGE_INDEX(scalar_t, index_t, unsigned int, 2, 2, -2, false);
}
} else if (selfInfo.dims == 3 && sourceInfo.dims == 3 && indContig) {
if (indexIsMajor) {
LARGE_INDEX(scalar_t, index_t, unsigned int, 3, 3, -2, true);
} else {
LARGE_INDEX(scalar_t, index_t, unsigned int, 3, 3, -2, false);
}
} else {
LARGE_INDEX(scalar_t, index_t, unsigned int, -1, -1, -1, true);
}
}
});
});
});
} else {
// 64-bit offsets: only the fully-generic large-index kernel is provided.
AT_DISPATCH_ALL_TYPES_AND2(at::ScalarType::Half, at::ScalarType::BFloat16, self.scalar_type(), "index_add", [&] {
AT_SKIP_BFLOAT16_IF_NOT_ROCM(scalar_t, "index_add", [&] {
cuda::detail::TensorInfo<scalar_t, uint64_t> selfInfo =
cuda::detail::getTensorInfo<scalar_t, uint64_t>(self_);
int selfAddDim = selfInfo.collapseDims(dim);
selfInfo.reduceDim(selfAddDim);
cuda::detail::TensorInfo<scalar_t, uint64_t> sourceInfo =
cuda::detail::getTensorInfo<scalar_t, uint64_t>(source_);
int sourceAddDim = sourceInfo.collapseDims(dim);
sourceInfo.reduceDim(sourceAddDim);
AT_DISPATCH_INDEX_TYPES(index.scalar_type(), "index_add_cuda_", [&] () {
cuda::detail::TensorInfo<index_t, uint64_t> indexInfo =
cuda::detail::getTensorInfo<index_t, uint64_t>(index);
indexInfo.collapseDims();
auto alpha_scalar = alpha.to<scalar_t>();
LARGE_INDEX(scalar_t, index_t, uint64_t, -1, -1, -1, true);
});
});
});
}
return self;
#undef SMALL_INDEX
#undef LARGE_INDEX
}
namespace {
// We prefer this kernel to avoid reloading index points if the number
// of indices is a small number.
// This kernel in fact works for all choices of problem size, but if
// the number of indices chosen is large, then the
// indexSelectLargeIndex kernel is a better choice to increase
// parallelism.
// Gathers src slices into dst: dst[d] = src[indices[d]] along the select dims.
// Template dims DstDim/SrcDim/IdxDim pick specialized IndexToOffset paths
// (-1 = generic, -2 = fully contiguous); see the SMALL_INDEX launch macro.
// Launched with a 1-D grid/block; threads grid-stride over `innerSize`
// (elements per slice) while the loop over indices runs serially per thread.
template <typename T, typename IndicesType, typename IndexType, int DstDim, int SrcDim, int IdxDim>
__global__ void indexSelectSmallIndex(cuda::detail::TensorInfo<T, IndexType> dst,
                                      cuda::detail::TensorInfo<T, IndexType> src,
                                      cuda::detail::TensorInfo<IndicesType, IndexType> indices,
                                      int dstSelectDim,
                                      int srcSelectDim,
                                      IndexType innerSize,
                                      int64_t srcSelectDimSize) {
  // In order to avoid reloading the index that we are copying, load
  // it once to handle all of the points that are being selected, so
  // it can be reused as much as possible. This kernel is chosen when
  // this is a good choice (small number of chosen indices), since
  // re-accessing indices in addition to src elements can be slow.
  for (IndexType dstIndex = 0; dstIndex < indices.sizes[0]; ++dstIndex) {
    IndexType srcIndex =
      indices.data[cuda::detail::IndexToOffset<IndicesType, IndexType, IdxDim>::get(dstIndex, indices)];
    // Device-side bounds check; traps the kernel on an out-of-range index.
    CUDA_KERNEL_ASSERT(srcIndex < srcSelectDimSize);
    // We stride over the output ignoring the indexed dimension
    // (innerSize), whose offset calculation is handled differently
    for (IndexType linearIndex = blockIdx.x * blockDim.x + threadIdx.x;
         linearIndex < innerSize;
         linearIndex += gridDim.x * blockDim.x) {
      IndexType dstOffset =
        cuda::detail::IndexToOffset<T, IndexType, DstDim>::get(linearIndex, dst);
      // Shift by the selected slice's offset along the indexed dimension.
      dstOffset += dstIndex * dst.strides[dstSelectDim];
      IndexType srcOffset =
        cuda::detail::IndexToOffset<T, IndexType, SrcDim>::get(linearIndex, src);
      srcOffset += srcIndex * src.strides[srcSelectDim];
      dst.data[dstOffset] = src.data[srcOffset];
    }
  }
}
// We prefer this kernel to balance parallelism across index points,
// if there are a large number of indices.
// This kernel in fact works for all choices of problem size, but if
// the number of indices chosen is small, then the
// indexSelectSmallIndex kernel is a better choice to reduce memory
// accesses.
// Gathers src slices into dst, parallelizing over index points as well as
// slice elements: threads grid-stride over `totalSize` = numIndices * innerSize.
// IndexIsMajor controls how the flat id is split into (sliceIndex, element);
// it is chosen on the host (see indexShouldBeMajor) to favor coalesced access.
template <typename T, typename IndicesType, typename IndexType, int DstDim, int SrcDim, int IdxDim,
          bool IndexIsMajor>
__global__ void indexSelectLargeIndex(cuda::detail::TensorInfo<T, IndexType> dst,
                                      cuda::detail::TensorInfo<T, IndexType> src,
                                      cuda::detail::TensorInfo<IndicesType, IndexType> indices,
                                      int dstSelectDim,
                                      int srcSelectDim,
                                      IndexType totalSize,
                                      IndexType innerSize,
                                      int64_t srcSelectDimSize) {
  // We stride over the output including the indexed dimension
  // (totalSize), and calculate the destination index point based on that
  for (IndexType linearIndex = blockIdx.x * blockDim.x + threadIdx.x;
       linearIndex < totalSize;
       linearIndex += gridDim.x * blockDim.x) {
    IndexType dstIndex, elementInSlice;
    if (IndexIsMajor) {
      dstIndex = linearIndex / innerSize;
      elementInSlice = linearIndex % innerSize;
    }
    else {
      elementInSlice = linearIndex / innerSize;
      dstIndex = linearIndex % innerSize;
    }
    IndexType srcIndex =
      indices.data[cuda::detail::IndexToOffset<IndicesType, IndexType, IdxDim>::get(dstIndex, indices)];
    // Device-side bounds check; traps the kernel on an out-of-range index.
    CUDA_KERNEL_ASSERT(srcIndex < srcSelectDimSize);
    IndexType dstOffset =
      cuda::detail::IndexToOffset<T, IndexType, DstDim>::get(elementInSlice, dst);
    dstOffset += dstIndex * dst.strides[dstSelectDim];
    IndexType srcOffset =
      cuda::detail::IndexToOffset<T, IndexType, SrcDim>::get(elementInSlice, src);
    srcOffset += srcIndex * src.strides[srcSelectDim];
    dst.data[dstOffset] = src.data[srcOffset];
  }
}
namespace {
// When using a 0-dim scalar tensor, we need the legacy (THC) semantics of
// TensorInfo: Pretend that the scalar tensor is in fact a one-element vector.
// Reproduce the legacy (THC) view of a 0-dim scalar TensorInfo: a
// one-element, stride-1 vector. Non-scalar infos are returned unchanged.
template <typename T, typename IndexType>
cuda::detail::TensorInfo<T, IndexType>
tensorInfoLegacyIfScalar(cuda::detail::TensorInfo<T, IndexType> ti) {
  const bool isScalar = (ti.dims == 0);
  if (!isScalar) {
    return ti;
  }
  // Promote the scalar to a 1-d single-element view.
  ti.dims = 1;
  ti.sizes[0] = 1;
  ti.strides[0] = 1;
  return ti;
}
}
// Core index_select implementation for one dtype. Resizes `out` to self's
// shape with size(dim) replaced by index.numel(), then launches either the
// small-index or large-index kernel depending on how many indices there are
// and whether every tensor fits 32-bit index arithmetic.
// NOTE(review): `dim` is declared `long` (32-bit on Windows) while callers
// use int64_t — confirm against upstream before relying on large dims there.
template<typename scalar_t>
void index_select_out_cuda_impl(Tensor& out, const Tensor& self, long dim,
                                const Tensor& index) {
  ptrdiff_t numIndices = index.numel();
  // A 0-dim tensor is treated as 1-dimensional for the bounds check below.
  int selfDims = self.dim() == 0 ? 1 : self.dim();
  const hipStream_t stream = at::hip::getCurrentHIPStreamMasqueradingAsCUDA();
  TORCH_CHECK(index.dim() <= 1,
              "Index is supposed to be an empty tensor or a vector");
  TORCH_CHECK(dim < selfDims, "Indexing dim is out of bounds");
  // Output keeps self's shape except along `dim`, which gets one slot per index.
  std::vector<int64_t> newSize = self.sizes().vec();
  if (self.dim() > 0) {
    newSize[dim] = numIndices;
  }
  at::native::resize_(out, newSize, {});
  ptrdiff_t outTotalSize = out.numel();
  if (outTotalSize == 0) {
    return;
  }
  bool indContig = index.is_contiguous();
  // The `self` is partitioned into two parts:
  // -the size of each slice we are indexing, which is the
  // total size of the tensor ignoring dimension `dim`;
  // -the number of indices we are choosing, which is the total size
  // of the tensor `indices`.
  int64_t selfSelectDimSize = self.dim() == 0 ? 1 : self.size(dim);
  ptrdiff_t sliceSize = outTotalSize / numIndices;
  int mpc = at::cuda::getCurrentDeviceProperties()->multiProcessorCount;
  // Launch helpers. They reference the locals declared in the dispatch
  // branches below (outInfo/selfInfo/indicesInfo/outSelectDim/selfSelectDim),
  // so those names must not change. TYPE is the index-math type; the DIM
  // template arguments pick specialized IndexToOffset paths (-1 generic,
  // -2 contiguous).
#define SMALL_INDEX(TENSOR_TYPE, INDICES_TYPE, TYPE, DST_DIM, SRC_DIM, IDX_DIM) \
  hipLaunchKernelGGL(( indexSelectSmallIndex<TENSOR_TYPE, INDICES_TYPE, TYPE, DST_DIM, SRC_DIM, IDX_DIM>) \
    , dim3(smallIndexGrid), dim3(smallIndexBlock), 0, stream, \
      outInfo, selfInfo, indicesInfo, \
      outSelectDim, selfSelectDim, static_cast<TYPE>(sliceSize), \
      selfSelectDimSize);
#define LARGE_INDEX(TENSOR_TYPE, INDICES_TYPE, TYPE, \
                    DST_DIM, SRC_DIM, IDX_DIM, IDX_IS_MAJOR) \
  hipLaunchKernelGGL(( indexSelectLargeIndex<TENSOR_TYPE, INDICES_TYPE, TYPE, \
                       DST_DIM, SRC_DIM, IDX_DIM, IDX_IS_MAJOR>) \
      , dim3(largeIndexGrid), dim3(largeIndexBlock), 0, stream, \
        outInfo, selfInfo, indicesInfo, \
        outSelectDim, selfSelectDim, static_cast<TYPE>(outTotalSize), \
        static_cast<TYPE>((IDX_IS_MAJOR) ? sliceSize : numIndices), \
        selfSelectDimSize);
  // Cap grids at 8 blocks per SM; blocks at 128 threads (or the work size).
  dim3 smallIndexGrid(::min(THCCeilDiv(sliceSize, (ptrdiff_t)128), (ptrdiff_t)(mpc * 8)));
  dim3 smallIndexBlock(::min(sliceSize, (ptrdiff_t)128));
  dim3 largeIndexGrid(::min(THCCeilDiv(outTotalSize, (ptrdiff_t)128), (ptrdiff_t)(mpc * 8)));
  dim3 largeIndexBlock(::min(outTotalSize, (ptrdiff_t)128));
  if (cuda::detail::canUse32BitIndexMath(out) &&
      cuda::detail::canUse32BitIndexMath(self) &&
      cuda::detail::canUse32BitIndexMath(index)) {
    // 32-bit path: collapse dims so the specialized 1/2/3-dim kernels apply.
    auto outInfo = tensorInfoLegacyIfScalar(cuda::detail::getTensorInfo<scalar_t, unsigned int>(out));
    int outSelectDim = outInfo.collapseDims(dim);
    outInfo.reduceDim(outSelectDim);
    auto selfInfo = tensorInfoLegacyIfScalar(cuda::detail::getTensorInfo<scalar_t, unsigned int>(self));
    int selfSelectDim = selfInfo.collapseDims(dim);
    selfInfo.reduceDim(selfSelectDim);
    AT_DISPATCH_INDEX_TYPES(index.scalar_type(), "index_select_out_cuda_impl", [&] () {
      auto indicesInfo = tensorInfoLegacyIfScalar(cuda::detail::getTensorInfo<index_t, unsigned int>(index));
      indicesInfo.collapseDims();
      // A reasonable choice for when to have each thread iterate over
      // indices to choose
      if (numIndices <= 16) {
        if (outInfo.dims == 1 && selfInfo.dims == 1 && indContig) {
          SMALL_INDEX(scalar_t, index_t, unsigned int, 1, 1, -2);
        } else if (outInfo.dims == 2 && selfInfo.dims == 2 && indContig) {
          SMALL_INDEX(scalar_t, index_t, unsigned int, 2, 2, -2);
        } else if (outInfo.dims == 3 && selfInfo.dims == 3 && indContig) {
          SMALL_INDEX(scalar_t, index_t, unsigned int, 3, 3, -2);
        } else {
          SMALL_INDEX(scalar_t, index_t, unsigned int, -1, -1, -1);
        }
      } else {
        bool indexIsMajor = indexShouldBeMajor(outInfo, outSelectDim);
        if (outInfo.dims == 1 && selfInfo.dims == 1 && indContig) {
          LARGE_INDEX(scalar_t, index_t, unsigned int, 1, 1, -2, true);
        } else if (outInfo.dims == 2 && selfInfo.dims == 2 && indContig) {
          if (indexIsMajor) {
            LARGE_INDEX(scalar_t, index_t, unsigned int, 2, 2, -2, true);
          } else {
            LARGE_INDEX(scalar_t, index_t, unsigned int, 2, 2, -2, false);
          }
        } else if (outInfo.dims == 3 && selfInfo.dims == 3 && indContig) {
          if (indexIsMajor) {
            LARGE_INDEX(scalar_t, index_t, unsigned int, 3, 3, -2, true);
          } else {
            LARGE_INDEX(scalar_t, index_t, unsigned int, 3, 3, -2, false);
          }
        } else {
          LARGE_INDEX(scalar_t, index_t, unsigned int, -1, -1, -1, true);
        }
      }
    });
  } else {
    // 64-bit fallback: only the fully generic large-index kernel is used.
    auto outInfo = tensorInfoLegacyIfScalar(cuda::detail::getTensorInfo<scalar_t, uint64_t>(out));
    int outSelectDim = outInfo.collapseDims(dim);
    outInfo.reduceDim(outSelectDim);
    auto selfInfo = tensorInfoLegacyIfScalar(cuda::detail::getTensorInfo<scalar_t, uint64_t>(self));
    int selfSelectDim = selfInfo.collapseDims(dim);
    selfInfo.reduceDim(selfSelectDim);
    AT_DISPATCH_INDEX_TYPES(index.scalar_type(), "index_select_out_cuda_impl", [&] () {
      auto indicesInfo = tensorInfoLegacyIfScalar(cuda::detail::getTensorInfo<index_t, uint64_t>(index));
      indicesInfo.collapseDims();
      LARGE_INDEX(scalar_t, index_t, uint64_t, -1, -1, -1, true);
    });
  }
#undef SMALL_INDEX
#undef LARGE_INDEX
}
} // anonymous namespace
// out= entry point for index_select on CUDA/HIP.
// Validates devices and dimensionality, wraps a negative `dim`, then
// dispatches over supported dtypes to index_select_out_cuda_impl.
Tensor& index_select_out_cuda(Tensor& out, const Tensor& self, int64_t dim,
                              const Tensor& index) {
  static constexpr string_view DIM_WARNING =
    "Tensor too large or too many (> 25) dimensions";
  TORCH_CHECK(at::cuda::check_device({out, self, index}),
              "Input, output and indices must be on the current device");
  at::assert_no_internal_overlap(out);
  dim = at::maybe_wrap_dim(dim, self);
  TORCH_CHECK(self.dim() <= MAX_TENSORINFO_DIMS, DIM_WARNING);
  TORCH_CHECK(index.dim() <= MAX_TENSORINFO_DIMS, DIM_WARNING);
  // ROCm builds additionally dispatch BFloat16; the CUDA branch does not.
#if defined(__HIP_PLATFORM_HCC__)
  AT_DISPATCH_ALL_TYPES_AND_COMPLEX_AND3(
    at::ScalarType::Half, at::ScalarType::Bool, at::ScalarType::BFloat16,
    out.scalar_type(), "index_select_cuda",
    [&] { index_select_out_cuda_impl<scalar_t>(out, self, dim, index); });
#else // __HIP_PLATFORM_HCC__
  AT_DISPATCH_ALL_TYPES_AND_COMPLEX_AND2(
    at::ScalarType::Half, at::ScalarType::Bool,
    out.scalar_type(), "index_select_cuda",
    [&] { index_select_out_cuda_impl<scalar_t>(out, self, dim, index); });
#endif // __HIP_PLATFORM_HCC__
  return out;
}
// Functional index_select: allocate a fresh result with self's options and
// delegate to the out= variant, which resizes and fills it.
Tensor index_select_cuda(const Tensor& self, int64_t dim, const Tensor& index) {
  Tensor result = at::empty({0}, self.options());
  index_select_out_cuda(result, self, dim, index);
  return result;
}
// Predicate functor: yields true for every element that differs from zero.
// Marked __host__ __device__ so it can feed cub's transform iterators.
template<typename T>
struct NonZeroOp
{
  __host__ __device__ __forceinline__ bool operator()(const T& a) const {
    return !(a == T(0));
  }
};
// nonzero implementation for one dtype. Pipeline:
//  1) cub::DeviceReduce::Sum over a NonZeroOp-transformed iterator counts
//     the nonzero elements (copied back to host synchronously).
//  2) cub::DeviceSelect::Flagged compacts the flat indices of the nonzeros.
//  3) thrust::transform unflattens each flat index into per-dimension
//     coordinates, one pass per dimension from innermost to outermost.
// The intermediate is built transposed ({ndim, num_nonzeros}) and exposed
// through out.set_/out.copy_ so `out` ends up {num_nonzeros, ndim}.
template<typename scalar_t>
void nonzero_cuda_out_impl(const Tensor& self, Tensor& out){
  Tensor self_ = self.contiguous();
  // int is sufficient: callers enforce numel < INT_MAX.
  int N = self_.numel();
  const hipStream_t stream = at::hip::getCurrentHIPStreamMasqueradingAsCUDA();
  // compute number of nonzero elements
  size_t temp_storage_bytes=0;
  auto& allocator = *c10::hip::HIPCachingAllocatorMasqueradingAsCUDA::get();
  auto num_nonzeros = allocator.allocate(sizeof(int));
  hipcub::TransformInputIterator<bool, NonZeroOp<scalar_t>, scalar_t*> itr(self_.data_ptr<scalar_t>(), NonZeroOp<scalar_t>());
  // First call only queries the temp-storage size; second call does the work.
  hipcub::DeviceReduce::Sum(nullptr, temp_storage_bytes, itr, (int*)num_nonzeros.get(), N, stream);
  auto temp_storage = allocator.allocate(temp_storage_bytes);
  hipcub::DeviceReduce::Sum(temp_storage.get(), temp_storage_bytes, itr, (int*)num_nonzeros.get(), N, stream);
  int num_nonzeros_h;
  C10_HIP_CHECK(hipMemcpyAsync(&num_nonzeros_h, num_nonzeros.get(), sizeof(int), hipMemcpyDeviceToHost, stream));
  //need to synchronize to make sure data is available on the host
  C10_HIP_CHECK(hipStreamSynchronize(stream));
  //expected output size is num_nonzeros x ndim
  //we are producing output with size {num_nonzeros, ndim} and strides {num_nonzeros, 1} (that is, transposed ndim x num_nonzeros output)
  //we are able to directly use passed output with this size and strides, and we can also (per contract)
  //resize passed output with incorrect sizes anyway we want.
  //However, out with correct sizes and incorrect strides will have to be copied to from the intermediate we've produced.
  bool need_to_copy = out.dim() == 2 && out.sizes()[0] == num_nonzeros_h && out.sizes()[1] == self.dim() && !out.t().is_contiguous();
  at::Tensor out_temp = need_to_copy ?
    at::native::empty_cuda({self.dim(), num_nonzeros_h}, out.options()) :
    out.resize_({self.dim(), num_nonzeros_h});
  //Scalars are expected to produce output of size (1,0), so we can't write to it
  if (self.dim() > 0) {
    // Select the flat positions (0..N-1) whose element is nonzero.
    hipcub::CountingInputIterator<int64_t> counting_itr(0);
    temp_storage_bytes = 0;
    hipcub::DeviceSelect::Flagged(nullptr, temp_storage_bytes, counting_itr, itr,
                                  out_temp.data_ptr<int64_t>(), (int*)num_nonzeros.get(), N, stream);
    temp_storage = allocator.allocate(temp_storage_bytes);
    hipcub::DeviceSelect::Flagged(temp_storage.get(), temp_storage_bytes, counting_itr, itr,
                                  out_temp.data_ptr<int64_t>(), (int*)num_nonzeros.get(), N, stream);
    if (num_nonzeros_h > 0 && self.dim() > 1){
      // Unflatten: column `dim` of the transposed output gets
      // (flat / prod(sizes after dim)) % sizes[dim].
      int64_t div = 1;
      auto thrust_allocator = THCThrustAllocator(globalContext().lazyInitCUDA());
      for (int dim = self.dim()-1; dim >= 0; dim--){
        int64_t dim_size = self.sizes()[dim];
        thrust::transform(
          thrust::hip::par(thrust_allocator).on(stream),
          thrust::device_ptr<int64_t>(out_temp.data_ptr<int64_t>()),
          thrust::device_ptr<int64_t>(out_temp.data_ptr<int64_t>()) + num_nonzeros_h,
          thrust::device_ptr<int64_t>(out_temp.data_ptr<int64_t>()) + num_nonzeros_h * dim,
          [=] C10_HOST_DEVICE (const int64_t val) {return (val/div) % dim_size;}
        );
        div *= dim_size;
      }
    }
  }
  if (need_to_copy) {
    out.copy_(out_temp.t());
  } else {
    //transpose out so it is correct size
    Tensor out_ = out_temp.t();
    out.set_(out_);
  }
}
// out= entry point for nonzero on CUDA/HIP.
// Validates the input size (the cub reductions below count with `int`),
// output dtype, and device agreement, then dispatches over input dtypes.
Tensor& nonzero_out_cuda(Tensor& out, const Tensor& self){
  TORCH_CHECK(self.numel() < std::numeric_limits<int>::max(), "nonzero is not supported for tensors with more than INT_MAX elements, \
  file a support request");
  TORCH_CHECK(out.dtype() == at::kLong, "Expected object of scalar type ", at::kLong, " as out, but got ", out.dtype());
  TORCH_CHECK(self.device() == out.device(), "expected self and out to be on the same device, but got out on ",
  out.device(), " and self on ", self.device());
  AT_DISPATCH_ALL_TYPES_AND3(at::ScalarType::Bool, at::ScalarType::BFloat16, at::ScalarType::Half,
    self.scalar_type(), "nonzero_cuda",
    [&] {nonzero_cuda_out_impl<scalar_t>(self, out);});
  return out;
}
// Functional nonzero: build an empty int64 result on self's device and
// forward to the out= variant, which resizes and fills it.
Tensor nonzero_cuda(const Tensor& self){
  auto long_options = self.options().dtype(kLong);
  Tensor result = at::native::empty_cuda({0}, long_options);
  return nonzero_out_cuda(result, self);
}
} // native
} // at
|
9197d20f4425435def721481c632152eb8ff1657.cu
|
#include <ATen/native/TensorAdvancedIndexing.h>
#include <ATen/native/IndexingUtils.h>
#include <ATen/ATen.h>
#include <ATen/NativeFunctions.h>
#include <ATen/ExpandUtils.h>
#include <ATen/MemoryOverlap.h>
#include <ATen/native/TensorIterator.h>
#include <ATen/AccumulateType.h>
#include <ATen/cuda/detail/IndexUtils.cuh>
#include <ATen/cuda/CUDAUtils.h>
#include <THC/THCDeviceUtils.cuh>
#include <THC/THCGeneral.h>
#include <THC/THCTensorSort.cuh>
#include <ATen/cuda/CUDAContext.h>
#include <THC/THCThrustAllocator.cuh>
#include <thrust/execution_policy.h>
#include <thrust/sort.h>
#include <thrust/transform.h>
#include <THC/THCAtomics.cuh>
#include <cub/cub.cuh>
#include <c10/macros/Macros.h>
namespace {
// Accumulates grad_output rows into grad_weight rows addressed by
// sorted_indices, without atomics: duplicates are adjacent after sorting, and
// only the warp that sees the first occurrence of an index value processes
// the whole run of equal indices (the do/while below).
// Grid mapping: blockIdx.x * blockDim.y + threadIdx.y picks an index entry,
// blockIdx.y * blockDim.x * SZ picks a chunk of features, blockIdx.z strides
// over outer (unindexed leading) dimensions. SZ is the per-thread unroll.
template <typename scalar_t, int SZ>
__global__ void indexing_backward_kernel(
  int64_t* sorted_indices, int64_t* indices, scalar_t* grad_output, scalar_t* grad_weight,
  int64_t numel, int64_t stride, int64_t stride_before, int64_t outer_dim) {
//numel is total number of flattened indices, not expanded to dimensions that are not indexed.
//stride is the cumulative size of the not-indexed last dimensions
//stride_before is the stride of the dimension immediately preceding first indexed dimension
//if indexing starts from the 0th dimension, stride_before does not matter because blockIdx.z will be 0 in this case
//outer_dim is number of elements in the first unindexed dimensions
  using accscalar_t = at::acc_type<scalar_t, true>;
  // Each warp is responsible for an input into the LookupTable.
  // If the preceding input has the same destination index as this input, then the warp
  // exits immediately. The warp also processes subsequent inputs with the
  // same value.
  //
  // Input Warp
  // 1 <warp 1>
  // 1 <warp 1> (<warp 2> exits without doing any work)
  // 5 <warp 3>
  // 8 <warp 4>
  // Number of values processed by each thread (grain size)
  for (int64_t z = blockIdx.z; z < outer_dim; z += gridDim.z){
    int64_t idx = blockIdx.x * blockDim.y + threadIdx.y;
    // Only proceed if this is the first occurrence of this index value.
    if (idx < numel
        && (idx == 0 || sorted_indices[idx] != sorted_indices[idx - 1])){
      do {
        int64_t start_feature = threadIdx.x + blockIdx.y * blockDim.x * SZ;
        // Row accumulated into; sorted_indices addresses grad_weight.
        const int64_t weight_row = ((int64_t) sorted_indices[idx]) * stride + z * stride_before;
        // Row read; `indices` maps back to the pre-sort position in grad_output.
        const int64_t grad_row = ((int64_t) indices[idx]) * stride + z * numel * stride;
        const accscalar_t scale = (accscalar_t)1.0;
        accscalar_t gradient[SZ];
        accscalar_t weight[SZ];
        while (start_feature < stride) {
          // Load SZ features per thread, accumulate in accscalar_t, store back.
          #pragma unroll
          for (int ii = 0; ii < SZ; ii++) {
            int64_t feature_dim = start_feature + ii * C10_WARP_SIZE;
            if (feature_dim < stride) {
              gradient[ii] = static_cast<accscalar_t>(grad_output[grad_row + feature_dim]);
              weight[ii] = static_cast<accscalar_t>(grad_weight[weight_row + feature_dim]);
            }
          }
          #pragma unroll
          for (int ii = 0; ii < SZ; ii++) {
            weight[ii] += gradient[ii] * scale;
          }
          #pragma unroll
          for (int ii = 0; ii < SZ; ii++) {
            int64_t feature_dim = start_feature + ii * C10_WARP_SIZE;
            if (feature_dim < stride) {
              grad_weight[weight_row + feature_dim] = static_cast<scalar_t>(weight[ii]);
            }
          }
          start_feature += gridDim.y * blockDim.x * SZ;
        }
        idx++;
      } while (idx < numel && sorted_indices[idx] == sorted_indices[idx - 1]);
    }
  }
}
}
namespace at { namespace native {
// Wrap (possibly negative) indices into [0, dim_size) via remainder, after
// optionally validating that each index lies in [-dim_size, dim_size).
// Range checking is skipped in backward: if there were out-of-bounds indices,
// forward should already have errored out.
static Tensor wrapIndexOnce(const Tensor & index, int64_t dim, int64_t dim_size, bool check_range=true) {
  const bool should_validate = check_range && index.numel() != 0;
  if (should_validate) {
    const auto max_idx = index.max().item<int64_t>();
    const auto min_idx = index.min().item<int64_t>();
    TORCH_CHECK_INDEX(max_idx < dim_size,
                      "index ", max_idx, " is out of bounds for dimension ", dim, " with size ", dim_size);
    TORCH_CHECK_INDEX(min_idx >= -dim_size,
                      "index ", min_idx, " is out of bounds for dimension ", dim, " with size ", dim_size);
  }
  return index.remainder(dim_size);
}
// Returns the strides the tensor would have if it were contiguous
// (row-major), regardless of its actual memory layout.
static std::vector<int64_t> computeLinearStride(const Tensor & tensor) {
  auto sizes = tensor.sizes();
  std::vector<int64_t> stride(tensor.dim());
  stride[tensor.dim() - 1] = 1;
  // Walk outward from the innermost dimension: each stride is the product
  // of all sizes to its right.
  for (int64_t d = tensor.dim() - 2; d >= 0; d--) {
    stride[d] = stride[d + 1] * sizes[d + 1];
  }
  return stride;
}
// Flattens the (already broadcast) indexing tensors into one linear index
// over src's contiguous strides. Returns {linearIndex, nElemBefore,
// strideBefore, nElemAfter}: the element counts of the unindexed leading and
// trailing dimensions, and src's stride of the dimension just before the
// first indexed one (0 when indexing starts at dim 0).
static std::tuple<Tensor, int64_t, int64_t, int64_t>
computeLinearIndex(const Tensor & src, TensorList indices, bool check_range) {
  auto strides = computeLinearStride(src);
  const auto& backend = src.type().backend();
  // Compute the linear index by multiplying the indexing tensors by the
  // stride and summing them. All the indexing tensors have the same shape at
  // this point. We also compute the number of dimensions before and after that
  // are not being index.
  Tensor linearIndex;
  int64_t emptyBefore = 0, emptyAfter = 0, nElemBefore = 1, nElemAfter = 1, strideBefore =0;
  for (auto i = decltype(src.dim()){0}; i < src.dim(); i++) {
    if (indices[i].defined()) {
      // Cast index to the longType matching src's backend
      // This allows us to support ie indexing a cuda tensor with a cpu tensor
      Tensor index = (wrapIndexOnce(indices[i], i, src.size(i), check_range) * strides[i]).toBackend(backend);
      if (linearIndex.defined()) {
        linearIndex += index;
      } else {
        // First indexed dimension: remember the stride just before it.
        linearIndex = index;
        if (i>0) {
          strideBefore = src.stride(i-1); // stride after undefined dimensions
        }
      }
    } else if (linearIndex.defined()) {
      // Unindexed dimension after the indexed block.
      emptyAfter++;
      nElemAfter *= src.size(i);
    } else {
      // Unindexed dimension before the first indexed one.
      emptyBefore++;
      nElemBefore *= src.size(i);
    }
  }
  return std::make_tuple(std::move(linearIndex), nElemBefore, strideBefore, nElemAfter);
}
// Normalizes a user-supplied index list for index_put:
//  - expands bool/byte masks into long index tensors and broadcasts all
//    index tensors together,
//  - pads with undefined tensors up to self.dim(),
//  - if the indexed dims are not adjacent, transposes them to the front
//    (inversePerm undoes this later),
//  - flattens everything via computeLinearIndex.
static std::tuple<Tensor, Tensor, int64_t, int64_t, int64_t, std::vector<int64_t>> makeLinearIndex(Tensor self, TensorList orig, bool check_range) {
  checkIndexTensorTypes(orig);
  // first expand BoolTensor (masks) or ByteTensor (masks) into 1 or more LongTensors
  auto indices = expandTensors(self, orig);
  // next broadcast all index tensors together
  indices = expand_outplace(indices);
  // add missing null Tensors so that it matches self.dim()
  while (indices.size() < (size_t)self.dim()) {
    indices.emplace_back();
  }
  // if the non-null indices are not all adjacent, transpose self and indices
  // together so that they're adjacent at the front
  std::vector<int64_t> inversePerm;
  if (!hasContiguousSubspace(indices)) {
    std::tie(self, indices, inversePerm) = transposeToFrontAndInvPerm(self, indices);
  }
  int64_t nElemBefore, strideBefore, nElemAfter;
  Tensor linearIndex;
  std::tie(linearIndex, nElemBefore, strideBefore, nElemAfter) = computeLinearIndex(self, indices, check_range);
  return std::make_tuple(linearIndex, self, nElemBefore, strideBefore, nElemAfter, inversePerm);
}
namespace {
// Accumulating index_put_ (self[indices] += value) on CUDA.
// Flattens the indices, sorts them (keeping the original positions) so that
// duplicate destinations become adjacent, then launches
// indexing_backward_kernel, which accumulates each run of equal indices in a
// single warp without atomics. `unsafe` skips the index range check.
void index_put_accum_kernel(Tensor & self, TensorList indices, const Tensor & value, bool unsafe) {
  if (indices.size() > (size_t)self.dim()) {
    TORCH_CHECK_INDEX(false, "too many indices for tensor of dimension ", self.dim(), " (got ", indices.size(), ")");
  }
  auto value_ = value.contiguous();
  Tensor linearIndex, expandedValue, src;
  int64_t nElemBefore, strideBefore, sliceSize;
  std::vector<int64_t> inversePerm;
  std::tie(linearIndex, src, nElemBefore, strideBefore, sliceSize, inversePerm) = makeLinearIndex(self, indices, !unsafe);
  int64_t num_indices = linearIndex.numel();
  if (num_indices > 0 && sliceSize > 0) {
    // makeLinearIndex may have transposed self into `src`; work on a
    // contiguous copy and permute back at the end if so.
    const bool permuted = !src.is_contiguous();
    auto src_ = permuted ? src.contiguous() : src;
    linearIndex = linearIndex.reshape(-1);
    auto sorted_indices = at::empty_like(linearIndex, LEGACY_CONTIGUOUS_MEMORY_FORMAT);
    auto orig_indices = at::empty_like(linearIndex, LEGACY_CONTIGUOUS_MEMORY_FORMAT);
    using device_ptr = thrust::device_ptr<int64_t>;
    const cudaStream_t stream = at::cuda::getCurrentCUDAStream();
    // Convert flat element offsets into slice numbers.
    linearIndex.floor_divide_(sliceSize);
    {
      sorted_indices.copy_(linearIndex);
      auto allocator = THCThrustAllocator(globalContext().lazyInitCUDA());
      auto policy = thrust::cuda::par(allocator).on(stream);
      // Fill sortedOrigIndices with sequential indices
      const auto count_iter = thrust::counting_iterator<int64_t>(0);
      auto orig_data = device_ptr(orig_indices.data_ptr<int64_t>());
      thrust::copy(policy, count_iter, count_iter + num_indices, orig_data);
      // Sort the inputs into sorted with the corresponding indices; we
      // don't need a stable or multidimensional sort, so just use Thrust
      // directly
      // Sort; a stable sort is not required
      // NB - not passing comparator causes thrust to use radix sort, and it hurts perf A LOT, at least for medium (few K) sized indices
      auto sorted_data = device_ptr(sorted_indices.data_ptr<int64_t>());
      thrust::sort_by_key(policy, sorted_data, sorted_data + num_indices, orig_data, ThrustLTOp<int64_t>());
    }
    TORCH_INTERNAL_ASSERT(linearIndex.numel()*sliceSize*nElemBefore == value.numel(), "number of flattened indices did not match number of elements in the value tensor", linearIndex.numel()*sliceSize*nElemBefore, value.numel());
    const int UNROLL = 4;
    const int indices_per_block = 4;
    // x: groups of sorted indices, y: feature chunks, z: outer dimensions.
    dim3 grid(THCCeilDiv(num_indices, (int64_t) indices_per_block),
         std::min<int>(at::cuda::getCurrentDeviceProperties()->maxGridSize[1], THCCeilDiv(sliceSize, (int64_t) (C10_WARP_SIZE*UNROLL))),
         std::min(std::max<int>(1,nElemBefore), at::cuda::getCurrentDeviceProperties()->maxGridSize[2]));
    dim3 block(C10_WARP_SIZE, indices_per_block);
    AT_DISPATCH_ALL_TYPES_AND3(at::ScalarType::Half, at::ScalarType::Bool, at::ScalarType::BFloat16,
    value_.scalar_type(), "indexing_backward", [&] {
      AT_SKIP_BFLOAT16_IF_NOT_ROCM(scalar_t, "indexing_backward", [&] {
        indexing_backward_kernel<scalar_t, UNROLL><<<grid, block, 0, stream>>>(
          sorted_indices.data_ptr<int64_t>(),
          orig_indices.data_ptr<int64_t>(),
          value_.data_ptr<scalar_t>(),
          src_.data_ptr<scalar_t>(),
          num_indices,
          sliceSize,
          strideBefore,
          nElemBefore);
      });
    });
    AT_CUDA_CHECK(cudaGetLastError());
    if (permuted)
      self.copy_(src_.permute(inversePerm));
  }
}
REGISTER_CUDA_DISPATCH(index_put_accum_stub, &index_put_accum_kernel);
} //anonymous
// Check tensor dimensions for index operations, and return the slice size.
// Validates tensor dimensions for an index operation and returns the slice
// size (the number of elements per indexed slice, i.e. the product of all
// dimension sizes except `dim`).
//   dst   - destination tensor being indexed into
//   dim   - the indexed dimension (must be in [0, dst.dim()))
//   index - 0-d or 1-d tensor of indices; numel must equal src.size(dim)
//   src   - source tensor; its non-`dim` slice size must match dst's
// Raises (via TORCH_CHECK) on any dimension/size mismatch; warns once if the
// slices merely differ in shape (deprecated behavior).
static ptrdiff_t getSliceSize(const Tensor & dst,
                              int dim,
                              const Tensor & index,
                              const Tensor & src)
{
  int dstDims = dst.dim();
  int srcDims = src.dim();
  TORCH_CHECK(index.dim() <= 1, "Index must be vector or scalar");
  ptrdiff_t dstSliceSize = 1;
  TORCH_CHECK(dim >= 0 && dim < dstDims, "Indexing dim ", dim, " is out of bounds");
  for (int d = 0; d < dstDims; d++) {
    if (d != dim) {
      dstSliceSize *= dst.size(d);
    }
  }
  TORCH_CHECK(dim < srcDims, "Indexing dim ", dim, " is out of bounds");
  TORCH_CHECK(index.numel() == src.size(dim),
              "length of src.size[dim] is not equal to length of indices");
  ptrdiff_t srcSliceSize = 1;
  bool mismatch = false;
  if (dstDims != srcDims) mismatch = true;
  for (int d = 0; d < srcDims; d++) {
    if (d != dim) {
      srcSliceSize *= src.size(d);
      if (!mismatch && dst.size(d) != src.size(d)) mismatch = true;
    }
  }
  // BUG FIX: TORCH_CHECK streams its variadic arguments; it does not do
  // printf-style substitution, so the old "(%ld vs %ld)" message printed the
  // placeholders literally instead of the actual sizes.
  TORCH_CHECK(dstSliceSize == srcSliceSize,
              "Source/destination tensor have different slice sizes (",
              dstSliceSize, " vs ", srcSliceSize, ")");
  if (mismatch) {
    TORCH_WARN_ONCE(
        "Warning: source/destination slices have same size but different "
        "shape for an index operation. This behavior is deprecated.\n");
  }
  return dstSliceSize;
}
// We prefer this kernel to avoid reloading index points if the number
// of indices is a small number.
// This kernel in fact works for all choices of problem size, but if
// the number of indices chosen is large, then the
// indexAddLargeIndex kernel is a better choice to increase
// parallelism.
// Scatter-add with few indices: dst[indices[s]] += alpha * src[s] along the
// add dims. Each index is loaded once and reused for a whole slice; writes
// use gpuAtomicAddNoReturn because different indices may collide in dst.
// Launched with a 1-D grid/block; threads grid-stride over `innerSize`.
template <typename T, typename IndicesType, typename IndexType, int DstDim, int SrcDim, int IdxDim>
__global__ void indexAddSmallIndex(cuda::detail::TensorInfo<T, IndexType> dst,
                                   cuda::detail::TensorInfo<T, IndexType> src,
                                   cuda::detail::TensorInfo<IndicesType, IndexType> indices,
                                   int dstAddDim,
                                   int srcAddDim,
                                   IndexType innerSize,
                                   int64_t dstAddDimSize,
                                   T alpha) {
  // In order to avoid reloading the index that we are copying, load
  // it once to handle all of the points that are being selected, so
  // it can be reused as much as possible. This kernel is chosen when
  // this is a good choice (small number of chosen indices), since
  // re-accessing indices in addition to src elements can be slow.
  for (IndexType srcIndex = 0; srcIndex < indices.sizes[0]; ++srcIndex) {
    // Lua indices begin at 1
    IndexType dstIndex =
      indices.data[cuda::detail::IndexToOffset<IndicesType, IndexType, IdxDim>::get(srcIndex, indices)];
    // Device-side bounds check; traps the kernel on an out-of-range index.
    CUDA_KERNEL_ASSERT(dstIndex < dstAddDimSize);
    // We stride over the output ignoring the indexed dimension
    // (innerSize), whose offset calculation is handled differently
    for (IndexType linearIndex = blockIdx.x * blockDim.x + threadIdx.x;
         linearIndex < innerSize;
         linearIndex += gridDim.x * blockDim.x) {
      IndexType dstOffset =
        cuda::detail::IndexToOffset<T, IndexType, DstDim>::get(linearIndex, dst);
      dstOffset += dstIndex * dst.strides[dstAddDim];
      IndexType srcOffset =
        cuda::detail::IndexToOffset<T, IndexType, SrcDim>::get(linearIndex, src);
      srcOffset += srcIndex * src.strides[srcAddDim];
      // Atomic: multiple indices can target the same dst slice.
      gpuAtomicAddNoReturn(&dst.data[dstOffset], alpha * src.data[srcOffset]);
    }
  }
}
// We prefer this kernel to balance parallelism across index points,
// if there are a large number of indices.
// This kernel in fact works for all choices of problem size, but if
// the number of indices chosen is small, then the
// indexAddSmallIndex kernel is a better choice to reduce memory
// accesses.
// Scatter-add parallelized over index points as well as slice elements:
// threads grid-stride over `totalSize` (the full src element count).
// IndexIsMajor controls how the flat id splits into (srcIndex, element);
// writes are atomic since different indices may target the same dst slice.
template <typename T, typename IndicesType, typename IndexType, int DstDim, int SrcDim, int IdxDim,
          bool IndexIsMajor>
__global__ void indexAddLargeIndex(cuda::detail::TensorInfo<T, IndexType> dst,
                                   cuda::detail::TensorInfo<T, IndexType> src,
                                   cuda::detail::TensorInfo<IndicesType, IndexType> indices,
                                   int dstAddDim,
                                   int srcAddDim,
                                   IndexType totalSize,
                                   IndexType innerSize,
                                   int64_t dstAddDimSize,
                                   T alpha) {
  // We stride over the output including the indexed dimension
  // (totalSize), and calculate the destination index point based on that
  for (IndexType linearIndex = blockIdx.x * blockDim.x + threadIdx.x;
       linearIndex < totalSize;
       linearIndex += gridDim.x * blockDim.x) {
    IndexType srcIndex, elementInSlice;
    if (IndexIsMajor) {
      srcIndex = linearIndex / innerSize;
      elementInSlice = linearIndex % innerSize;
    }
    else {
      elementInSlice = linearIndex / innerSize;
      srcIndex = linearIndex % innerSize;
    }
    // Lua indices begin at 1
    IndexType dstIndex =
      indices.data[cuda::detail::IndexToOffset<IndicesType, IndexType, IdxDim>::get(srcIndex, indices)];
    // Device-side bounds check; traps the kernel on an out-of-range index.
    CUDA_KERNEL_ASSERT(dstIndex < dstAddDimSize);
    IndexType dstOffset =
      cuda::detail::IndexToOffset<T, IndexType, DstDim>::get(elementInSlice, dst);
    dstOffset += dstIndex * dst.strides[dstAddDim];
    IndexType srcOffset =
      cuda::detail::IndexToOffset<T, IndexType, SrcDim>::get(elementInSlice, src);
    srcOffset += srcIndex * src.strides[srcAddDim];
    // Atomic: multiple indices can target the same dst slice.
    gpuAtomicAddNoReturn(&dst.data[dstOffset], alpha * src.data[srcOffset]);
  }
}
// Compare the stride between adjacent slices (sliceStride) with strides in the
// other dimensions (i.e., strides *inside* each slice).
//
// - Returns true if some dimension inside the slice has lower stride than
// sliceStride. The simplest example is a 2-D contiguous tensor with sliceDim
// == 0 (that is, each slice is a row).
//
// In this case, we choose the CUDA kernel that processes the data in
// "index-major order". For example, if thread count equals slice size, then
// all threads process slice #0 in lockstep, and then slice #1, and so on.
//
// - Otherwise (i.e., sliceStride has the lowest value), this function returns
// false. The simplest example is a 2-D contiguous tensor with sliceDim == 1
// (each slice is a column).
//
// In this case, we choose the CUDA kernel that processes the data in
// "elementInSlice-major order". For example, each thread can process element
// #0 of every slice, and then element #1 of every slice, and so on.
// Decide whether the index dimension should be the "major" (outer) axis of
// the large-index kernels. Returns true iff some other dimension with extent
// greater than one has a smaller stride than the stride between adjacent
// slices — i.e. elements inside a slice are packed more tightly than the
// slices themselves (e.g. row slices of a 2-D contiguous tensor).
template <typename scalar_t>
bool indexShouldBeMajor(cuda::detail::TensorInfo<scalar_t, unsigned int> &info,
                                    int sliceDim)
{
  const unsigned int sliceStride = info.strides[sliceDim];
  bool foundTighterDim = false;
  for (int d = 0; d < info.dims && !foundTighterDim; ++d) {
    if (d == sliceDim) {
      continue;
    }
    foundTighterDim = (info.sizes[d] > 1) && (info.strides[d] < sliceStride);
  }
  return foundTighterDim;
}
// In-place index_add: self[index[i]] += alpha * source[i] along `dim`.
// Nondeterministic (atomicAdd). Chooses a "small index" kernel when few
// indices are given (each thread loops over all indices) or a "large
// index" kernel otherwise (threads are spread over the whole source).
Tensor& index_add_cuda_(Tensor & self, int64_t dim, const Tensor & index, const Tensor & source, const at::Scalar alpha) {
  // See Note [Writing Nondeterministic Operations]
  // Nondeterministic because of atomicAdd usage
  globalContext().alertNotDeterministic("index_add_cuda_");
  dim = maybe_wrap_dim(dim, self.dim());
  // Argument validation: same device, vector index with integral dtype,
  // matching scalar types, dim in range, one index per source slice.
  TensorArg self_arg{self, "self", 1}, index_arg{index, "index", 3}, source_arg{source, "source", 4};
  checkAllSameGPU("index_add", {self_arg, index_arg, source_arg});
  TORCH_CHECK_INDEX(index.dim() <= 1, "index_add_(): Index is supposed to be a vector");
  TORCH_CHECK(index.scalar_type() == ScalarType::Long || index.scalar_type() == ScalarType::Int, "index_add_(): Expected dtype int32/int64 for index");
  TORCH_CHECK(self.scalar_type() == source.scalar_type(),
              "index_add_(): self and source must have the same scalar type");
  TORCH_CHECK(dim == 0 || dim < source.dim(),
              "index_add_(): Indexing dim ", dim, " is out of bounds of tensor");
  TORCH_CHECK(index.numel() == (source.dim() == 0 ? 1 : source.size(dim)),
              "index_add_(): Number of indices should be equal to self.size(dim)");
  // Scalars are treated as 1-d tensor
  Tensor self_ = (self.dim() == 0) ? self.view(1) : self;
  Tensor source_ = (source.dim() == 0) ? source.view(1) : source;
  TORCH_CHECK(self.dim() <= MAX_CUTORCH_DIMS, CUTORCH_DIM_WARNING);
  TORCH_CHECK(source.dim() <= MAX_CUTORCH_DIMS, CUTORCH_DIM_WARNING);
  TORCH_CHECK(index.dim() <= MAX_CUTORCH_DIMS, CUTORCH_DIM_WARNING);
  at::assert_no_internal_overlap(self);
  at::assert_no_partial_overlap(self, index);
  at::assert_no_partial_overlap(self, source);
  // The `source` is partitioned into two parts:
  // -the size of each slice we are indexing, which is the
  // total size of the tensor ignoring dimension `dim`;
  // -the number of index we are choosing, which is the total size
  // of the tensor `index`.
  ptrdiff_t sliceSize = getSliceSize(self_, dim, index, source_);
  ptrdiff_t sourceTotalSize = source.numel();
  int64_t selfAddDimSize = self_.size(dim);
  ptrdiff_t numIndex = index.numel();
  // Empty slices: nothing to add.
  if (sliceSize == 0) {
    return self;
  }
  const cudaStream_t stream = at::cuda::getCurrentCUDAStream();
  bool indContig = index.is_contiguous();
  int mpc = at::cuda::getCurrentDeviceProperties()->multiProcessorCount;
  // Launch helpers. The trailing template ints (-2 = statically contiguous,
  // -1 = fully generic strides, 1/2/3 = fixed dimensionality) select the
  // IndexToOffset specialization used inside the kernels.
#define SMALL_INDEX(TENSOR_TYPE, INDICES_TYPE, TYPE, SELF_DIM, SOURCE_DIM, IDX_DIM)     \
  indexAddSmallIndex<TENSOR_TYPE, INDICES_TYPE, TYPE, SELF_DIM, SOURCE_DIM, IDX_DIM>    \
    <<<smallIndexGrid, smallIndexBlock, 0, stream>>>(                                   \
      selfInfo, sourceInfo, indexInfo,                                                  \
      selfAddDim, sourceAddDim, sliceSize, selfAddDimSize, alpha_scalar);
#define LARGE_INDEX(TENSOR_TYPE, INDICES_TYPE, TYPE,                        \
                    SELF_DIM, SOURCE_DIM, IDX_DIM, IDX_IS_MAJOR)            \
  indexAddLargeIndex<TENSOR_TYPE, INDICES_TYPE, TYPE,                       \
                     SELF_DIM, SOURCE_DIM, IDX_DIM, IDX_IS_MAJOR>           \
    <<<largeIndexGrid, largeIndexBlock, 0, stream>>>(                       \
      selfInfo, sourceInfo, indexInfo,                                      \
      selfAddDim, sourceAddDim, sourceTotalSize,                            \
      (IDX_IS_MAJOR) ? sliceSize : numIndex,                                \
      selfAddDimSize, alpha_scalar);
  // Launch shapes: 128 threads per block, grid capped at 8 blocks per SM.
  dim3 smallIndexGrid(std::min(THCCeilDiv(sliceSize, (ptrdiff_t)128), (ptrdiff_t)(mpc * 8)));
  dim3 smallIndexBlock(std::min(sliceSize, (ptrdiff_t)128));
  dim3 largeIndexGrid(std::min(THCCeilDiv(sourceTotalSize, (ptrdiff_t)128), (ptrdiff_t)(mpc * 8)));
  dim3 largeIndexBlock(std::min(sourceTotalSize, (ptrdiff_t)128));
  // Fast path: every offset computation fits in 32-bit arithmetic.
  if (cuda::detail::canUse32BitIndexMath(self) &&
      cuda::detail::canUse32BitIndexMath(source) &&
      cuda::detail::canUse32BitIndexMath(index)) {
    AT_DISPATCH_ALL_TYPES_AND2(at::ScalarType::Half, at::ScalarType::BFloat16, self.scalar_type(), "index_add", [&] {
      AT_SKIP_BFLOAT16_IF_NOT_ROCM(scalar_t, "index_add", [&] {
        cuda::detail::TensorInfo<scalar_t, unsigned int> selfInfo =
            cuda::detail::getTensorInfo<scalar_t, unsigned int>(self_);
        int selfAddDim = selfInfo.collapseDims(dim);
        selfInfo.reduceDim(selfAddDim);
        AT_DISPATCH_INDEX_TYPES(index.scalar_type(), "index_add_cuda_", [&] () {
          auto sourceInfo =
            cuda::detail::getTensorInfo<scalar_t, unsigned int>(source_);
          int sourceAddDim = sourceInfo.collapseDims(dim);
          sourceInfo.reduceDim(sourceAddDim);
          auto indexInfo =
            cuda::detail::getTensorInfo<index_t, unsigned int>(index);
          indexInfo.collapseDims();
          auto alpha_scalar = alpha.to<scalar_t>();
          // A reasonable choice for when to have each thread iterate over
          // index to choose
          if (numIndex <= 16) {
            if (selfInfo.dims == 1 && sourceInfo.dims == 1 && indContig) {
              SMALL_INDEX(scalar_t, index_t, unsigned int, 1, 1, -2);
            } else if (selfInfo.dims == 2 && sourceInfo.dims == 2 && indContig) {
              SMALL_INDEX(scalar_t, index_t, unsigned int, 2, 2, -2);
            } else if (selfInfo.dims == 3 && sourceInfo.dims == 3 && indContig) {
              SMALL_INDEX(scalar_t, index_t, unsigned int, 3, 3, -2);
            } else {
              SMALL_INDEX(scalar_t, index_t, unsigned int, -1, -1, -1);
            }
          } else {
            // Pick the traversal order that best matches self's layout
            // (see indexShouldBeMajor above).
            bool indexIsMajor = indexShouldBeMajor(selfInfo, selfAddDim);
            if (selfInfo.dims == 1 && sourceInfo.dims == 1 && indContig) {
              LARGE_INDEX(scalar_t, index_t, unsigned int, 1, 1, -2, true);
            } else if (selfInfo.dims == 2 && sourceInfo.dims == 2 && indContig) {
              if (indexIsMajor) {
                LARGE_INDEX(scalar_t, index_t, unsigned int, 2, 2, -2, true);
              } else {
                LARGE_INDEX(scalar_t, index_t, unsigned int, 2, 2, -2, false);
              }
            } else if (selfInfo.dims == 3 && sourceInfo.dims == 3 && indContig) {
              if (indexIsMajor) {
                LARGE_INDEX(scalar_t, index_t, unsigned int, 3, 3, -2, true);
              } else {
                LARGE_INDEX(scalar_t, index_t, unsigned int, 3, 3, -2, false);
              }
            } else {
              LARGE_INDEX(scalar_t, index_t, unsigned int, -1, -1, -1, true);
            }
          }
        });
      });
    });
  } else {
    // 64-bit fallback: always the fully generic large-index kernel.
    AT_DISPATCH_ALL_TYPES_AND2(at::ScalarType::Half, at::ScalarType::BFloat16, self.scalar_type(), "index_add", [&] {
      AT_SKIP_BFLOAT16_IF_NOT_ROCM(scalar_t, "index_add", [&] {
        cuda::detail::TensorInfo<scalar_t, uint64_t> selfInfo =
          cuda::detail::getTensorInfo<scalar_t, uint64_t>(self_);
        int selfAddDim = selfInfo.collapseDims(dim);
        selfInfo.reduceDim(selfAddDim);
        cuda::detail::TensorInfo<scalar_t, uint64_t> sourceInfo =
          cuda::detail::getTensorInfo<scalar_t, uint64_t>(source_);
        int sourceAddDim = sourceInfo.collapseDims(dim);
        sourceInfo.reduceDim(sourceAddDim);
        AT_DISPATCH_INDEX_TYPES(index.scalar_type(), "index_add_cuda_", [&] () {
          cuda::detail::TensorInfo<index_t, uint64_t> indexInfo =
            cuda::detail::getTensorInfo<index_t, uint64_t>(index);
          indexInfo.collapseDims();
          auto alpha_scalar = alpha.to<scalar_t>();
          LARGE_INDEX(scalar_t, index_t, uint64_t, -1, -1, -1, true);
        });
      });
    });
  }
  return self;
#undef SMALL_INDEX
#undef LARGE_INDEX
}
namespace {
// Kernel preferred when the number of chosen indices is small: every index
// is loaded once and then reused while all threads cooperatively copy its
// whole slice, minimizing re-reads of the index tensor. It is correct for
// any problem size, but for many indices indexSelectLargeIndex exposes
// more parallelism.
// Launch: 1-D grid/block; threads grid-stride over `innerSize` elements.
template <typename T, typename IndicesType, typename IndexType, int DstDim, int SrcDim, int IdxDim>
__global__ void indexSelectSmallIndex(cuda::detail::TensorInfo<T, IndexType> dst,
                                      cuda::detail::TensorInfo<T, IndexType> src,
                                      cuda::detail::TensorInfo<IndicesType, IndexType> indices,
                                      int dstSelectDim,
                                      int srcSelectDim,
                                      IndexType innerSize,
                                      int64_t srcSelectDimSize) {
  // Outer loop over the selected indices; all threads work on one slice at
  // a time so the index value is fetched exactly once per thread per slice.
  for (IndexType outSlice = 0; outSlice < indices.sizes[0]; ++outSlice) {
    const IndexType inSlice =
      indices.data[cuda::detail::IndexToOffset<IndicesType, IndexType, IdxDim>::get(outSlice, indices)];
    CUDA_KERNEL_ASSERT(inSlice < srcSelectDimSize);

    // Grid-stride loop over the elements of this slice; the indexed
    // dimension's contribution is added separately via its stride.
    const IndexType step = gridDim.x * blockDim.x;
    for (IndexType elem = blockIdx.x * blockDim.x + threadIdx.x;
         elem < innerSize;
         elem += step) {
      const IndexType dstOff =
        cuda::detail::IndexToOffset<T, IndexType, DstDim>::get(elem, dst)
        + outSlice * dst.strides[dstSelectDim];
      const IndexType srcOff =
        cuda::detail::IndexToOffset<T, IndexType, SrcDim>::get(elem, src)
        + inSlice * src.strides[srcSelectDim];
      dst.data[dstOff] = src.data[srcOff];
    }
  }
}
// Kernel preferred for a large number of indices: parallelism is spread
// across all (index, element) pairs of the output. Correct for any problem
// size, but for few indices indexSelectSmallIndex does fewer index reads.
// IndexIsMajor selects whether the linear id is decomposed index-major
// (innerSize == slice size) or element-major (innerSize == index count).
template <typename T, typename IndicesType, typename IndexType, int DstDim, int SrcDim, int IdxDim,
          bool IndexIsMajor>
__global__ void indexSelectLargeIndex(cuda::detail::TensorInfo<T, IndexType> dst,
                                      cuda::detail::TensorInfo<T, IndexType> src,
                                      cuda::detail::TensorInfo<IndicesType, IndexType> indices,
                                      int dstSelectDim,
                                      int srcSelectDim,
                                      IndexType totalSize,
                                      IndexType innerSize,
                                      int64_t srcSelectDimSize) {
  // Grid-stride over the whole output, indexed dimension included.
  const IndexType step = gridDim.x * blockDim.x;
  for (IndexType linear = blockIdx.x * blockDim.x + threadIdx.x;
       linear < totalSize;
       linear += step) {
    IndexType outSlice, elem;
    if (IndexIsMajor) {
      outSlice = linear / innerSize;
      elem = linear % innerSize;
    } else {
      elem = linear / innerSize;
      outSlice = linear % innerSize;
    }

    const IndexType inSlice =
      indices.data[cuda::detail::IndexToOffset<IndicesType, IndexType, IdxDim>::get(outSlice, indices)];
    CUDA_KERNEL_ASSERT(inSlice < srcSelectDimSize);

    const IndexType dstOff =
      cuda::detail::IndexToOffset<T, IndexType, DstDim>::get(elem, dst)
      + outSlice * dst.strides[dstSelectDim];
    const IndexType srcOff =
      cuda::detail::IndexToOffset<T, IndexType, SrcDim>::get(elem, src)
      + inSlice * src.strides[srcSelectDim];
    dst.data[dstOff] = src.data[srcOff];
  }
}
namespace {
// When using a 0-dim scalar tensor, we need the legacy (THC) semantics of
// TensorInfo: Pretend that the scalar tensor is in fact a one-element vector.
// Legacy (THC) view of a 0-dim tensor: present the scalar as a
// one-element, stride-1 vector so the indexing kernels can address it.
template <typename T, typename IndexType>
cuda::detail::TensorInfo<T, IndexType>
tensorInfoLegacyIfScalar(cuda::detail::TensorInfo<T, IndexType> ti) {
  const bool isScalar = (ti.dims == 0);
  if (isScalar) {
    ti.dims = 1;
    ti.sizes[0] = 1;
    ti.strides[0] = 1;
  }
  return ti;
}
}
// index_select into `out` for one scalar type: resizes `out` so that
// sizes[dim] == index.numel(), then copies the selected slices, choosing
// between the small-index and large-index kernels above based on how many
// indices were requested.
template<typename scalar_t>
void index_select_out_cuda_impl(Tensor& out, const Tensor& self, long dim,
                                const Tensor& index) {
  ptrdiff_t numIndices = index.numel();
  int selfDims = self.dim() == 0 ? 1 : self.dim();
  const cudaStream_t stream = at::cuda::getCurrentCUDAStream();
  TORCH_CHECK(index.dim() <= 1,
              "Index is supposed to be an empty tensor or a vector");
  TORCH_CHECK(dim < selfDims, "Indexing dim is out of bounds");
  // Result shape: self.sizes() with sizes[dim] replaced by numIndices.
  std::vector<int64_t> newSize = self.sizes().vec();
  if (self.dim() > 0) {
    newSize[dim] = numIndices;
  }
  at::native::resize_(out, newSize, {});
  ptrdiff_t outTotalSize = out.numel();
  // Empty output: nothing to copy.
  if (outTotalSize == 0) {
    return;
  }
  bool indContig = index.is_contiguous();
  // The `self` is partitioned into two parts:
  // -the size of each slice we are indexing, which is the
  // total size of the tensor ignoring dimension `dim`;
  // -the number of indices we are choosing, which is the total size
  // of the tensor `indices`.
  int64_t selfSelectDimSize = self.dim() == 0 ? 1 : self.size(dim);
  ptrdiff_t sliceSize = outTotalSize / numIndices;
  int mpc = at::cuda::getCurrentDeviceProperties()->multiProcessorCount;
  // Launch helpers. The trailing template ints (-2 = statically contiguous,
  // -1 = fully generic strides, 1/2/3 = fixed dimensionality) select the
  // IndexToOffset specialization used inside the kernels.
#define SMALL_INDEX(TENSOR_TYPE, INDICES_TYPE, TYPE, DST_DIM, SRC_DIM, IDX_DIM)         \
  indexSelectSmallIndex<TENSOR_TYPE, INDICES_TYPE, TYPE, DST_DIM, SRC_DIM, IDX_DIM>     \
    <<<smallIndexGrid, smallIndexBlock, 0, stream>>>(                                   \
      outInfo, selfInfo, indicesInfo,                                                   \
      outSelectDim, selfSelectDim, static_cast<TYPE>(sliceSize),                        \
      selfSelectDimSize);
#define LARGE_INDEX(TENSOR_TYPE, INDICES_TYPE, TYPE,                           \
                    DST_DIM, SRC_DIM, IDX_DIM, IDX_IS_MAJOR)                   \
  indexSelectLargeIndex<TENSOR_TYPE, INDICES_TYPE, TYPE,                       \
                        DST_DIM, SRC_DIM, IDX_DIM, IDX_IS_MAJOR>               \
    <<<largeIndexGrid, largeIndexBlock, 0, stream>>>(                          \
      outInfo, selfInfo, indicesInfo,                                          \
      outSelectDim, selfSelectDim, static_cast<TYPE>(outTotalSize),            \
      static_cast<TYPE>((IDX_IS_MAJOR) ? sliceSize : numIndices),              \
      selfSelectDimSize);
  // Launch shapes: 128 threads per block, grid capped at 8 blocks per SM.
  dim3 smallIndexGrid(std::min(THCCeilDiv(sliceSize, (ptrdiff_t)128), (ptrdiff_t)(mpc * 8)));
  dim3 smallIndexBlock(std::min(sliceSize, (ptrdiff_t)128));
  dim3 largeIndexGrid(std::min(THCCeilDiv(outTotalSize, (ptrdiff_t)128), (ptrdiff_t)(mpc * 8)));
  dim3 largeIndexBlock(std::min(outTotalSize, (ptrdiff_t)128));
  // Fast path: every offset computation fits in 32-bit arithmetic.
  if (cuda::detail::canUse32BitIndexMath(out) &&
      cuda::detail::canUse32BitIndexMath(self) &&
      cuda::detail::canUse32BitIndexMath(index)) {
    auto outInfo = tensorInfoLegacyIfScalar(cuda::detail::getTensorInfo<scalar_t, unsigned int>(out));
    int outSelectDim = outInfo.collapseDims(dim);
    outInfo.reduceDim(outSelectDim);
    auto selfInfo = tensorInfoLegacyIfScalar(cuda::detail::getTensorInfo<scalar_t, unsigned int>(self));
    int selfSelectDim = selfInfo.collapseDims(dim);
    selfInfo.reduceDim(selfSelectDim);
    AT_DISPATCH_INDEX_TYPES(index.scalar_type(), "index_select_out_cuda_impl", [&] () {
      auto indicesInfo = tensorInfoLegacyIfScalar(cuda::detail::getTensorInfo<index_t, unsigned int>(index));
      indicesInfo.collapseDims();
      // A reasonable choice for when to have each thread iterate over
      // indices to choose
      if (numIndices <= 16) {
        if (outInfo.dims == 1 && selfInfo.dims == 1 && indContig) {
          SMALL_INDEX(scalar_t, index_t, unsigned int, 1, 1, -2);
        } else if (outInfo.dims == 2 && selfInfo.dims == 2 && indContig) {
          SMALL_INDEX(scalar_t, index_t, unsigned int, 2, 2, -2);
        } else if (outInfo.dims == 3 && selfInfo.dims == 3 && indContig) {
          SMALL_INDEX(scalar_t, index_t, unsigned int, 3, 3, -2);
        } else {
          SMALL_INDEX(scalar_t, index_t, unsigned int, -1, -1, -1);
        }
      } else {
        // Pick the traversal order that best matches out's layout
        // (see indexShouldBeMajor above).
        bool indexIsMajor = indexShouldBeMajor(outInfo, outSelectDim);
        if (outInfo.dims == 1 && selfInfo.dims == 1 && indContig) {
          LARGE_INDEX(scalar_t, index_t, unsigned int, 1, 1, -2, true);
        } else if (outInfo.dims == 2 && selfInfo.dims == 2 && indContig) {
          if (indexIsMajor) {
            LARGE_INDEX(scalar_t, index_t, unsigned int, 2, 2, -2, true);
          } else {
            LARGE_INDEX(scalar_t, index_t, unsigned int, 2, 2, -2, false);
          }
        } else if (outInfo.dims == 3 && selfInfo.dims == 3 && indContig) {
          if (indexIsMajor) {
            LARGE_INDEX(scalar_t, index_t, unsigned int, 3, 3, -2, true);
          } else {
            LARGE_INDEX(scalar_t, index_t, unsigned int, 3, 3, -2, false);
          }
        } else {
          LARGE_INDEX(scalar_t, index_t, unsigned int, -1, -1, -1, true);
        }
      }
    });
  } else {
    // 64-bit fallback: always the fully generic large-index kernel.
    auto outInfo = tensorInfoLegacyIfScalar(cuda::detail::getTensorInfo<scalar_t, uint64_t>(out));
    int outSelectDim = outInfo.collapseDims(dim);
    outInfo.reduceDim(outSelectDim);
    auto selfInfo = tensorInfoLegacyIfScalar(cuda::detail::getTensorInfo<scalar_t, uint64_t>(self));
    int selfSelectDim = selfInfo.collapseDims(dim);
    selfInfo.reduceDim(selfSelectDim);
    AT_DISPATCH_INDEX_TYPES(index.scalar_type(), "index_select_out_cuda_impl", [&] () {
      auto indicesInfo = tensorInfoLegacyIfScalar(cuda::detail::getTensorInfo<index_t, uint64_t>(index));
      indicesInfo.collapseDims();
      LARGE_INDEX(scalar_t, index_t, uint64_t, -1, -1, -1, true);
    });
  }
#undef SMALL_INDEX
#undef LARGE_INDEX
}
} // anonymous namespace
// out= entry point for index_select: validates devices/dims and dispatches
// on out's dtype. The HIP build additionally dispatches BFloat16.
Tensor& index_select_out_cuda(Tensor& out, const Tensor& self, int64_t dim,
                              const Tensor& index) {
  static constexpr string_view DIM_WARNING =
    "Tensor too large or too many (> 25) dimensions";
  TORCH_CHECK(at::cuda::check_device({out, self, index}),
              "Input, output and indices must be on the current device");
  at::assert_no_internal_overlap(out);
  dim = at::maybe_wrap_dim(dim, self);
  TORCH_CHECK(self.dim() <= MAX_TENSORINFO_DIMS, DIM_WARNING);
  TORCH_CHECK(index.dim() <= MAX_TENSORINFO_DIMS, DIM_WARNING);
#if defined(__HIP_PLATFORM_HCC__)
  AT_DISPATCH_ALL_TYPES_AND_COMPLEX_AND3(
    at::ScalarType::Half, at::ScalarType::Bool, at::ScalarType::BFloat16,
    out.scalar_type(), "index_select_cuda",
    [&] { index_select_out_cuda_impl<scalar_t>(out, self, dim, index); });
#else // __HIP_PLATFORM_HCC__
  AT_DISPATCH_ALL_TYPES_AND_COMPLEX_AND2(
    at::ScalarType::Half, at::ScalarType::Bool,
    out.scalar_type(), "index_select_cuda",
    [&] { index_select_out_cuda_impl<scalar_t>(out, self, dim, index); });
#endif // __HIP_PLATFORM_HCC__
  return out;
}
// Functional (out-of-place) index_select: allocate an empty result and
// delegate to the out= variant, which performs the resize and the copy.
Tensor index_select_cuda(const Tensor& self, int64_t dim, const Tensor& index) {
  Tensor result = at::empty({0}, self.options());
  index_select_out_cuda(result, self, dim, index);
  return result;
}
// Unary predicate for cub/thrust iterators: true iff the element differs
// from the zero value of its type.
template<typename T>
struct NonZeroOp
{
  __host__ __device__ __forceinline__ bool operator()(const T& a) const {
    const T zero = T(0);
    return a != zero;
  }
};
// nonzero for one scalar type. Pipeline:
//   1) cub::DeviceReduce::Sum counts the nonzero elements;
//   2) cub::DeviceSelect::Flagged writes the *linear* indices of the
//      nonzero elements;
//   3) a thrust pass per dimension converts linear indices to coordinates.
// The result is produced transposed ({ndim, nnz} buffer) and exposed as
// its .t(), copying only when the caller's `out` has the right sizes but
// incompatible strides.
template<typename scalar_t>
void nonzero_cuda_out_impl(const Tensor& self, Tensor& out){
  Tensor self_ = self.contiguous();
  int N = self_.numel();  // caller guarantees N < INT_MAX
  const cudaStream_t stream = at::cuda::getCurrentCUDAStream();
  // compute number of nonzero elements
  size_t temp_storage_bytes=0;
  auto& allocator = *c10::cuda::CUDACachingAllocator::get();
  auto num_nonzeros = allocator.allocate(sizeof(int));
  // Iterator presenting each element as its "is nonzero" flag.
  cub::TransformInputIterator<bool, NonZeroOp<scalar_t>, scalar_t*> itr(self_.data_ptr<scalar_t>(), NonZeroOp<scalar_t>());
  // First call sizes the temp storage; second call does the reduction.
  cub::DeviceReduce::Sum(nullptr, temp_storage_bytes, itr, (int*)num_nonzeros.get(), N, stream);
  auto temp_storage = allocator.allocate(temp_storage_bytes);
  cub::DeviceReduce::Sum(temp_storage.get(), temp_storage_bytes, itr, (int*)num_nonzeros.get(), N, stream);
  int num_nonzeros_h;
  C10_CUDA_CHECK(cudaMemcpyAsync(&num_nonzeros_h, num_nonzeros.get(), sizeof(int), cudaMemcpyDeviceToHost, stream));
  //need to synchronize to make sure data is available on the host
  C10_CUDA_CHECK(cudaStreamSynchronize(stream));
  //expected output size is num_nonzeros x ndim
  //we are producing output with size {num_nonzeros, ndim} and strides {num_nonzeros, 1} (that is, transposed ndim x num_nonzeros output)
  //we are able to directly use passed output with this size and strides, and we can also (per contract)
  //resize passed output with incorrect sizes anyway we want.
  //However, out with correct sizes and incorrect strides will have to be copied to from the intermediate we've produced.
  bool need_to_copy = out.dim() == 2 && out.sizes()[0] == num_nonzeros_h && out.sizes()[1] == self.dim() && !out.t().is_contiguous();
  at::Tensor out_temp = need_to_copy ?
    at::native::empty_cuda({self.dim(), num_nonzeros_h}, out.options()) :
    out.resize_({self.dim(), num_nonzeros_h});
  //Scalars are expected to produce output of size (1,0), so we can't write to it
  if (self.dim() > 0) {
    // Select the linear positions whose flag is set; the selected values
    // land in column 0 of out_temp.
    cub::CountingInputIterator<int64_t> counting_itr(0);
    temp_storage_bytes = 0;
    cub::DeviceSelect::Flagged(nullptr, temp_storage_bytes, counting_itr, itr,
                               out_temp.data_ptr<int64_t>(), (int*)num_nonzeros.get(), N, stream);
    temp_storage = allocator.allocate(temp_storage_bytes);
    cub::DeviceSelect::Flagged(temp_storage.get(), temp_storage_bytes, counting_itr, itr,
                               out_temp.data_ptr<int64_t>(), (int*)num_nonzeros.get(), N, stream);
    if (num_nonzeros_h > 0 && self.dim() > 1){
      // Decode linear indices into per-dimension coordinates, last
      // dimension first; the dim == 0 pass overwrites the linear indices
      // (column 0) in place. `div` accumulates the product of the sizes
      // of the dimensions already decoded.
      int64_t div = 1;
      auto thrust_allocator = THCThrustAllocator(globalContext().lazyInitCUDA());
      for (int dim = self.dim()-1; dim >= 0; dim--){
        int64_t dim_size = self.sizes()[dim];
        thrust::transform(
          thrust::cuda::par(thrust_allocator).on(stream),
          thrust::device_ptr<int64_t>(out_temp.data_ptr<int64_t>()),
          thrust::device_ptr<int64_t>(out_temp.data_ptr<int64_t>()) + num_nonzeros_h,
          thrust::device_ptr<int64_t>(out_temp.data_ptr<int64_t>()) + num_nonzeros_h * dim,
          [=] C10_HOST_DEVICE (const int64_t val) {return (val/div) % dim_size;}
        );
        div *= dim_size;
      }
    }
  }
  if (need_to_copy) {
    out.copy_(out_temp.t());
  } else {
    //transpose out so it is correct size
    Tensor out_ = out_temp.t();
    out.set_(out_);
  }
}
// out= entry point for nonzero. `out` must be int64 and on self's device;
// element count is capped at INT_MAX because the cub counters used in the
// implementation are 32-bit ints.
Tensor& nonzero_out_cuda(Tensor& out, const Tensor& self){
  TORCH_CHECK(self.numel() < std::numeric_limits<int>::max(), "nonzero is not supported for tensors with more than INT_MAX elements, \
  file a support request");
  TORCH_CHECK(out.dtype() == at::kLong, "Expected object of scalar type ", at::kLong, " as out, but got ", out.dtype());
  TORCH_CHECK(self.device() == out.device(), "expected self and out to be on the same device, but got out on ",
              out.device(), " and self on ", self.device());
  AT_DISPATCH_ALL_TYPES_AND3(at::ScalarType::Bool, at::ScalarType::BFloat16, at::ScalarType::Half,
                             self.scalar_type(), "nonzero_cuda",
                             [&] {nonzero_cuda_out_impl<scalar_t>(self, out);});
  return out;
}
// Functional (out-of-place) nonzero: allocate an empty int64 result and
// delegate to the out= variant.
Tensor nonzero_cuda(const Tensor& self){
  Tensor result = at::native::empty_cuda({0}, self.options().dtype(kLong));
  return nonzero_out_cuda(result, self);
}
} // native
} // at
|
090626ace0e6c6fe5b3f70903064b69852d37d37.hip
|
// !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
/* Copyright (c) 2011-2017, NVIDIA CORPORATION. All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions
* are met:
* * Redistributions of source code must retain the above copyright
* notice, this list of conditions and the following disclaimer.
* * Redistributions in binary form must reproduce the above copyright
* notice, this list of conditions and the following disclaimer in the
* documentation and/or other materials provided with the distribution.
* * Neither the name of NVIDIA CORPORATION nor the names of its
* contributors may be used to endorse or promote products derived
* from this software without specific prior written permission.
*
* THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS ``AS IS'' AND ANY
* EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
* IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
* PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR
* CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
* EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
* PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
* PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY
* OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
* (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
* OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
*/
#include <solvers/kaczmarz_solver.h>
#include <solvers/block_common_solver.h>
#include <thrust/transform.h>
#include <basic_types.h>
#include <string.h>
#include <cutil.h>
#include <util.h>
#include <miscmath.h>
#include <sm_utils.inl>
namespace amgx
{
// -----------
// Kernels
// -----------
/*************************************************************************
 * "random" hash function for both device and host
 ************************************************************************/
// Integer avalanche mix of `i`, scaled into the half-open range [0, max).
// Callers use the result directly as a row/bucket index, so the upper
// bound must be exclusive.
__host__ __device__ static int ourHash(const int i, const int max)
{
    unsigned int a = i;
    a = (a + 0x7ed55d16) + (a << 12);
    a = (a ^ 0xc761c23c) + (a >> 19);
    a = (a + 0x165667b1) + (a << 5);
    a = (a ^ 0xd3a2646c) + (a << 9);
    a = (a + 0xfd7046c5) + (a << 3);
    a = (a ^ 0xb55a4f09) + (a >> 16);
    int r = int(((a ^ 0x4a51e590) / (float)UINT_MAX) * max);
    // FIX: for hash values near UINT_MAX the float ratio rounds to 1.0f,
    // which previously made the function return `max` itself -- an
    // out-of-range index for callers (e.g. row selection reads Ap[i + 1]).
    // Clamp to the valid interval [0, max).
    if (max > 0 && r >= max)
    {
        r = max - 1;
    }
    return r;
}
// Functor mapping a sequence number n to a pseudo-random int in [0,
// max_int), intended for thrust::transform over a counting iterator.
// The float members a/b are stored but not used by operator().
struct prg
{
    float a, b;
    int max_int;

    __host__ __device__
    prg(int _max_int, float _a = 0.f, float _b = 1.f) : a(_a), b(_b), max_int(_max_int) {};

    __host__ __device__
    int operator()(const unsigned int n) const
    {
        return ourHash(n, max_int);
    }
};
// Resize `vec` to `size` and fill it with deterministic pseudo-random ints
// in [0, max_int), each derived from its position via ourHash (through prg).
template <class Vector>
void initRandom(Vector &vec, int size, int max_int)
{
    vec.resize(size);
    thrust::counting_iterator<unsigned int> first(0);
    thrust::transform(first, first + size, vec.begin(), prg(max_int));
}
// For each CSR row, store in d[row] the *squared* L2 norm of the row's
// coefficients (sum of Aij^2). Grid-stride over rows; Aj is unused here
// but kept so all row kernels share a uniform signature.
template<typename IndexType, typename ValueTypeA, typename ValueTypeB>
__global__ void compute_anorm_kernel(const IndexType num_rows,
                                     const IndexType *Ap,
                                     const IndexType *Aj,
                                     const ValueTypeA *Ax,
                                     ValueTypeA *d)
{
    const IndexType first = blockDim.x * blockIdx.x + threadIdx.x;
    const IndexType step = blockDim.x * gridDim.x;

    for (int row = first; row < num_rows; row += step)
    {
        const IndexType begin = Ap[row];
        const IndexType end = Ap[row + 1];
        ValueTypeB acc = 0;

        for (int j = begin; j < end; j++)
        {
            const ValueTypeB v = Ax[j];
            acc += v * v;
        }

        d[row] = acc;
    }
}
// Multicolor variant of compute_anorm_kernel: iterates over the rows of
// the current color via sorted_rows_by_color and stores the squared L2
// norm of row sorted_rows_by_color[ridx] at d[ridx].
// NOTE(review): the sibling multicolor smoothing kernels read this
// quantity as d[i] (indexed by the actual row id), not d[ridx] -- confirm
// which layout the host-side caller expects before relying on this kernel.
template<typename IndexType, typename ValueTypeA, typename ValueTypeB>
__global__ void compute_multicolor_anorm_kernel(const IndexType num_rows,
        const IndexType *Ap,
        const IndexType *Aj,
        const ValueTypeA *Ax,
        ValueTypeA *d,  // FIX: was `const ValueTypeA *`, but the kernel writes d[ridx]
        const int *sorted_rows_by_color,
        const int num_rows_per_color)
{
    IndexType tidx = blockDim.x * blockIdx.x + threadIdx.x;

    for (int ridx = tidx; ridx < num_rows_per_color; ridx += blockDim.x * gridDim.x)
    {
        int i = sorted_rows_by_color[ridx];
        ValueTypeB d_ = 0;
        IndexType row_start = Ap[i];
        IndexType row_end = Ap[i + 1];

        for (int j = row_start; j < row_end; j++)
        {
            ValueTypeB Aij = Ax[j];
            d_ += Aij * Aij;
        }

        // Store the squared L2 norm of the row.
        d[ridx] = d_;
    }
}
// Builds the inverse lookup table c_inv from the cumulative row-norm array
// a_cum: each "bucket" idx in [0, c_inv_sz) is assigned the row whose
// cumulative interval covers value idx * d_inv. A uniformly random bucket
// can then be mapped to a row with probability proportional to its norm.
// d_inv is the bucket width -- presumably cumulative total / c_inv_sz;
// TODO confirm against the host-side caller (not visible here).
template<typename IndexType, typename ValueTypeA, typename ValueTypeB>
__global__ void compute_cumul_inv_kernel(const IndexType a_cum_num_rows,
        ValueTypeA *a_cum,
        ValueTypeB d_inv,
        int c_inv_sz,
        IndexType *c_inv)
{
    const int tidx = blockDim.x * blockIdx.x + threadIdx.x;
    int idx, idx1, idx2;

    for (int ridx = tidx; ridx < a_cum_num_rows; ridx += blockDim.x * gridDim.x)
    {
        // printf("%d %f %f\n", ridx, d_inv, a_cum[ridx]);
        //printf("%f\n", a_cum[ridx]);
        double a = a_cum[ridx];
        // if (ridx < 0 || ridx >= a_cum_num_rows)
        //  printf("!! %d %d\n", ridx, idx);
        idx1 = int(a / d_inv) - 1; // get index in inverse table (floor - 1)

        // Bucket range owned by this row ends where the next row's range
        // begins; the last row owns every remaining bucket.
        if (ridx < a_cum_num_rows - 1)
        {
            idx2 = a_cum[ridx + 1] / d_inv - 1; // get index in inverse table (floor - 1)
        }
        else
        {
            idx2 = c_inv_sz;
        }

        // printf("%d %d\n", idx1, idx2);
        for ( idx = idx1; idx < idx2; idx++)
        {
            // Debug guard left by the author: reports (does not prevent)
            // an out-of-range bucket index.
            if (idx >= c_inv_sz || idx < 0)
            {
                printf("Ai! %d %d\n", idx, ridx);
            }

            c_inv[idx] = ridx;
        }
    }
}
// For each row, store in amax_idx[row] the position j (into Ax) of the
// row's maximum coefficient. Grid-stride over rows.
// NOTE(review): the comparison uses the signed value, not fabs(); for
// matrices with negative entries "amax" here is the signed maximum --
// confirm this matches the caller's intent.
template<typename IndexType, typename ValueTypeA, typename ValueTypeB>
__global__ void compute_amax_kernel(const IndexType num_rows,
                                    const IndexType *Ap,
                                    const IndexType *Aj,
                                    const ValueTypeA *Ax,
                                    IndexType *amax_idx)
{
    IndexType tidx = blockDim.x * blockIdx.x + threadIdx.x;

    for (int ridx = tidx; ridx < num_rows; ridx += blockDim.x * gridDim.x)
    {
        IndexType row_start = Ap[ridx];
        IndexType row_end = Ap[ridx + 1];
        // FIX: maxVal must be re-initialized for every row. Previously it
        // was declared once before the row loop, so a large value carried
        // over from an earlier row suppressed all updates in later rows
        // and left amax_idx[ridx] == row_start incorrectly.
        ValueTypeA maxVal(0);
        IndexType jmax = row_start;

        for (int j = row_start; j < row_end; j++)
        {
            ValueTypeA avalue = Ax[j];

            if (avalue > maxVal)
            {
                maxVal = avalue;
                jmax = j;
            }
        }

        // Store position of maxvalue
        amax_idx[ridx] = jmax;
    }
}
// Plain Kaczmarz sweep (one hyperplane projection per row), naive atomics
// version. Preconditions: xout already holds a copy of x (the kernel both
// reads and updates xout), and d[row] holds the row's squared L2 norm
// (matching what compute_anorm_kernel produces).
// NOTE(review): rows processed concurrently read xout while other rows
// atomically update it, so this is a chaotic/asynchronous sweep by design.
template<typename IndexType, typename ValueTypeA, typename ValueTypeB>
__global__ void kaczmarz_smooth_kernel_naive_atomics(const IndexType num_rows,
        const IndexType *Ap,
        const IndexType *Aj,
        const ValueTypeA *Ax,
        const ValueTypeA *d,
        const ValueTypeB *b,
        const ValueTypeB *x,
        ValueTypeB *xout,
        const IndexType row_offset)
{
    // Naive implementation, needs x copy in xout at the very beginning
    IndexType tidx = blockDim.x * blockIdx.x + threadIdx.x;

    for (int ridx = row_offset + tidx; ridx < num_rows; ridx += blockDim.x * gridDim.x)
    {
        IndexType row_start = Ap[ridx];
        IndexType row_end = Ap[ridx + 1];
        ValueTypeB Axi = 0.0;
        ValueTypeB r;

        // Row dot product <A_row, xout>.
        for (int j = row_start; j < row_end; j++)
        {
            Axi += Ax[j] * xout[Aj[j]];
        }

        // Projection coefficient r = (b_i - <A_i, x>) / ||A_i||^2,
        // guarded against a (near-)zero row norm.
        r = (b[ridx] - Axi) / ( isNotCloseToZero( d[ridx]) ? d[ridx] : epsilon(d[ridx]) );

        // Scatter the update r * A_row into xout.
        for (int j = row_start; j < row_end; j++)
        {
            //xout[Aj[j]] += r*Ax[j];
            utils::atomic_add(&xout[Aj[j]], r * Ax[j]);
        }
    }
}
// Kaczmarz sweep, one warp per row: the row dot product uses a warp
// reduction, and the scatter update uses atomics.
// Preconditions: xout already holds a copy of x; d[row] holds the row's
// squared L2 norm (matching compute_anorm_kernel).
// NOTE(review): unlike the naive variant, `row_offset` is accepted but
// never used -- the row loop always starts at warp 0. Confirm whether
// offset sweeps are expected to reach this kernel.
template<typename IndexType, typename ValueTypeA, typename ValueTypeB, int kCtaSize>
__global__ void kaczmarz_smooth_kernel_warp_atomics(const IndexType num_rows,
        const IndexType *Ap,
        const IndexType *Aj,
        const ValueTypeA *Ax,
        const ValueTypeA *d,
        const ValueTypeB *b,
        const ValueTypeB *x,
        ValueTypeB *xout,
        const IndexType row_offset)
{
    const int num_warps = kCtaSize / 32;
    const int num_rows_per_iter = num_warps * gridDim.x;
    const int warpId = threadIdx.x / 32;
    const int laneId = threadIdx.x % 32;

    for ( int ridx = blockIdx.x * num_warps + warpId ; ridx < num_rows ;
            ridx += num_rows_per_iter )
    {
        IndexType row_start = Ap[ridx];
        IndexType row_end = Ap[ridx + 1];
        ValueTypeB Axi = 0.0;
        ValueTypeB r;

        // Warp-cooperative dot product <A_row, xout>: lanes past the row
        // end contribute zeros so every lane participates in warp_reduce.
        for (int j = row_start + laneId; utils::any( j < row_end) ; j += 32)
        {
            ValueTypeB aValue = j < row_end ? Ax[j] : ValueTypeB(0);
            ValueTypeB xValue = j < row_end ? xout[Aj[j]] : ValueTypeB(0);
            r = utils::warp_reduce<1, utils::Add>(aValue * xValue);
            Axi += r;
        }

        // Projection coefficient (guarded against a zero row norm).
        r = (b[ridx] - Axi) / ( isNotCloseToZero( d[ridx]) ? d[ridx] : epsilon(d[ridx]) );

        // Scatter the update r * A_row into xout.
        for (int j = row_start + laneId; utils::any( j < row_end) ; j += 32)
        {
            //ValueTypeB dx = j < row_end ? r*Ax[j] : ValueTypeB(0);
            //int aj = j < row_end ? r*Ax[j] : ValueTypeB(0);
            if (j < row_end)
            {
                utils::atomic_add(&xout[Aj[j]], r * Ax[j]);
            }
        }
    }
}
// Randomized Kaczmarz sweep, one warp per sampled row. rnd_rows[ridx] is a
// pre-generated random value and c_inv (the inverse cumulative row-norm
// table, see compute_cumul_inv_kernel) maps it to a row, so rows are
// sampled with probability proportional to their norm. The row's squared
// norm ||A_irow||^2 is recomputed on the fly (AA).
// Precondition: xout already holds a copy of x.
// NOTE(review): `row_offset` is accepted but never used, as in the
// non-randomized warp kernel.
template<typename IndexType, typename ValueTypeA, typename ValueTypeB, int kCtaSize>
__global__ void randomized_kaczmarz_smooth_kernel_warp_atomics(const IndexType num_rows,
        const IndexType *Ap,
        const IndexType *Aj,
        const ValueTypeA *Ax,
        const IndexType *c_inv,
        const IndexType *rnd_rows,
        const ValueTypeB *b,
        const ValueTypeB *x,
        ValueTypeB *xout,
        const IndexType row_offset)
{
    const int num_warps = kCtaSize / 32;
    const int num_rows_per_iter = num_warps * gridDim.x;
    const int warpId = threadIdx.x / 32;
    const int laneId = threadIdx.x % 32;

    for ( int ridx = blockIdx.x * num_warps + warpId ; ridx < num_rows ;
            ridx += num_rows_per_iter )
    {
        // The sampled row this warp projects onto.
        int irow = c_inv[rnd_rows[ridx]];
        IndexType row_start = Ap[irow];
        IndexType row_end = Ap[irow + 1];
        ValueTypeB Axi = 0.0;
        ValueTypeB r;
        ValueTypeA aa;
        ValueTypeA AA = 0.0;

        // Warp-cooperative dot product <A_irow, xout> and squared norm.
        for (int j = row_start + laneId; utils::any( j < row_end) ; j += 32)
        {
            ValueTypeB aValue = j < row_end ? Ax[j] : ValueTypeB(0);
            ValueTypeB xValue = j < row_end ? xout[Aj[j]] : ValueTypeB(0);
            r = utils::warp_reduce<1, utils::Add>(aValue * xValue);
            aa = utils::warp_reduce<1, utils::Add>(aValue * aValue);
            Axi += r;
            AA += aa;
        }

        // FIX: the residual must use the sampled row's RHS entry b[irow];
        // previously b[ridx] (the warp's loop counter) was used, which
        // projects onto the wrong hyperplane whenever irow != ridx.
        r = (b[irow] - Axi) / ( isNotCloseToZero( AA) ? AA : epsilon(AA) );

        // Scatter the update r * A_irow into xout.
        for (int j = row_start + laneId; utils::any( j < row_end) ; j += 32)
        {
            if (j < row_end)
            {
                utils::atomic_add(&xout[Aj[j]], r * Ax[j]);
            }
        }
    }
}
// Pseudo-random single-coordinate Kaczmarz-style update: each warp hashes
// its loop counter to pick a row i, computes the residual b[i] - <A_i, x>
// with a warp reduction, then lane 0 updates ONE entry of xout (column
// Aj[t] for a hashed position t within the row), scaled by the row length
// and damped by 0.5.
// Precondition: xout already holds a copy of x.
// FIX: removed the leftover in-kernel debug printf calls -- they executed
// on every inner iteration, serializing the warps and flooding stdout on
// real problem sizes, without affecting the computed result.
template<typename IndexType, typename ValueTypeA, typename ValueTypeB, int kCtaSize>
__global__ void kaczmarz_smooth_kernel(const IndexType num_rows,
                                       const IndexType *Ap,
                                       const IndexType *Aj,
                                       const ValueTypeA *Ax,
                                       const IndexType *amax,
                                       const ValueTypeB *b,
                                       const ValueTypeB *x,
                                       ValueTypeB *xout,
                                       const IndexType row_offset)
{
    // Naive implementation, needs x copy in xout at the very beginning
    IndexType i, t;
    const int num_warps = kCtaSize / 32;
    const int num_rows_per_iter = num_warps * gridDim.x;
    const int warpId = threadIdx.x / 32;
    const int laneId = threadIdx.x % 32;

    for ( int ridx = blockIdx.x * num_warps + warpId ; ridx < num_rows ;
            ridx += num_rows_per_iter )
    {
        ValueTypeB Axi = 0.0;
        ValueTypeB r;
        // Hash the counter to a row index in [0, num_rows).
        i = ourHash(ridx, num_rows);
        IndexType row_start = Ap[i];
        IndexType row_end = Ap[i + 1];

        // Warp-cooperative dot product <A_i, xout>.
        for (int j = row_start + laneId; utils::any( j < row_end) ; j += 32)
        {
            ValueTypeB aValue = j < row_end ? Ax[j] : ValueTypeB(0);
            ValueTypeB xValue = j < row_end ? xout[Aj[j]] : ValueTypeB(0);
            r = utils::warp_reduce<1, utils::Add>(aValue * xValue);
            Axi += r;
        }

        if (laneId == 0)
        {
            r = (b[i] - Axi);// / ( isNotCloseToZero( d[ridx]) ? d[ridx] : epsilon(d[ridx]) );
            // Hash again to pick one position within the row to update.
            t = row_start + ourHash(ridx, row_end - row_start);
            xout[Aj[t]] += r * ((row_end - row_start) * Ax[t]) * 0.5;
        }
    }
}
// Multicolor Kaczmarz sweep, naive (one thread per row of the current
// color). Preconditions: xout already holds a copy of x, and d[i] holds
// the squared L2 norm of row i (see compute_anorm_kernel).
// `weight` and `x` are accepted for signature uniformity but unused here.
template<typename IndexType, typename ValueTypeA, typename ValueTypeB>
__global__ void multicolor_kaczmarz_smooth_kernel_naive(const IndexType num_rows,
        const IndexType *Ap,
        const IndexType *Aj,
        const ValueTypeA *Ax,
        const ValueTypeA *d,
        const ValueTypeB *b,
        const ValueTypeB *x,
        ValueTypeB weight,
        const int *sorted_rows_by_color,
        const int num_rows_per_color,
        ValueTypeB *xout)
{
    const IndexType first = blockDim.x * blockIdx.x + threadIdx.x;
    const IndexType step = blockDim.x * gridDim.x;

    for (int ridx = first; ridx < num_rows_per_color; ridx += step)
    {
        // Actual matrix row handled by this thread.
        const int row = sorted_rows_by_color[ridx];
        const IndexType begin = Ap[row];
        const IndexType end = Ap[row + 1];
        ValueTypeB dot = 0.0;

        // Row dot product <A_row, xout>.
        for (int j = begin; j < end; j++)
        {
            dot += Ax[j] * xout[Aj[j]];
        }

        // Projection coefficient, guarded against a zero row norm.
        const ValueTypeB resid =
            (b[row] - dot) / ( isNotCloseToZero( d[row]) ? d[row] : epsilon(d[row]) );

        // Scatter the update resid * A_row into xout.
        for (int j = begin; j < end; j++)
        {
            utils::atomic_add(&xout[Aj[j]], resid * Ax[j]);
        }
    }
}
// Multicolor Kaczmarz sweep, one warp per row of the current color
// (rows of one color are presumably independent under the coloring --
// the atomics still guard the scatter).
// Preconditions: xout already holds a copy of x; d[i] holds the squared
// L2 norm of row i (matching compute_anorm_kernel).
// `weight` and `x` are accepted for signature uniformity but unused here.
template<typename IndexType, typename ValueTypeA, typename ValueTypeB, int kCtaSize>
__global__ void multicolor_kaczmarz_smooth_kernel(const IndexType num_rows,
        const IndexType *Ap,
        const IndexType *Aj,
        const ValueTypeA *Ax,
        const ValueTypeA *d,
        const ValueTypeB *b,
        const ValueTypeB *x,
        ValueTypeB weight,
        const int *sorted_rows_by_color,
        const int num_rows_per_color,
        ValueTypeB *xout)
{
    const int num_warps = kCtaSize / 32;
    const int num_rows_per_iter = num_warps * gridDim.x;
    const int warpId = threadIdx.x / 32;
    const int laneId = threadIdx.x % 32;
    int i;

    for ( int ridx = blockIdx.x * num_warps + warpId ; ridx < num_rows_per_color ;
            ridx += num_rows_per_iter )
    {
        // Actual matrix row handled by this warp.
        i = sorted_rows_by_color[ridx];
        IndexType row_start = Ap[i];
        IndexType row_end = Ap[i + 1];
        ValueTypeB Axi = 0.0;
        ValueTypeB r;

        // Warp-cooperative dot product <A_i, xout>: lanes past the row
        // end contribute zeros so every lane participates in warp_reduce.
        for (int j = row_start + laneId; utils::any( j < row_end) ; j += 32)
        {
            ValueTypeB aValue = j < row_end ? Ax[j] : ValueTypeB(0);
            ValueTypeB xValue = j < row_end ? xout[Aj[j]] : ValueTypeB(0);
            r = utils::warp_reduce<1, utils::Add>(aValue * xValue);
            Axi += r;
        }

        // Projection coefficient (guarded against a zero row norm).
        r = (b[i] - Axi) / ( isNotCloseToZero( d[i]) ? d[i] : epsilon(d[i]) );

        // Scatter the update r * A_i into xout.
        for (int j = row_start + laneId; utils::any( j < row_end) ; j += 32)
        {
            if (j < row_end)
            {
                utils::atomic_add(&xout[Aj[j]], r * Ax[j]);
            }
        }
    }
}
template<typename IndexType, typename ValueTypeA, typename ValueTypeB>
__global__ void jacobi_smooth_with_0_initial_guess_kernel(const IndexType num_rows,
        const ValueTypeA *d,
        const ValueTypeB *b,
        ValueTypeB *x,
        ValueTypeB weight,
        const IndexType row_offset)
{
    // With x == 0 on entry, one weighted Jacobi step reduces to x = weight * b / d
    // (d is the per-row scaling; guarded against near-zero entries).
    const int stride = blockDim.x * gridDim.x;
    for (int row = row_offset + blockDim.x * blockIdx.x + threadIdx.x; row < num_rows; row += stride)
    {
        const ValueTypeA scale = isNotCloseToZero( d[row]) ? d[row] : epsilon(d[row]);
        x[row] = weight * b[row] / scale;
    }
}
// -----------------
// Methods
// -----------------
// Constructor
// Constructor: read smoother configuration from the AMG config scope.
template<class T_Config>
KaczmarzSolver_Base<T_Config>::KaczmarzSolver_Base( AMG_Config &cfg, const std::string &cfg_scope) : Solver<T_Config>( cfg, cfg_scope), m_an(0), m_amax(0), m_c_inv(0)
{
    // Relaxation (damping) factor applied by the smoothing kernels.
    weight = cfg.AMG_Config::template getParameter<double>("relaxation_factor", cfg_scope);
    this->m_coloring_needed = (cfg.AMG_Config::template getParameter<int>("kaczmarz_coloring_needed", cfg_scope) != 0);
    this->m_reorder_cols_by_color_desired = (cfg.AMG_Config::template getParameter<int>("reorder_cols_by_color", cfg_scope) != 0);
    this->m_randomized = true;
    if (weight == 0)
    {
        // A zero relaxation factor would annihilate every update; fall back to 1.
        // Fix vs. original: warning text said "Block Jacobi smoother" (copy-paste
        // from the Jacobi solver) and carried a stray empty statement (";;").
        weight = 1.;
        amgx_printf("Warning, setting weight to 1 instead of estimating largest_eigen_value in Kaczmarz smoother\n");
    }
}
// Destructor
template<class T_Config>
KaczmarzSolver_Base<T_Config>::~KaczmarzSolver_Base()
{
    // Nothing to free explicitly; member vectors release their own storage.
}
// Solver setup
template<class T_Config>
void
KaczmarzSolver_Base<T_Config>::solver_setup(bool reuse_matrix_structure)
{
    // Cache the concrete matrix and precompute the per-row data the smoothing
    // kernels need (squared row norms; for randomized Kaczmarz also the
    // norm-proportional sampling table m_c_inv).
    m_explicit_A = dynamic_cast<Matrix<T_Config>*>(Base::m_A);
    if (!m_explicit_A)
    {
        FatalError("Kaczmarz solver only works with explicit matrices", AMGX_ERR_INTERNAL);
    }
    compute_anorm( *this->m_explicit_A );
    if (m_randomized) // MC RK is not supported here
    {
        if (m_coloring_needed)
        {
            //FatalError("Randomized Kaczmarz solver does not support coloring", AMGX_ERR_INTERNAL);
            m_coloring_needed = false;
        }
        // Build the inverse of the cumulative row-norm distribution: reduce to
        // the smallest norm (grid step d_inv), scan the norms in place, then
        // invert the cumulative distribution onto a uniform grid on the device.
        double d_inv = this->m_an[0];
        int c_sz = this->m_an.size();
        d_inv = thrust::reduce(this->m_an.begin(), this->m_an.end(), d_inv, thrust::minimum<ValueTypeA>());
        thrust::inclusive_scan(this->m_an.begin(), this->m_an.end(), this->m_an.begin()); // in-place scan
        int c_inv_sz = (this->m_an[c_sz - 1] + d_inv - 1 ) / d_inv;
        this->m_c_inv.resize(c_inv_sz, -1);
        const size_t THREADS_PER_BLOCK = 128;
        const size_t NUM_BLOCKS = ::min(AMGX_GRID_MAX_SIZE, (int)ceil((ValueTypeB)c_sz / (ValueTypeB)THREADS_PER_BLOCK));
        if (c_sz > 0)
        {
            // NOTE(review): `aa` appears unused below -- confirm before removing.
            device_vector_alloc<ValueTypeA> aa(c_sz, 1);
            hipLaunchKernelGGL(( compute_cumul_inv_kernel<IndexType, ValueTypeA, ValueTypeB>) , dim3((unsigned int)NUM_BLOCKS), dim3((unsigned int)THREADS_PER_BLOCK), 0, 0,
                c_sz,
                this->m_an.raw(),
                d_inv,
                c_inv_sz,
                this->m_c_inv.raw());
        }
        hipDeviceSynchronize();
        cudaCheckError();
    }
}
template<class T_Config>
void KaczmarzSolver_Base<T_Config>::compute_anorm( Matrix<T_Config> &A)
{
    // Compute per-row (squared) norms of A into m_an; only scalar (1x1 block)
    // matrices are supported. The matrix view is switched to the exterior view
    // for the computation and restored afterwards.
    this->m_an.resize(A.get_num_rows()*A.get_block_dimx());
    ViewType savedView = A.currentView();
    A.setView(this->m_explicit_A->getViewExterior());
    if (A.get_block_dimx() != 1 || A.get_block_dimy() != 1)
    {
        FatalError("Unsupported block size for KaczmarzSolver", AMGX_ERR_NOT_SUPPORTED_BLOCKSIZE);
    }
    compute_anorm_1x1(A);
    A.setView(savedView);
}
template<class T_Config>
void KaczmarzSolver_Base<T_Config>::compute_amax( Matrix<T_Config> &A)
{
    // Locate the per-row maximum entries of A into m_amax; only scalar
    // (1x1 block) matrices are supported. The view is restored on exit.
    this->m_amax.resize(A.get_num_rows()*A.get_block_dimx());
    ViewType savedView = A.currentView();
    A.setView(this->m_explicit_A->getViewExterior());
    if (A.get_block_dimx() != 1 || A.get_block_dimy() != 1)
    {
        FatalError("Unsupported block size for KaczmarzSolver", AMGX_ERR_NOT_SUPPORTED_BLOCKSIZE);
    }
    compute_amax_1x1(A);
    A.setView(savedView);
}
template <AMGX_VecPrecision t_vecPrec, AMGX_MatPrecision t_matPrec, AMGX_IndPrecision t_indPrec>
void KaczmarzSolver<TemplateConfig<AMGX_device, t_vecPrec, t_matPrec, t_indPrec> >::compute_anorm_1x1(const Matrix_d &A)
{
    // Launch compute_anorm_kernel to fill m_an with per-row squared L2 norms.
    //DIAG: strange issues trying to add DIAG property handling
    // now leaving !DIAG only
    if (A.hasProps(DIAG))
    {
        FatalError("Unsupported separate diag", AMGX_ERR_NOT_IMPLEMENTED);
    }
    typedef typename Matrix_d::index_type IndexType;
    typedef typename Matrix_d::value_type ValueTypeA;
    const size_t THREADS_PER_BLOCK = 128;
    const size_t NUM_BLOCKS = ::min(AMGX_GRID_MAX_SIZE, (int)ceil((ValueTypeB)A.get_num_rows() / (ValueTypeB)THREADS_PER_BLOCK));
    if (A.get_num_rows() > 0)
    {
        hipLaunchKernelGGL(( compute_anorm_kernel<IndexType, ValueTypeA, ValueTypeB>) , dim3((unsigned int)NUM_BLOCKS), dim3((unsigned int)THREADS_PER_BLOCK), 0, 0,
            (int)A.get_num_rows(),
            A.row_offsets.raw(),
            A.col_indices.raw(),
            A.values.raw(),
            this->m_an.raw());
    }
    cudaCheckError();
}
template <AMGX_VecPrecision t_vecPrec, AMGX_MatPrecision t_matPrec, AMGX_IndPrecision t_indPrec>
void KaczmarzSolver<TemplateConfig<AMGX_device, t_vecPrec, t_matPrec, t_indPrec> >::compute_amax_1x1(const Matrix_d &A)
{
    // Launch compute_amax_kernel to record, per row, the position of the
    // largest entry into m_amax.
    //DIAG: strange issues trying to add DIAG property handling
    // now leaving !DIAG only
    if (A.hasProps(DIAG))
    {
        FatalError("Unsupported separate diag", AMGX_ERR_NOT_IMPLEMENTED);
    }
    typedef typename Matrix_d::index_type IndexType;
    typedef typename Matrix_d::value_type ValueTypeA;
    const size_t THREADS_PER_BLOCK = 128;
    const size_t NUM_BLOCKS = ::min(AMGX_GRID_MAX_SIZE, (int)ceil((ValueTypeB)A.get_num_rows() / (ValueTypeB)THREADS_PER_BLOCK));
    if (A.get_num_rows() > 0)
    {
        hipLaunchKernelGGL(( compute_amax_kernel<IndexType, ValueTypeA, ValueTypeB>) , dim3((unsigned int)NUM_BLOCKS), dim3((unsigned int)THREADS_PER_BLOCK), 0, 0,
            (int)A.get_num_rows(),
            A.row_offsets.raw(),
            A.col_indices.raw(),
            A.values.raw(),
            this->m_amax.raw());
    }
    cudaCheckError();
}
template<class T_Config>
void
KaczmarzSolver_Base<T_Config>::solve_init( VVector &b, VVector &x, bool xIsZero )
{
    // No per-solve initialization needed; precomputation happens in solver_setup().
}
// Solve one iteration
template<class T_Config>
bool
KaczmarzSolver_Base<T_Config>::solve_iteration( VVector &b, VVector &x, bool xIsZero )
{
    // Perform one smoothing sweep over x; returns the convergence status.
    if (xIsZero) { x.dirtybit = 0; }
    // Multi-GPU: start the halo exchange early so it can overlap with interior
    // work (latency hiding); wait immediately only if there is no interior view.
    if (!this->m_explicit_A->is_matrix_singleGPU())
    {
        this->m_explicit_A->manager->exchange_halo_async(x, x.tag);
        if (this->m_explicit_A->getViewExterior() == this->m_explicit_A->getViewInterior())
        {
            this->m_explicit_A->manager->exchange_halo_wait(x, x.tag);
        }
    }
    ViewType oldView = this->m_explicit_A->currentView();
    ViewType flags;
    bool latencyHiding = true;
    if (this->m_explicit_A->is_matrix_singleGPU() || (x.dirtybit == 0))
    {
        // No halo traffic outstanding: process the whole (exterior) view at once.
        latencyHiding = false;
        this->m_explicit_A->setViewExterior();
        flags = this->m_explicit_A->getViewExterior();
    }
    else
    {
        flags = this->m_explicit_A->getViewInterior();
        this->m_explicit_A->setViewInterior();
    }
    if (this->m_explicit_A->get_block_dimx() == 1 && this->m_explicit_A->get_block_dimy() == 1)
    {
        // Both branches currently take the same path; the dedicated zero-guess
        // variant is disabled (see the commented-out call).
        if (xIsZero)
        {
            //smooth_with_0_initial_guess_1x1(*this->m_explicit_A, b, x, flags);
            smooth_1x1(*this->m_explicit_A, b, x, flags, latencyHiding);
        }
        else
        {
            smooth_1x1(*this->m_explicit_A, b, x, flags, latencyHiding);
        }
    }
    else
    {
        FatalError("Unsupported block size for Kaczmarz_Solver", AMGX_ERR_NOT_SUPPORTED_BLOCKSIZE);
    }
    x.dirtybit = 1;
    this->m_explicit_A->setView(oldView);
    return this->converged( b, x );
}
template<class T_Config>
void
KaczmarzSolver_Base<T_Config>::solve_finalize( VVector &b, VVector &x )
{
    // Nothing to tear down after a solve.
}
// Multicolor version
template <AMGX_VecPrecision t_vecPrec, AMGX_MatPrecision t_matPrec, AMGX_IndPrecision t_indPrec>
void KaczmarzSolver<TemplateConfig<AMGX_device, t_vecPrec, t_matPrec, t_indPrec> >::smooth_1x1_MC(Matrix_d &A, VVector &b, VVector &x, ViewType separation_flags, bool latency_hiding)
{
    // Multicolor smoothing: relax the rows one color at a time; x is updated in
    // place (passed as both input and output to the kernel).
    typedef typename Matrix_d::index_type IndexType;
    typedef typename Matrix_d::value_type ValueTypeA;
    ValueTypeB *x_ptr = x.raw();
    IndexType num_rows = A.get_num_rows();
    const int num_colors = this->m_explicit_A->getMatrixColoring().getNumColors();
    const IndexType *A_sorted_rows_by_color_ptr = A.getMatrixColoring().getSortedRowsByColor().raw();
    for (int i = 0; i < num_colors; i++)
    {
        // Row range of this color, restricted to the requested view (interior/exterior).
        const IndexType color_offset = ((separation_flags & INTERIOR) == 0) ? A.getMatrixColoring().getSeparationOffsetsRowsPerColor()[i] : A.getMatrixColoring().getOffsetsRowsPerColor()[i];
        const IndexType num_rows_per_color = ((separation_flags == this->m_explicit_A->getViewInterior()) ? A.getMatrixColoring().getSeparationOffsetsRowsPerColor()[i] : A.getMatrixColoring().getOffsetsRowsPerColor()[i + 1]) - color_offset;
        if (num_rows_per_color == 0) { continue; }
        // One warp per row: 128 threads = 4 warps per CTA.
        const int threads_per_block = 128;
        const int blockrows_per_warp = 1;
        const int blockrows_per_cta = (threads_per_block / 32) * blockrows_per_warp;
        const int num_blocks = ::min( AMGX_GRID_MAX_SIZE, (int) (num_rows_per_color / blockrows_per_cta + 1));
        hipLaunchKernelGGL(( multicolor_kaczmarz_smooth_kernel<IndexType, ValueTypeA, ValueTypeB, threads_per_block>) , dim3(num_blocks), dim3(threads_per_block) , 0, 0,
            A.get_num_rows(),
            A.row_offsets.raw(),
            A.col_indices.raw(),
            A.values.raw(),
            this->m_an.raw(),
            b.raw(),
            x_ptr,
            this->weight,
            A_sorted_rows_by_color_ptr + color_offset, num_rows_per_color,
            x_ptr);
        cudaCheckError();
    }
}
template <AMGX_VecPrecision t_vecPrec, AMGX_MatPrecision t_matPrec, AMGX_IndPrecision t_indPrec>
void KaczmarzSolver<TemplateConfig<AMGX_device, t_vecPrec, t_matPrec, t_indPrec> >::smooth_1x1_naive(Matrix_d &A, VVector &b, VVector &x, ViewType separation_flags, bool latency_hiding)
{
    // Uncolored smoothing path: one Kaczmarz sweep over all rows, updating x in
    // place (x passed as both input and output to the kernel).
    typedef typename Matrix_d::index_type IndexType;
    typedef typename Matrix_d::value_type ValueTypeA;
    ValueTypeB *x_ptr = x.raw();
    IndexType num_rows = A.get_num_rows();
    IndexType offset = 0;
    // Skipping Multi-GPU logic for now
    // Current. Will be exact only with one warp per grid
    const int threads_per_block = 32;
    const int num_blocks = 1;
    if (this->m_randomized)
    {
        // Draw one random slot per row-update into the cumulative-norm inverse
        // table, so rows are sampled proportionally to their norm.
        IVector rnd_rows;
        int c_inv_sz = this->m_c_inv.size();
        initRandom(rnd_rows, A.get_num_rows(), c_inv_sz);
        hipLaunchKernelGGL(( randomized_kaczmarz_smooth_kernel_warp_atomics<IndexType, ValueTypeA, ValueTypeB, threads_per_block>) , dim3(num_blocks), dim3(threads_per_block) , 0, 0,
            A.get_num_rows(),
            A.row_offsets.raw(),
            A.col_indices.raw(),
            A.values.raw(),
            this->m_c_inv.raw(),
            rnd_rows.raw(),
            b.raw(),
            x_ptr,
            x_ptr,
            offset);
    }
    else
    {
        hipLaunchKernelGGL(( kaczmarz_smooth_kernel_warp_atomics<IndexType, ValueTypeA, ValueTypeB, threads_per_block>) , dim3(num_blocks), dim3(threads_per_block) , 0, 0,
            A.get_num_rows(),
            A.row_offsets.raw(),
            A.col_indices.raw(),
            A.values.raw(),
            this->m_an.raw(),
            b.raw(),
            x_ptr,
            x_ptr,
            offset);
    }
    cudaCheckError();
}
template <AMGX_VecPrecision t_vecPrec, AMGX_MatPrecision t_matPrec, AMGX_IndPrecision t_indPrec>
void KaczmarzSolver<TemplateConfig<AMGX_device, t_vecPrec, t_matPrec, t_indPrec> >::smooth_1x1(Matrix_d &A, VVector &b, VVector &x, ViewType separation_flags, bool latency_hiding)
{
    // Dispatch on the coloring flag: multicolor sweep vs. plain/randomized sweep.
    this->m_coloring_needed
    ? smooth_1x1_MC(A, b, x, separation_flags, latency_hiding)
    : smooth_1x1_naive(A, b, x, separation_flags, latency_hiding);
}
template <AMGX_VecPrecision t_vecPrec, AMGX_MatPrecision t_matPrec, AMGX_IndPrecision t_indPrec>
void KaczmarzSolver<TemplateConfig<AMGX_device, t_vecPrec, t_matPrec, t_indPrec> >::smooth_with_0_initial_guess_1x1(Matrix_d &A, VVector &b, VVector &x, ViewType separation_flags)
{
    // NOTE(review): effectively a stub -- it only queries the exterior view size
    // and restores the previous view; no smoothing kernel is launched here.
    ViewType savedView = A.currentView();
    A.setViewExterior();
    int offset = 0, num_rows = 0;
    A.getOffsetAndSizeForView(A.getViewExterior(), &offset, &num_rows);
    A.setView(savedView);
    cudaCheckError();
}
/****************************************
* Explict instantiations
***************************************/
#define AMGX_CASE_LINE(CASE) template class KaczmarzSolver_Base<TemplateMode<CASE>::Type>;
AMGX_FORALL_BUILDS(AMGX_CASE_LINE)
#undef AMGX_CASE_LINE
#define AMGX_CASE_LINE(CASE) template class KaczmarzSolver<TemplateMode<CASE>::Type>;
AMGX_FORALL_BUILDS(AMGX_CASE_LINE)
#undef AMGX_CASE_LINE
} // namespace amgx
|
090626ace0e6c6fe5b3f70903064b69852d37d37.cu
|
/* Copyright (c) 2011-2017, NVIDIA CORPORATION. All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions
* are met:
* * Redistributions of source code must retain the above copyright
* notice, this list of conditions and the following disclaimer.
* * Redistributions in binary form must reproduce the above copyright
* notice, this list of conditions and the following disclaimer in the
* documentation and/or other materials provided with the distribution.
* * Neither the name of NVIDIA CORPORATION nor the names of its
* contributors may be used to endorse or promote products derived
* from this software without specific prior written permission.
*
* THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS ``AS IS'' AND ANY
* EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
* IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
* PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR
* CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
* EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
* PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
* PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY
* OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
* (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
* OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
*/
#include <solvers/kaczmarz_solver.h>
#include <solvers/block_common_solver.h>
#include <thrust/transform.h>
#include <basic_types.h>
#include <string.h>
#include <cutil.h>
#include <util.h>
#include <miscmath.h>
#include <sm_utils.inl>
namespace amgx
{
// -----------
// Kernels
// -----------
/*************************************************************************
* "random" hash function for both device and host
************************************************************************/
__host__ __device__ static int ourHash(const int i, const int max)
{
    // Integer bit-mix of i, then scale the 32-bit result into [0, max).
    // Deterministic: the same (i, max) always yields the same value.
    unsigned int h = i;
    h = (h + 0x7ed55d16) + (h << 12);
    h = (h ^ 0xc761c23c) + (h >> 19);
    h = (h + 0x165667b1) + (h << 5);
    h = (h ^ 0xd3a2646c) + (h << 9);
    h = (h + 0xfd7046c5) + (h << 3);
    h = (h ^ 0xb55a4f09) + (h >> 16);
    const float unit = ((h ^ 0x4a51e590) / (float)UINT_MAX);  // in [0, 1]
    return int(unit * max);
}
// Functor mapping a sequence index to a pseudo-random integer in [0, max_int).
struct prg
{
    float a, b;      // unused bounds, kept for interface compatibility
    int max_int;
    __host__ __device__
    prg(int _max_int, float _a = 0.f, float _b = 1.f) : a(_a), b(_b), max_int(_max_int) {};
    __host__ __device__
    int operator()(const unsigned int n) const
    {
        // The hash does all the work; this functor only fixes the range.
        return ourHash(n, max_int);
    }
};
// Fill `vec` with `size` pseudo-random ints in [0, max_int).
// Deterministic: element k is always ourHash(k, max_int).
template <class Vector>
void initRandom(Vector &vec, int size, int max_int)
{
    vec.resize(size);
    thrust::counting_iterator<unsigned int> first(0);
    thrust::transform(first, first + size, vec.begin(), prg(max_int));
}
template<typename IndexType, typename ValueTypeA, typename ValueTypeB>
__global__ void compute_anorm_kernel(const IndexType num_rows,
        const IndexType *Ap,
        const IndexType *Aj,
        const ValueTypeA *Ax,
        ValueTypeA *d)
{
    // Grid-stride loop: d[row] = sum_j A(row,j)^2, the squared L2 norm of the row.
    const int stride = blockDim.x * gridDim.x;
    for (int row = blockDim.x * blockIdx.x + threadIdx.x; row < num_rows; row += stride)
    {
        const IndexType begin = Ap[row];
        const IndexType end = Ap[row + 1];
        ValueTypeB norm2 = 0;
        for (IndexType j = begin; j < end; j++)
        {
            const ValueTypeB a = Ax[j];
            norm2 += a * a;
        }
        d[row] = norm2;
    }
}
// Squared L2 row norms for the rows of one color.
// Fixes vs. original: (1) `d` was const-qualified yet written below, which
// would fail to compile on instantiation; (2) the norm was stored at the
// color-local index `ridx` instead of the global row index `i`, so consumers
// reading d[i] (see the multicolor smoothing kernels) would get wrong values.
template<typename IndexType, typename ValueTypeA, typename ValueTypeB>
__global__ void compute_multicolor_anorm_kernel(const IndexType num_rows,
        const IndexType *Ap,
        const IndexType *Aj,
        const ValueTypeA *Ax,
        ValueTypeA *d,
        const int *sorted_rows_by_color,
        const int num_rows_per_color)
{
    IndexType tidx = blockDim.x * blockIdx.x + threadIdx.x;
    for (int ridx = tidx; ridx < num_rows_per_color; ridx += blockDim.x * gridDim.x)
    {
        int i = sorted_rows_by_color[ridx];
        ValueTypeB d_ = 0;
        IndexType row_start = Ap[i];
        IndexType row_end = Ap[i + 1];
        for (int j = row_start; j < row_end; j++)
        {
            ValueTypeB Aij = Ax[j];
            d_ += Aij * Aij;
        }
        // Store squared L2-norm at the global row index.
        d[i] = d_;
    }
}
// Invert the cumulative row-norm distribution a_cum onto a uniform grid of
// step d_inv: c_inv[k] = index of the row whose cumulative interval covers
// grid cell k. Sampling c_inv at a uniformly random slot then picks rows with
// probability proportional to their (squared) norm.
template<typename IndexType, typename ValueTypeA, typename ValueTypeB>
__global__ void compute_cumul_inv_kernel(const IndexType a_cum_num_rows,
        ValueTypeA *a_cum,
        ValueTypeB d_inv,
        int c_inv_sz,
        IndexType *c_inv)
{
    const int tidx = blockDim.x * blockIdx.x + threadIdx.x;
    int idx, idx1, idx2;
    for (int ridx = tidx; ridx < a_cum_num_rows; ridx += blockDim.x * gridDim.x)
    {
        // printf("%d %f %f\n", ridx, d_inv, a_cum[ridx]);
        //printf("%f\n", a_cum[ridx]);
        double a = a_cum[ridx];
        // if (ridx < 0 || ridx >= a_cum_num_rows)
        // printf("!! %d %d\n", ridx, idx);
        idx1 = int(a / d_inv) - 1; // get index in inverse table (floor - 1)
        if (ridx < a_cum_num_rows - 1)
        {
            idx2 = a_cum[ridx + 1] / d_inv - 1; // get index in inverse table (floor - 1)
        }
        else
        {
            // Last row owns every remaining grid cell.
            idx2 = c_inv_sz;
        }
        // printf("%d %d\n", idx1, idx2);
        for ( idx = idx1; idx < idx2; idx++)
        {
            // Diagnostic guard: rounding should never push idx outside the table.
            if (idx >= c_inv_sz || idx < 0)
            {
                printf("Ai! %d %d\n", idx, ridx);
            }
            c_inv[idx] = ridx;
        }
    }
}
// For each row, store in amax_idx[row] the position (index into Ax) of the
// largest entry of that row.
// Fix vs. original: maxVal/jmax/avalue were declared outside the grid-stride
// row loop, so maxVal was never reset between rows -- every row after the
// first compared against the running maximum of all previously visited rows
// and could keep a stale jmax (pointing at row_start).
// NOTE(review): the comparison is on signed values (no fabs); presumably the
// largest positive entry is intended -- confirm with the consumer.
template<typename IndexType, typename ValueTypeA, typename ValueTypeB>
__global__ void compute_amax_kernel(const IndexType num_rows,
        const IndexType *Ap,
        const IndexType *Aj,
        const ValueTypeA *Ax,
        IndexType *amax_idx)
{
    IndexType tidx = blockDim.x * blockIdx.x + threadIdx.x;
    for (int ridx = tidx; ridx < num_rows; ridx += blockDim.x * gridDim.x)
    {
        IndexType row_start = Ap[ridx];
        IndexType row_end = Ap[ridx + 1];
        ValueTypeA maxVal(0);        // reset for every row
        IndexType jmax = row_start;
        for (int j = row_start; j < row_end; j++)
        {
            ValueTypeA avalue = Ax[j];
            if (avalue > maxVal)
            {
                maxVal = avalue;
                jmax = j;
            }
        }
        // Store position of maxvalue
        amax_idx[ridx] = jmax;
    }
}
template<typename IndexType, typename ValueTypeA, typename ValueTypeB>
__global__ void kaczmarz_smooth_kernel_naive_atomics(const IndexType num_rows,
        const IndexType *Ap,
        const IndexType *Aj,
        const ValueTypeA *Ax,
        const ValueTypeA *d,
        const ValueTypeB *b,
        const ValueTypeB *x,
        ValueTypeB *xout,
        const IndexType row_offset)
{
    // One Kaczmarz sweep with one thread per row, starting at row_offset.
    // xout must hold a copy of x on entry; updates go through atomics because
    // different rows may touch the same column concurrently.
    const int stride = blockDim.x * gridDim.x;
    for (int row = row_offset + blockDim.x * blockIdx.x + threadIdx.x; row < num_rows; row += stride)
    {
        const IndexType begin = Ap[row];
        const IndexType end = Ap[row + 1];
        // Row residual scaled by 1/d[row] (guarded against a near-zero value).
        ValueTypeB dot = 0.0;
        for (IndexType j = begin; j < end; j++)
        {
            dot += Ax[j] * xout[Aj[j]];
        }
        const ValueTypeB denom = isNotCloseToZero( d[row]) ? d[row] : epsilon(d[row]);
        const ValueTypeB corr = (b[row] - dot) / denom;
        // Project the correction back along the row.
        for (IndexType j = begin; j < end; j++)
        {
            utils::atomic_add(&xout[Aj[j]], corr * Ax[j]);
        }
    }
}
// One Kaczmarz sweep with one warp per row (kCtaSize/32 warps per CTA).
// xout must hold a copy of x on entry; d holds per-row squared L2 norms
// (filled by compute_anorm_kernel, passed as m_an by smooth_1x1_naive).
// NOTE: the `x` and `row_offset` parameters are currently unused in this body.
template<typename IndexType, typename ValueTypeA, typename ValueTypeB, int kCtaSize>
__global__ void kaczmarz_smooth_kernel_warp_atomics(const IndexType num_rows,
        const IndexType *Ap,
        const IndexType *Aj,
        const ValueTypeA *Ax,
        const ValueTypeA *d,
        const ValueTypeB *b,
        const ValueTypeB *x,
        ValueTypeB *xout,
        const IndexType row_offset)
{
    const int num_warps = kCtaSize / 32;
    const int num_rows_per_iter = num_warps * gridDim.x;
    const int warpId = threadIdx.x / 32;
    const int laneId = threadIdx.x % 32;
    for ( int ridx = blockIdx.x * num_warps + warpId ; ridx < num_rows ;
            ridx += num_rows_per_iter )
    {
        IndexType row_start = Ap[ridx];
        IndexType row_end = Ap[ridx + 1];
        ValueTypeB Axi = 0.0;
        ValueTypeB r;
        // Warp-cooperative dot product A_ridx . xout; lanes past the row end
        // contribute zero so the whole warp stays in the loop (utils::any).
        for (int j = row_start + laneId; utils::any( j < row_end) ; j += 32)
        {
            ValueTypeB aValue = j < row_end ? Ax[j] : ValueTypeB(0);
            ValueTypeB xValue = j < row_end ? xout[Aj[j]] : ValueTypeB(0);
            r = utils::warp_reduce<1, utils::Add>(aValue * xValue);
            Axi += r;
        }
        // Kaczmarz correction: residual over the squared row norm (guarded).
        r = (b[ridx] - Axi) / ( isNotCloseToZero( d[ridx]) ? d[ridx] : epsilon(d[ridx]) );
        for (int j = row_start + laneId; utils::any( j < row_end) ; j += 32)
        {
            //ValueTypeB dx = j < row_end ? r*Ax[j] : ValueTypeB(0);
            //int aj = j < row_end ? r*Ax[j] : ValueTypeB(0);
            if (j < row_end)
            {
                utils::atomic_add(&xout[Aj[j]], r * Ax[j]);
            }
        }
    }
}
// Randomized Kaczmarz sweep, one warp per update. The row to relax is drawn
// through the cumulative-norm inverse table c_inv indexed by a precomputed
// random sequence rnd_rows, which samples rows proportionally to their
// (squared) norm. xout must hold a copy of x on entry.
// Fix vs. original: the residual used b[ridx] -- the RHS indexed by the
// iteration counter -- instead of b[irow], the RHS entry of the row actually
// being relaxed; the Kaczmarz update for row irow must use b[irow].
// NOTE: the `x` and `row_offset` parameters are currently unused in this body.
template<typename IndexType, typename ValueTypeA, typename ValueTypeB, int kCtaSize>
__global__ void randomized_kaczmarz_smooth_kernel_warp_atomics(const IndexType num_rows,
        const IndexType *Ap,
        const IndexType *Aj,
        const ValueTypeA *Ax,
        const IndexType *c_inv,
        const IndexType *rnd_rows,
        const ValueTypeB *b,
        const ValueTypeB *x,
        ValueTypeB *xout,
        const IndexType row_offset)
{
    const int num_warps = kCtaSize / 32;
    const int num_rows_per_iter = num_warps * gridDim.x;
    const int warpId = threadIdx.x / 32;
    const int laneId = threadIdx.x % 32;
    for ( int ridx = blockIdx.x * num_warps + warpId ; ridx < num_rows ;
            ridx += num_rows_per_iter )
    {
        // Row sampled for this update.
        int irow = c_inv[rnd_rows[ridx]];
        IndexType row_start = Ap[irow];
        IndexType row_end = Ap[irow + 1];
        ValueTypeB Axi = 0.0;
        ValueTypeB r;
        ValueTypeA aa;
        ValueTypeA AA = 0.0;    // accumulates the squared row norm on the fly
        for (int j = row_start + laneId; utils::any( j < row_end) ; j += 32)
        {
            ValueTypeB aValue = j < row_end ? Ax[j] : ValueTypeB(0);
            ValueTypeB xValue = j < row_end ? xout[Aj[j]] : ValueTypeB(0);
            r = utils::warp_reduce<1, utils::Add>(aValue * xValue);
            aa = utils::warp_reduce<1, utils::Add>(aValue * aValue);
            Axi += r;
            AA += aa;
        }
        // Residual of the sampled row over its squared norm (guarded).
        r = (b[irow] - Axi) / ( isNotCloseToZero( AA) ? AA : epsilon(AA) );
        for (int j = row_start + laneId; utils::any( j < row_end) ; j += 32)
        {
            if (j < row_end)
            {
                utils::atomic_add(&xout[Aj[j]], r * Ax[j]);
            }
        }
    }
}
// Hashed-row Kaczmarz-like sweep, one warp per (pseudo-randomly chosen) row.
// Needs x copied into xout at the very beginning.
// Fix vs. original: removed two debug printf calls executed for every nonzero
// and every row -- they serialize the warp and flood stdout in production runs.
// NOTE: the `amax`, `b`... parameters other than those used below are kept for
// interface compatibility; `amax`, `x` and `row_offset` are currently unused.
template<typename IndexType, typename ValueTypeA, typename ValueTypeB, int kCtaSize>
__global__ void kaczmarz_smooth_kernel(const IndexType num_rows,
        const IndexType *Ap,
        const IndexType *Aj,
        const ValueTypeA *Ax,
        const IndexType *amax,
        const ValueTypeB *b,
        const ValueTypeB *x,
        ValueTypeB *xout,
        const IndexType row_offset)
{
    IndexType i, t;
    const int num_warps = kCtaSize / 32;
    const int num_rows_per_iter = num_warps * gridDim.x;
    const int warpId = threadIdx.x / 32;
    const int laneId = threadIdx.x % 32;
    for ( int ridx = blockIdx.x * num_warps + warpId ; ridx < num_rows ;
            ridx += num_rows_per_iter )
    {
        ValueTypeB Axi = 0.0;
        ValueTypeB r;
        // Pick a pseudo-random row for this iteration index.
        i = ourHash(ridx, num_rows);
        IndexType row_start = Ap[i];
        IndexType row_end = Ap[i + 1];
        // Warp-cooperative dot product A_i . xout.
        for (int j = row_start + laneId; utils::any( j < row_end) ; j += 32)
        {
            ValueTypeB aValue = j < row_end ? Ax[j] : ValueTypeB(0);
            ValueTypeB xValue = j < row_end ? xout[Aj[j]] : ValueTypeB(0);
            r = utils::warp_reduce<1, utils::Add>(aValue * xValue);
            Axi += r;
        }
        if (laneId == 0)
        {
            // Un-normalized residual; a hashed column of the row takes the update.
            r = (b[i] - Axi);
            t = row_start + ourHash(ridx, row_end - row_start);
            xout[Aj[t]] += r * ((row_end - row_start) * Ax[t]) * 0.5;
        }
    }
}
template<typename IndexType, typename ValueTypeA, typename ValueTypeB>
__global__ void multicolor_kaczmarz_smooth_kernel_naive(const IndexType num_rows,
        const IndexType *Ap,
        const IndexType *Aj,
        const ValueTypeA *Ax,
        const ValueTypeA *d,
        const ValueTypeB *b,
        const ValueTypeB *x,
        ValueTypeB weight,
        const int *sorted_rows_by_color,
        const int num_rows_per_color,
        ValueTypeB *xout)
{
    // Naive multicolor Kaczmarz sweep: one thread per row of the current color.
    // xout must already hold a copy of x on entry; updates go through atomics
    // since concurrently processed rows may share columns.
    const int grid_stride = blockDim.x * gridDim.x;
    for (int idx = blockDim.x * blockIdx.x + threadIdx.x; idx < num_rows_per_color; idx += grid_stride)
    {
        // Map the color-local index to the actual matrix row.
        const int row = sorted_rows_by_color[idx];
        const IndexType begin = Ap[row];
        const IndexType end = Ap[row + 1];
        // Residual of this row against the current iterate.
        ValueTypeB dot = 0.0;
        for (IndexType j = begin; j < end; j++)
        {
            dot += Ax[j] * xout[Aj[j]];
        }
        // Scale by 1/d[row] (guarded against a near-zero denominator).
        const ValueTypeB denom = isNotCloseToZero( d[row]) ? d[row] : epsilon(d[row]);
        const ValueTypeB corr = (b[row] - dot) / denom;
        // Project the correction back along the row.
        for (IndexType j = begin; j < end; j++)
        {
            utils::atomic_add(&xout[Aj[j]], corr * Ax[j]);
        }
    }
}
// One multicolor Kaczmarz sweep over the rows of a single color, one warp per
// row (kCtaSize/32 warps per CTA). xout must hold a copy of x on entry; d holds
// per-row squared L2 norms (filled by compute_anorm_kernel).
// NOTE: the `weight` and `x` parameters are currently unused in this body.
template<typename IndexType, typename ValueTypeA, typename ValueTypeB, int kCtaSize>
__global__ void multicolor_kaczmarz_smooth_kernel(const IndexType num_rows,
        const IndexType *Ap,
        const IndexType *Aj,
        const ValueTypeA *Ax,
        const ValueTypeA *d,
        const ValueTypeB *b,
        const ValueTypeB *x,
        ValueTypeB weight,
        const int *sorted_rows_by_color,
        const int num_rows_per_color,
        ValueTypeB *xout)
{
    const int num_warps = kCtaSize / 32;
    const int num_rows_per_iter = num_warps * gridDim.x;
    const int warpId = threadIdx.x / 32;
    const int laneId = threadIdx.x % 32;
    int i;
    for ( int ridx = blockIdx.x * num_warps + warpId ; ridx < num_rows_per_color ;
            ridx += num_rows_per_iter )
    {
        // Map color-local index to the actual matrix row.
        i = sorted_rows_by_color[ridx];
        IndexType row_start = Ap[i];
        IndexType row_end = Ap[i + 1];
        ValueTypeB Axi = 0.0;
        ValueTypeB r;
        // Warp-cooperative dot product A_i . xout; lanes past the row end
        // contribute zero so the whole warp stays in the loop (utils::any).
        for (int j = row_start + laneId; utils::any( j < row_end) ; j += 32)
        {
            ValueTypeB aValue = j < row_end ? Ax[j] : ValueTypeB(0);
            ValueTypeB xValue = j < row_end ? xout[Aj[j]] : ValueTypeB(0);
            r = utils::warp_reduce<1, utils::Add>(aValue * xValue);
            Axi += r;
        }
        // Kaczmarz correction: residual over the squared row norm (guarded).
        r = (b[i] - Axi) / ( isNotCloseToZero( d[i]) ? d[i] : epsilon(d[i]) );
        for (int j = row_start + laneId; utils::any( j < row_end) ; j += 32)
        {
            if (j < row_end)
            {
                utils::atomic_add(&xout[Aj[j]], r * Ax[j]);
            }
        }
    }
}
template<typename IndexType, typename ValueTypeA, typename ValueTypeB>
__global__ void jacobi_smooth_with_0_initial_guess_kernel(const IndexType num_rows,
        const ValueTypeA *d,
        const ValueTypeB *b,
        ValueTypeB *x,
        ValueTypeB weight,
        const IndexType row_offset)
{
    // With x == 0 on entry, one weighted Jacobi step reduces to x = weight * b / d
    // (d is the per-row scaling; guarded against near-zero entries).
    const int stride = blockDim.x * gridDim.x;
    for (int row = row_offset + blockDim.x * blockIdx.x + threadIdx.x; row < num_rows; row += stride)
    {
        const ValueTypeA scale = isNotCloseToZero( d[row]) ? d[row] : epsilon(d[row]);
        x[row] = weight * b[row] / scale;
    }
}
// -----------------
// Methods
// -----------------
// Constructor
// Constructor: read smoother configuration from the AMG config scope.
template<class T_Config>
KaczmarzSolver_Base<T_Config>::KaczmarzSolver_Base( AMG_Config &cfg, const std::string &cfg_scope) : Solver<T_Config>( cfg, cfg_scope), m_an(0), m_amax(0), m_c_inv(0)
{
    // Relaxation (damping) factor applied by the smoothing kernels.
    weight = cfg.AMG_Config::template getParameter<double>("relaxation_factor", cfg_scope);
    this->m_coloring_needed = (cfg.AMG_Config::template getParameter<int>("kaczmarz_coloring_needed", cfg_scope) != 0);
    this->m_reorder_cols_by_color_desired = (cfg.AMG_Config::template getParameter<int>("reorder_cols_by_color", cfg_scope) != 0);
    this->m_randomized = true;
    if (weight == 0)
    {
        // A zero relaxation factor would annihilate every update; fall back to 1.
        // Fix vs. original: warning text said "Block Jacobi smoother" (copy-paste
        // from the Jacobi solver) and carried a stray empty statement (";;").
        weight = 1.;
        amgx_printf("Warning, setting weight to 1 instead of estimating largest_eigen_value in Kaczmarz smoother\n");
    }
}
// Destructor
template<class T_Config>
KaczmarzSolver_Base<T_Config>::~KaczmarzSolver_Base()
{
    // Nothing to free explicitly; member vectors release their own storage.
}
// Solver setup
template<class T_Config>
void
KaczmarzSolver_Base<T_Config>::solver_setup(bool reuse_matrix_structure)
{
    // Cache the concrete matrix and precompute the per-row data the smoothing
    // kernels need (squared row norms; for randomized Kaczmarz also the
    // norm-proportional sampling table m_c_inv).
    m_explicit_A = dynamic_cast<Matrix<T_Config>*>(Base::m_A);
    if (!m_explicit_A)
    {
        FatalError("Kaczmarz solver only works with explicit matrices", AMGX_ERR_INTERNAL);
    }
    compute_anorm( *this->m_explicit_A );
    if (m_randomized) // MC RK is not supported here
    {
        if (m_coloring_needed)
        {
            //FatalError("Randomized Kaczmarz solver does not support coloring", AMGX_ERR_INTERNAL);
            m_coloring_needed = false;
        }
        // Build the inverse of the cumulative row-norm distribution: reduce to
        // the smallest norm (grid step d_inv), scan the norms in place, then
        // invert the cumulative distribution onto a uniform grid on the device.
        double d_inv = this->m_an[0];
        int c_sz = this->m_an.size();
        d_inv = thrust::reduce(this->m_an.begin(), this->m_an.end(), d_inv, thrust::minimum<ValueTypeA>());
        thrust::inclusive_scan(this->m_an.begin(), this->m_an.end(), this->m_an.begin()); // in-place scan
        int c_inv_sz = (this->m_an[c_sz - 1] + d_inv - 1 ) / d_inv;
        this->m_c_inv.resize(c_inv_sz, -1);
        const size_t THREADS_PER_BLOCK = 128;
        const size_t NUM_BLOCKS = std::min(AMGX_GRID_MAX_SIZE, (int)ceil((ValueTypeB)c_sz / (ValueTypeB)THREADS_PER_BLOCK));
        if (c_sz > 0)
        {
            // NOTE(review): `aa` appears unused below -- confirm before removing.
            device_vector_alloc<ValueTypeA> aa(c_sz, 1);
            compute_cumul_inv_kernel<IndexType, ValueTypeA, ValueTypeB> <<< (unsigned int)NUM_BLOCKS, (unsigned int)THREADS_PER_BLOCK>>>
            (c_sz,
             this->m_an.raw(),
             d_inv,
             c_inv_sz,
             this->m_c_inv.raw());
        }
        cudaDeviceSynchronize();
        cudaCheckError();
    }
}
template<class T_Config>
void KaczmarzSolver_Base<T_Config>::compute_anorm( Matrix<T_Config> &A)
{
    // Compute per-row (squared) norms of A into m_an; only scalar (1x1 block)
    // matrices are supported. The matrix view is switched to the exterior view
    // for the computation and restored afterwards.
    this->m_an.resize(A.get_num_rows()*A.get_block_dimx());
    ViewType savedView = A.currentView();
    A.setView(this->m_explicit_A->getViewExterior());
    if (A.get_block_dimx() != 1 || A.get_block_dimy() != 1)
    {
        FatalError("Unsupported block size for KaczmarzSolver", AMGX_ERR_NOT_SUPPORTED_BLOCKSIZE);
    }
    compute_anorm_1x1(A);
    A.setView(savedView);
}
template<class T_Config>
void KaczmarzSolver_Base<T_Config>::compute_amax( Matrix<T_Config> &A)
{
    // Locate the per-row maximum entries of A into m_amax; only scalar
    // (1x1 block) matrices are supported. The view is restored on exit.
    this->m_amax.resize(A.get_num_rows()*A.get_block_dimx());
    ViewType savedView = A.currentView();
    A.setView(this->m_explicit_A->getViewExterior());
    if (A.get_block_dimx() != 1 || A.get_block_dimy() != 1)
    {
        FatalError("Unsupported block size for KaczmarzSolver", AMGX_ERR_NOT_SUPPORTED_BLOCKSIZE);
    }
    compute_amax_1x1(A);
    A.setView(savedView);
}
template <AMGX_VecPrecision t_vecPrec, AMGX_MatPrecision t_matPrec, AMGX_IndPrecision t_indPrec>
void KaczmarzSolver<TemplateConfig<AMGX_device, t_vecPrec, t_matPrec, t_indPrec> >::compute_anorm_1x1(const Matrix_d &A)
{
    // Launch compute_anorm_kernel to fill m_an with per-row squared L2 norms.
    //DIAG: strange issues trying to add DIAG property handling
    // now leaving !DIAG only
    if (A.hasProps(DIAG))
    {
        FatalError("Unsupported separate diag", AMGX_ERR_NOT_IMPLEMENTED);
    }
    typedef typename Matrix_d::index_type IndexType;
    typedef typename Matrix_d::value_type ValueTypeA;
    const size_t THREADS_PER_BLOCK = 128;
    const size_t NUM_BLOCKS = std::min(AMGX_GRID_MAX_SIZE, (int)ceil((ValueTypeB)A.get_num_rows() / (ValueTypeB)THREADS_PER_BLOCK));
    if (A.get_num_rows() > 0)
    {
        compute_anorm_kernel<IndexType, ValueTypeA, ValueTypeB> <<< (unsigned int)NUM_BLOCKS, (unsigned int)THREADS_PER_BLOCK>>>
        ((int)A.get_num_rows(),
         A.row_offsets.raw(),
         A.col_indices.raw(),
         A.values.raw(),
         this->m_an.raw());
    }
    cudaCheckError();
}
template <AMGX_VecPrecision t_vecPrec, AMGX_MatPrecision t_matPrec, AMGX_IndPrecision t_indPrec>
void KaczmarzSolver<TemplateConfig<AMGX_device, t_vecPrec, t_matPrec, t_indPrec> >::compute_amax_1x1(const Matrix_d &A)
{
    // Launch compute_amax_kernel to record, per row, the position of the
    // largest entry into m_amax.
    //DIAG: strange issues trying to add DIAG property handling
    // now leaving !DIAG only
    if (A.hasProps(DIAG))
    {
        FatalError("Unsupported separate diag", AMGX_ERR_NOT_IMPLEMENTED);
    }
    typedef typename Matrix_d::index_type IndexType;
    typedef typename Matrix_d::value_type ValueTypeA;
    const size_t THREADS_PER_BLOCK = 128;
    const size_t NUM_BLOCKS = std::min(AMGX_GRID_MAX_SIZE, (int)ceil((ValueTypeB)A.get_num_rows() / (ValueTypeB)THREADS_PER_BLOCK));
    if (A.get_num_rows() > 0)
    {
        compute_amax_kernel<IndexType, ValueTypeA, ValueTypeB> <<< (unsigned int)NUM_BLOCKS, (unsigned int)THREADS_PER_BLOCK>>>
        ((int)A.get_num_rows(),
         A.row_offsets.raw(),
         A.col_indices.raw(),
         A.values.raw(),
         this->m_amax.raw());
    }
    cudaCheckError();
}
template<class T_Config>
void
KaczmarzSolver_Base<T_Config>::solve_init( VVector &b, VVector &x, bool xIsZero )
{
    // No per-solve initialization needed; precomputation happens in solver_setup().
}
// Solve one iteration
template<class T_Config>
bool
KaczmarzSolver_Base<T_Config>::solve_iteration( VVector &b, VVector &x, bool xIsZero )
{
    // Perform one smoothing sweep over x; returns the convergence status.
    if (xIsZero) { x.dirtybit = 0; }
    // Multi-GPU: start the halo exchange early so it can overlap with interior
    // work (latency hiding); wait immediately only if there is no interior view.
    if (!this->m_explicit_A->is_matrix_singleGPU())
    {
        this->m_explicit_A->manager->exchange_halo_async(x, x.tag);
        if (this->m_explicit_A->getViewExterior() == this->m_explicit_A->getViewInterior())
        {
            this->m_explicit_A->manager->exchange_halo_wait(x, x.tag);
        }
    }
    ViewType oldView = this->m_explicit_A->currentView();
    ViewType flags;
    bool latencyHiding = true;
    if (this->m_explicit_A->is_matrix_singleGPU() || (x.dirtybit == 0))
    {
        // No halo traffic outstanding: process the whole (exterior) view at once.
        latencyHiding = false;
        this->m_explicit_A->setViewExterior();
        flags = this->m_explicit_A->getViewExterior();
    }
    else
    {
        flags = this->m_explicit_A->getViewInterior();
        this->m_explicit_A->setViewInterior();
    }
    if (this->m_explicit_A->get_block_dimx() == 1 && this->m_explicit_A->get_block_dimy() == 1)
    {
        // Both branches currently take the same path; the dedicated zero-guess
        // variant is disabled (see the commented-out call).
        if (xIsZero)
        {
            //smooth_with_0_initial_guess_1x1(*this->m_explicit_A, b, x, flags);
            smooth_1x1(*this->m_explicit_A, b, x, flags, latencyHiding);
        }
        else
        {
            smooth_1x1(*this->m_explicit_A, b, x, flags, latencyHiding);
        }
    }
    else
    {
        FatalError("Unsupported block size for Kaczmarz_Solver", AMGX_ERR_NOT_SUPPORTED_BLOCKSIZE);
    }
    x.dirtybit = 1;
    this->m_explicit_A->setView(oldView);
    return this->converged( b, x );
}
// Per-solve teardown hook; no cleanup is required for Kaczmarz.
template<class T_Config>
void
KaczmarzSolver_Base<T_Config>::solve_finalize( VVector &b, VVector &x )
{
}
// Multicolor version
// One Kaczmarz sweep driven by the matrix coloring: colors are visited
// sequentially, and for each color one kernel launch processes that color's
// rows (row order taken from getSortedRowsByColor). separation_flags selects
// which part of each color (interior / exterior / all rows) is smoothed.
template <AMGX_VecPrecision t_vecPrec, AMGX_MatPrecision t_matPrec, AMGX_IndPrecision t_indPrec>
void KaczmarzSolver<TemplateConfig<AMGX_device, t_vecPrec, t_matPrec, t_indPrec> >::smooth_1x1_MC(Matrix_d &A, VVector &b, VVector &x, ViewType separation_flags, bool latency_hiding)
{
typedef typename Matrix_d::index_type IndexType;
typedef typename Matrix_d::value_type ValueTypeA;
ValueTypeB *x_ptr = x.raw();
IndexType num_rows = A.get_num_rows();
const int num_colors = this->m_explicit_A->getMatrixColoring().getNumColors();
const IndexType *A_sorted_rows_by_color_ptr = A.getMatrixColoring().getSortedRowsByColor().raw();
for (int i = 0; i < num_colors; i++)
{
// Row range of color i, restricted by the interior/exterior separation.
const IndexType color_offset = ((separation_flags & INTERIOR) == 0) ? A.getMatrixColoring().getSeparationOffsetsRowsPerColor()[i] : A.getMatrixColoring().getOffsetsRowsPerColor()[i];
const IndexType num_rows_per_color = ((separation_flags == this->m_explicit_A->getViewInterior()) ? A.getMatrixColoring().getSeparationOffsetsRowsPerColor()[i] : A.getMatrixColoring().getOffsetsRowsPerColor()[i + 1]) - color_offset;
if (num_rows_per_color == 0) { continue; }
const int threads_per_block = 128;
const int blockrows_per_warp = 1;
const int blockrows_per_cta = (threads_per_block / 32) * blockrows_per_warp;
const int num_blocks = std::min( AMGX_GRID_MAX_SIZE, (int) (num_rows_per_color / blockrows_per_cta + 1));
multicolor_kaczmarz_smooth_kernel<IndexType, ValueTypeA, ValueTypeB, threads_per_block> <<< num_blocks, threads_per_block >>>
(A.get_num_rows(),
A.row_offsets.raw(),
A.col_indices.raw(),
A.values.raw(),
this->m_an.raw(),
b.raw(),
x_ptr,
this->weight,
A_sorted_rows_by_color_ptr + color_offset, num_rows_per_color,
x_ptr);
cudaCheckError();
}
}
// Fallback sweep used when no coloring is available. Deliberately launched as
// a single 32-thread block (see the comment below: exact only with one warp
// per grid). The randomized variant draws row indices via initRandom and uses
// the precomputed m_c_inv table.
template <AMGX_VecPrecision t_vecPrec, AMGX_MatPrecision t_matPrec, AMGX_IndPrecision t_indPrec>
void KaczmarzSolver<TemplateConfig<AMGX_device, t_vecPrec, t_matPrec, t_indPrec> >::smooth_1x1_naive(Matrix_d &A, VVector &b, VVector &x, ViewType separation_flags, bool latency_hiding)
{
typedef typename Matrix_d::index_type IndexType;
typedef typename Matrix_d::value_type ValueTypeA;
ValueTypeB *x_ptr = x.raw();
IndexType num_rows = A.get_num_rows();
IndexType offset = 0;
// Skipping Multi-GPU logic for now
// Current. Will be exact only with one warp per grid
const int threads_per_block = 32;
const int num_blocks = 1;
if (this->m_randomized)
{
IVector rnd_rows;
int c_inv_sz = this->m_c_inv.size();
initRandom(rnd_rows, A.get_num_rows(), c_inv_sz);
randomized_kaczmarz_smooth_kernel_warp_atomics<IndexType, ValueTypeA, ValueTypeB, threads_per_block> <<< num_blocks, threads_per_block >>>
(A.get_num_rows(),
A.row_offsets.raw(),
A.col_indices.raw(),
A.values.raw(),
this->m_c_inv.raw(),
rnd_rows.raw(),
b.raw(),
x_ptr,
x_ptr,
offset);
}
else
{
kaczmarz_smooth_kernel_warp_atomics<IndexType, ValueTypeA, ValueTypeB, threads_per_block> <<< num_blocks, threads_per_block >>>
(A.get_num_rows(),
A.row_offsets.raw(),
A.col_indices.raw(),
A.values.raw(),
this->m_an.raw(),
b.raw(),
x_ptr,
x_ptr,
offset);
}
cudaCheckError();
}
// Dispatch for one 1x1 smoothing sweep: use the multicolor variant when a
// coloring was requested/built, otherwise fall back to the naive sweep.
template <AMGX_VecPrecision t_vecPrec, AMGX_MatPrecision t_matPrec, AMGX_IndPrecision t_indPrec>
void KaczmarzSolver<TemplateConfig<AMGX_device, t_vecPrec, t_matPrec, t_indPrec> >::smooth_1x1(Matrix_d &A, VVector &b, VVector &x, ViewType separation_flags, bool latency_hiding)
{
if (!this->m_coloring_needed)
{
smooth_1x1_naive(A, b, x, separation_flags, latency_hiding);
return;
}
smooth_1x1_MC(A, b, x, separation_flags, latency_hiding);
}
// Special-cased sweep for a zero initial guess. Currently inert: it only
// computes the view offsets and restores the previous view without touching x.
// NOTE(review): solve_iteration calls smooth_1x1 instead (its call to this
// function is commented out there) - confirm whether this stub is still needed.
template <AMGX_VecPrecision t_vecPrec, AMGX_MatPrecision t_matPrec, AMGX_IndPrecision t_indPrec>
void KaczmarzSolver<TemplateConfig<AMGX_device, t_vecPrec, t_matPrec, t_indPrec> >::smooth_with_0_initial_guess_1x1(Matrix_d &A, VVector &b, VVector &x, ViewType separation_flags)
{
ViewType oldView = A.currentView();
// Process all rows
A.setViewExterior();
ViewType flags = A.getViewExterior();
int offset, num_rows;
A.getOffsetAndSizeForView(flags, &offset, &num_rows);
A.setView(oldView);
cudaCheckError();
}
/****************************************
* Explict instantiations
***************************************/
#define AMGX_CASE_LINE(CASE) template class KaczmarzSolver_Base<TemplateMode<CASE>::Type>;
AMGX_FORALL_BUILDS(AMGX_CASE_LINE)
#undef AMGX_CASE_LINE
#define AMGX_CASE_LINE(CASE) template class KaczmarzSolver<TemplateMode<CASE>::Type>;
AMGX_FORALL_BUILDS(AMGX_CASE_LINE)
#undef AMGX_CASE_LINE
} // namespace amgx
|
c796862f0f69e6e353ba044b96eec6934245fff7.hip
|
// !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
#include <stdio.h>
#include "MM2chainKernel.h"
// Chained 2x2 matrix product, one block per chain segment of B*C matrices
// (row-major, 4 floats each: m00 m01 m10 m11). Each of the B threads
// multiplies its C consecutive matrices, then thread 0 combines the B
// per-thread partial products in thread order and writes the block's 2x2
// result to partial_mult[4*blockIdx.x .. +3].
// Expects a launch with B threads per block; B and C come from MM2chainKernel.h.
__global__ void multiply(const float* A, float* partial_mult ){
    int block = blockIdx.x;
    int thread = threadIdx.x;
    __shared__ float shared_A[B*C*4];       // this block's slice of the chain
    __shared__ float shared_results[B*4];   // one 2x2 partial product per thread
    const float* AStart = A + ((B)*(blockIdx.x)*C*4);
    // Stage this thread's C matrices into shared memory.
    for( int i = 0; i<C*4; i++){
        shared_A[i+thread*C*4] = AStart[i+thread*C*4];
    }
    __syncthreads();
    // current starts as the 2x2 identity; accumulate current = current * mat.
    float current[] = { 1.0f, 0.0f, 0.0f, 1.0f };
    float temp[4];
    for(int i = 0; i<C*4; i += 4){
        float mat00 = shared_A[i   + C*4*thread];
        float mat01 = shared_A[i+1 + C*4*thread];
        float mat10 = shared_A[i+2 + C*4*thread];
        float mat11 = shared_A[i+3 + C*4*thread];
        temp[0] = current[0] * mat00 + current[1] * mat10;
        temp[1] = current[0] * mat01 + current[1] * mat11;
        temp[2] = current[2] * mat00 + current[3] * mat10;
        temp[3] = current[2] * mat01 + current[3] * mat11;
        current[0] = temp[0]; current[1] = temp[1];
        current[2] = temp[2]; current[3] = temp[3];
    }
    shared_results[thread*4    ] = current[0];
    shared_results[thread*4 + 1] = current[1];
    shared_results[thread*4 + 2] = current[2];
    shared_results[thread*4 + 3] = current[3];
    __syncthreads();
    if(thread == 0){
        // BUG FIX: the reduction previously indexed shared_results with
        // threadIdx.x (always 0 here) and stepped the loop variable by 4 up
        // to B, so it never combined the other threads' partial products.
        // Walk all B partials in thread order instead.
        float acc[] = { 1.0f, 0.0f, 0.0f, 1.0f };
        for(int t = 0; t < B; ++t){
            float m00 = shared_results[t*4];
            float m01 = shared_results[t*4 + 1];
            float m10 = shared_results[t*4 + 2];
            float m11 = shared_results[t*4 + 3];
            temp[0] = acc[0] * m00 + acc[1] * m10;
            temp[1] = acc[0] * m01 + acc[1] * m11;
            temp[2] = acc[2] * m00 + acc[3] * m10;
            temp[3] = acc[2] * m01 + acc[3] * m11;
            acc[0] = temp[0]; acc[1] = temp[1];
            acc[2] = temp[2]; acc[3] = temp[3];
        }
        float* partial_mult_start = partial_mult + 4 * block;
        partial_mult_start[0] = acc[0];
        partial_mult_start[1] = acc[1];
        partial_mult_start[2] = acc[2];
        partial_mult_start[3] = acc[3];
    }
}
|
c796862f0f69e6e353ba044b96eec6934245fff7.cu
|
#include <stdio.h>
#include "MM2chainKernel.h"
// Chained 2x2 matrix product, one block per chain segment of B*C matrices
// (row-major, 4 floats each: m00 m01 m10 m11). Each of the B threads
// multiplies its C consecutive matrices, then thread 0 combines the B
// per-thread partial products in thread order and writes the block's 2x2
// result to partial_mult[4*blockIdx.x .. +3].
// Expects a launch with B threads per block; B and C come from MM2chainKernel.h.
__global__ void multiply(const float* A, float* partial_mult ){
    int block = blockIdx.x;
    int thread = threadIdx.x;
    __shared__ float shared_A[B*C*4];       // this block's slice of the chain
    __shared__ float shared_results[B*4];   // one 2x2 partial product per thread
    const float* AStart = A + ((B)*(blockIdx.x)*C*4);
    // Stage this thread's C matrices into shared memory.
    for( int i = 0; i<C*4; i++){
        shared_A[i+thread*C*4] = AStart[i+thread*C*4];
    }
    __syncthreads();
    // current starts as the 2x2 identity; accumulate current = current * mat.
    float current[] = { 1.0f, 0.0f, 0.0f, 1.0f };
    float temp[4];
    for(int i = 0; i<C*4; i += 4){
        float mat00 = shared_A[i   + C*4*thread];
        float mat01 = shared_A[i+1 + C*4*thread];
        float mat10 = shared_A[i+2 + C*4*thread];
        float mat11 = shared_A[i+3 + C*4*thread];
        temp[0] = current[0] * mat00 + current[1] * mat10;
        temp[1] = current[0] * mat01 + current[1] * mat11;
        temp[2] = current[2] * mat00 + current[3] * mat10;
        temp[3] = current[2] * mat01 + current[3] * mat11;
        current[0] = temp[0]; current[1] = temp[1];
        current[2] = temp[2]; current[3] = temp[3];
    }
    shared_results[thread*4    ] = current[0];
    shared_results[thread*4 + 1] = current[1];
    shared_results[thread*4 + 2] = current[2];
    shared_results[thread*4 + 3] = current[3];
    __syncthreads();
    if(thread == 0){
        // BUG FIX: the reduction previously indexed shared_results with
        // threadIdx.x (always 0 here) and stepped the loop variable by 4 up
        // to B, so it never combined the other threads' partial products.
        // Walk all B partials in thread order instead.
        float acc[] = { 1.0f, 0.0f, 0.0f, 1.0f };
        for(int t = 0; t < B; ++t){
            float m00 = shared_results[t*4];
            float m01 = shared_results[t*4 + 1];
            float m10 = shared_results[t*4 + 2];
            float m11 = shared_results[t*4 + 3];
            temp[0] = acc[0] * m00 + acc[1] * m10;
            temp[1] = acc[0] * m01 + acc[1] * m11;
            temp[2] = acc[2] * m00 + acc[3] * m10;
            temp[3] = acc[2] * m01 + acc[3] * m11;
            acc[0] = temp[0]; acc[1] = temp[1];
            acc[2] = temp[2]; acc[3] = temp[3];
        }
        float* partial_mult_start = partial_mult + 4 * block;
        partial_mult_start[0] = acc[0];
        partial_mult_start[1] = acc[1];
        partial_mult_start[2] = acc[2];
        partial_mult_start[3] = acc[3];
    }
}
|
e9d6da1bfbbdf84878fad0e22e22c434311f9d6e.hip
|
// !!! This is a file automatically generated by hipify!!!
#include <stdio.h>
#include <math.h>
#include <stdint.h>
#include <stdlib.h>
#include <hip/hip_runtime.h>
#define WID 1024
#define HEI 1024
#pragma pack(push,1)
// BMP file header, packed to its 14-byte on-disk layout.
// NOTE: the field is named bf0ffBits (digit zero) in this codebase; it holds
// the byte offset from the start of the file to the pixel data.
typedef struct tagBITMAPFILEHEADER
{
unsigned short bfType;      // magic: 19778 == 'BM'
uint32_t bfSize;            // total file size in bytes
unsigned short bfReserved1;
unsigned short bfReserved2;
uint32_t bf0ffBits;         // offset to pixel data
}BITMAPFILEHEADER;
#pragma pack(pop)
// BMP info header (BITMAPINFOHEADER, 40 bytes; all members naturally aligned).
typedef struct tagBITMAPINFOHEADER
{
uint32_t biSize;
int32_t biWidth;
int32_t biHeight;
unsigned short biPlanes;
unsigned short biBitCount;  // bits per pixel (8 here -> paletted grayscale)
uint32_t biCompression;
uint32_t biSizeImage;
int32_t biXPelsPerMeter;
int32_t biYPelsPerMeter;
uint32_t biCirUsed;
uint32_t biCirImportant;
}BITMAPINFOHEADER;
// One palette entry (stored blue, green, red, reserved).
typedef struct tagRGBQUAD
{
unsigned char rgbBlue;
unsigned char rgbGreen;
unsigned char rgbRed;
unsigned char rgbReserved;
}RGBQUAD;
// Header + palette as written to the file.
typedef struct tagBITMAPINFO
{
BITMAPINFOHEADER bmiHeader;
RGBQUAD bmiColors[1];
}BITMAPINFO;
// Computes one hologram row per thread: for each pixel (j, i) accumulate the
// Fresnel-zone cosine contribution of each of the *tensuu_d object points.
// Generalized: the row index now uses blockDim.x instead of a hard-coded 128
// (identical behavior for the existing 8x128 launch), and a bounds guard
// tolerates launches that overshoot HEI rows.
__global__ void distance_gpu(int *x_d,int *y_d,float *z_d,float *img_buf_d,int *tensuu_d)
{
    int i,j,k;
    i = blockIdx.x*blockDim.x + threadIdx.x;  // row index (was blockIdx.x*128)
    if (i >= HEI) {
        return;                               // guard a partial last block
    }
    float kankaku,hatyou,goukei,pi;
    hatyou=0.633F;                            // wavelength -- units: TODO confirm
    kankaku=10.5F;                            // sampling pitch -- units: TODO confirm
    pi=3.14159265F;
    goukei=2.0F*pi*kankaku/hatyou;
    float dx,dy,tmp;
    for(j=0;j<WID;j++){
        tmp=0.0F;
        for(k=0;k<*tensuu_d;k++){
            dx=(float)(x_d[k]-j);
            dy=(float)(y_d[k]-i);
            // cosf == the float overload of cos used before; no change in result
            tmp=tmp+cosf(goukei*0.5F*(dx*dx+dy*dy)/z_d[k]);
        }
        img_buf_d[i*WID+j] = tmp;
    }
}
// Reads object points from cubex.3d, computes a 1024x1024 computer-generated
// hologram on the GPU, binarizes it around the mid intensity, and writes an
// 8-bit grayscale BMP (cgh_root_gpu.bmp).
int main(){
    int tensuu;                        // number of object points in the input
    BITMAPFILEHEADER BmpFileHeader;
    BITMAPINFOHEADER BmpInfoHeader;
    RGBQUAD RGBQuad[256];
    FILE *fp;
    int i,j;
    // 8-bit BMP: 14-byte file header + 40-byte info header + 1024-byte palette.
    BmpFileHeader.bfType =19778;       // 'BM'
    BmpFileHeader.bfSize =14+40+1024+(WID*HEI);
    BmpFileHeader.bfReserved1 =0;
    BmpFileHeader.bfReserved2 =0;
    BmpFileHeader.bf0ffBits =14+40+1024;
    BmpInfoHeader.biSize =40;
    BmpInfoHeader.biWidth =WID;
    BmpInfoHeader.biHeight =HEI;
    BmpInfoHeader.biPlanes =1;
    BmpInfoHeader.biBitCount =8; //256 gray levels
    BmpInfoHeader.biCompression =0L;
    BmpInfoHeader.biSizeImage =0L;
    BmpInfoHeader.biXPelsPerMeter =0L;
    BmpInfoHeader.biYPelsPerMeter =0L;
    BmpInfoHeader.biCirUsed =0L;
    BmpInfoHeader.biCirImportant =0L;
    for(i=0;i<256;i++){                // identity grayscale palette
        RGBQuad[i].rgbBlue =i;
        RGBQuad[i].rgbGreen =i;
        RGBQuad[i].rgbRed =i;
        RGBQuad[i].rgbReserved =0;
    }
    fp=fopen("cubex.3d","rb");
    if(fp==NULL){
        printf("File Open ERROR\n");
        return 1;                      // BUG FIX: used to fall through and crash in fread
    }
    if(fread(&tensuu,sizeof(int),1,fp)!=1 || tensuu<=0){
        printf("File Read ERROR\n");   // BUG FIX: validate the point count
        fclose(fp);
        return 1;
    }
    printf("num=%d\n",tensuu);
    int x[tensuu];
    int y[tensuu];
    float z[tensuu];
    int *tensuu_d;
    hipMalloc((void**)&tensuu_d,sizeof(int));
    hipMemcpy(tensuu_d,&tensuu,sizeof(int),hipMemcpyHostToDevice);
    int *x_d,*y_d;
    float *z_d;
    float *img_buf_d;
    dim3 blocks(8,1,1);                // 8 blocks x 128 threads = HEI rows
    dim3 threads(128,1,1);
    int x_buf,y_buf,z_buf;
    // Scale object coordinates into the hologram plane; z into the viewing
    // distance (per-record read errors are not checked -- TODO confirm format).
    for(i=0;i<tensuu;i++){
        fread(&x_buf,sizeof(int),1,fp);
        fread(&y_buf,sizeof(int),1,fp);
        fread(&z_buf,sizeof(int),1,fp);
        x[i]=x_buf*40+512;
        y[i]=y_buf*40+512;
        z[i]=((float)z_buf)*40+100000.0F;
    }
    fclose(fp);
    hipMalloc((void**)&x_d,tensuu*sizeof(int));
    hipMalloc((void**)&y_d,tensuu*sizeof(int));
    hipMalloc((void**)&z_d,tensuu*sizeof(float));
    hipMalloc((void**)&img_buf_d,WID*HEI*sizeof(float));
    float *img_buf;
    img_buf=(float *)malloc(sizeof(float)*WID*HEI);
    for(i=0;i<WID*HEI;i++){
        img_buf[i]=0.0F;
    }
    hipMemcpy(x_d,x,tensuu*sizeof(int),hipMemcpyHostToDevice);
    hipMemcpy(y_d,y,tensuu*sizeof(int),hipMemcpyHostToDevice);
    hipMemcpy(z_d,z,tensuu*sizeof(float),hipMemcpyHostToDevice);
    hipMemcpy(img_buf_d,img_buf,WID*HEI*sizeof(float),hipMemcpyHostToDevice);
    hipLaunchKernelGGL(( distance_gpu), dim3(blocks),dim3(threads), 0, 0, x_d,y_d,z_d,img_buf_d,tensuu_d);
    // Blocking copy: also synchronizes with the kernel above.
    hipMemcpy(img_buf,img_buf_d,WID*HEI*sizeof(float),hipMemcpyDeviceToHost);
    // Binarize around the midpoint of the observed intensity range.
    float min,max,mid;
    min=img_buf[0];
    max=img_buf[0];
    for(i=0;i<HEI;i++){
        for(j=0;j<WID;j++){
            if(min>img_buf[i*WID+j]){
                min=img_buf[i*WID+j];
            }
            if(max<img_buf[i*WID+j]){
                max=img_buf[i*WID+j];
            }
        }
    }
    mid=0.5F*(min+max);
    printf("min = %lf max = %lf mid = %lf\n",min,max,mid);
    unsigned char *img;
    img=(unsigned char *)malloc(sizeof(unsigned char)*WID*HEI);
    for(i=0;i<WID*HEI;i++){
        // BUG FIX: pixels exactly equal to mid were left uninitialized before.
        img[i] = (img_buf[i]<mid) ? 0 : 255;
    }
    FILE *fp1;
    fp1=fopen("cgh_root_gpu.bmp","wb");
    if(fp1==NULL){
        printf("\n");
        free(img);
        free(img_buf);
        return 1;                      // BUG FIX: used to fall through and crash in fwrite
    }
    fwrite(&BmpFileHeader, sizeof(BmpFileHeader) , 1 ,fp1);
    fwrite(&BmpInfoHeader, sizeof(BmpInfoHeader) , 1 ,fp1);
    fwrite(&RGBQuad[0], sizeof(RGBQuad[0]) , 256 ,fp1);
    fwrite(img,sizeof(unsigned char),WID*HEI,fp1);
    free(img);
    free(img_buf);
    fclose(fp1);
    hipFree(tensuu_d);
    hipFree(x_d);
    hipFree(y_d);
    hipFree(z_d);
    hipFree(img_buf_d);
    return 0;
}
|
e9d6da1bfbbdf84878fad0e22e22c434311f9d6e.cu
|
#include <stdio.h>
#include <math.h>
#include <stdint.h>
#include <stdlib.h>
#include <cuda.h>
#define WID 1024
#define HEI 1024
#pragma pack(push,1)
// BMP file header, packed to its 14-byte on-disk layout.
// NOTE: the field is named bf0ffBits (digit zero) in this codebase; it holds
// the byte offset from the start of the file to the pixel data.
typedef struct tagBITMAPFILEHEADER
{
unsigned short bfType;      // magic: 19778 == 'BM'
uint32_t bfSize;            // total file size in bytes
unsigned short bfReserved1;
unsigned short bfReserved2;
uint32_t bf0ffBits;         // offset to pixel data
}BITMAPFILEHEADER;
#pragma pack(pop)
// BMP info header (BITMAPINFOHEADER, 40 bytes; all members naturally aligned).
typedef struct tagBITMAPINFOHEADER
{
uint32_t biSize;
int32_t biWidth;
int32_t biHeight;
unsigned short biPlanes;
unsigned short biBitCount;  // bits per pixel (8 here -> paletted grayscale)
uint32_t biCompression;
uint32_t biSizeImage;
int32_t biXPelsPerMeter;
int32_t biYPelsPerMeter;
uint32_t biCirUsed;
uint32_t biCirImportant;
}BITMAPINFOHEADER;
// One palette entry (stored blue, green, red, reserved).
typedef struct tagRGBQUAD
{
unsigned char rgbBlue;
unsigned char rgbGreen;
unsigned char rgbRed;
unsigned char rgbReserved;
}RGBQUAD;
// Header + palette as written to the file.
typedef struct tagBITMAPINFO
{
BITMAPINFOHEADER bmiHeader;
RGBQUAD bmiColors[1];
}BITMAPINFO;
// Computes one hologram row per thread: for each pixel (j, i) accumulate the
// Fresnel-zone cosine contribution of each of the *tensuu_d object points.
// Generalized: the row index now uses blockDim.x instead of a hard-coded 128
// (identical behavior for the existing 8x128 launch), and a bounds guard
// tolerates launches that overshoot HEI rows.
__global__ void distance_gpu(int *x_d,int *y_d,float *z_d,float *img_buf_d,int *tensuu_d)
{
    int i,j,k;
    i = blockIdx.x*blockDim.x + threadIdx.x;  // row index (was blockIdx.x*128)
    if (i >= HEI) {
        return;                               // guard a partial last block
    }
    float kankaku,hatyou,goukei,pi;
    hatyou=0.633F;                            // wavelength -- units: TODO confirm
    kankaku=10.5F;                            // sampling pitch -- units: TODO confirm
    pi=3.14159265F;
    goukei=2.0F*pi*kankaku/hatyou;
    float dx,dy,tmp;
    for(j=0;j<WID;j++){
        tmp=0.0F;
        for(k=0;k<*tensuu_d;k++){
            dx=(float)(x_d[k]-j);
            dy=(float)(y_d[k]-i);
            // cosf == the float overload of cos used before; no change in result
            tmp=tmp+cosf(goukei*0.5F*(dx*dx+dy*dy)/z_d[k]);
        }
        img_buf_d[i*WID+j] = tmp;
    }
}
// Reads object points from cubex.3d, computes a 1024x1024 computer-generated
// hologram on the GPU, binarizes it around the mid intensity, and writes an
// 8-bit grayscale BMP (cgh_root_gpu.bmp).
int main(){
    int tensuu;                        // number of object points in the input
    BITMAPFILEHEADER BmpFileHeader;
    BITMAPINFOHEADER BmpInfoHeader;
    RGBQUAD RGBQuad[256];
    FILE *fp;
    int i,j;
    // 8-bit BMP: 14-byte file header + 40-byte info header + 1024-byte palette.
    BmpFileHeader.bfType =19778;       // 'BM'
    BmpFileHeader.bfSize =14+40+1024+(WID*HEI);
    BmpFileHeader.bfReserved1 =0;
    BmpFileHeader.bfReserved2 =0;
    BmpFileHeader.bf0ffBits =14+40+1024;
    BmpInfoHeader.biSize =40;
    BmpInfoHeader.biWidth =WID;
    BmpInfoHeader.biHeight =HEI;
    BmpInfoHeader.biPlanes =1;
    BmpInfoHeader.biBitCount =8; //256 gray levels
    BmpInfoHeader.biCompression =0L;
    BmpInfoHeader.biSizeImage =0L;
    BmpInfoHeader.biXPelsPerMeter =0L;
    BmpInfoHeader.biYPelsPerMeter =0L;
    BmpInfoHeader.biCirUsed =0L;
    BmpInfoHeader.biCirImportant =0L;
    for(i=0;i<256;i++){                // identity grayscale palette
        RGBQuad[i].rgbBlue =i;
        RGBQuad[i].rgbGreen =i;
        RGBQuad[i].rgbRed =i;
        RGBQuad[i].rgbReserved =0;
    }
    fp=fopen("cubex.3d","rb");
    if(fp==NULL){
        printf("File Open ERROR\n");
        return 1;                      // BUG FIX: used to fall through and crash in fread
    }
    if(fread(&tensuu,sizeof(int),1,fp)!=1 || tensuu<=0){
        printf("File Read ERROR\n");   // BUG FIX: validate the point count
        fclose(fp);
        return 1;
    }
    printf("num=%d\n",tensuu);
    int x[tensuu];
    int y[tensuu];
    float z[tensuu];
    int *tensuu_d;
    cudaMalloc((void**)&tensuu_d,sizeof(int));
    cudaMemcpy(tensuu_d,&tensuu,sizeof(int),cudaMemcpyHostToDevice);
    int *x_d,*y_d;
    float *z_d;
    float *img_buf_d;
    dim3 blocks(8,1,1);                // 8 blocks x 128 threads = HEI rows
    dim3 threads(128,1,1);
    int x_buf,y_buf,z_buf;
    // Scale object coordinates into the hologram plane; z into the viewing
    // distance (per-record read errors are not checked -- TODO confirm format).
    for(i=0;i<tensuu;i++){
        fread(&x_buf,sizeof(int),1,fp);
        fread(&y_buf,sizeof(int),1,fp);
        fread(&z_buf,sizeof(int),1,fp);
        x[i]=x_buf*40+512;
        y[i]=y_buf*40+512;
        z[i]=((float)z_buf)*40+100000.0F;
    }
    fclose(fp);
    cudaMalloc((void**)&x_d,tensuu*sizeof(int));
    cudaMalloc((void**)&y_d,tensuu*sizeof(int));
    cudaMalloc((void**)&z_d,tensuu*sizeof(float));
    cudaMalloc((void**)&img_buf_d,WID*HEI*sizeof(float));
    float *img_buf;
    img_buf=(float *)malloc(sizeof(float)*WID*HEI);
    for(i=0;i<WID*HEI;i++){
        img_buf[i]=0.0F;
    }
    cudaMemcpy(x_d,x,tensuu*sizeof(int),cudaMemcpyHostToDevice);
    cudaMemcpy(y_d,y,tensuu*sizeof(int),cudaMemcpyHostToDevice);
    cudaMemcpy(z_d,z,tensuu*sizeof(float),cudaMemcpyHostToDevice);
    cudaMemcpy(img_buf_d,img_buf,WID*HEI*sizeof(float),cudaMemcpyHostToDevice);
    distance_gpu<<<blocks,threads>>>(x_d,y_d,z_d,img_buf_d,tensuu_d);
    // Blocking copy: also synchronizes with the kernel above.
    cudaMemcpy(img_buf,img_buf_d,WID*HEI*sizeof(float),cudaMemcpyDeviceToHost);
    // Binarize around the midpoint of the observed intensity range.
    float min,max,mid;
    min=img_buf[0];
    max=img_buf[0];
    for(i=0;i<HEI;i++){
        for(j=0;j<WID;j++){
            if(min>img_buf[i*WID+j]){
                min=img_buf[i*WID+j];
            }
            if(max<img_buf[i*WID+j]){
                max=img_buf[i*WID+j];
            }
        }
    }
    mid=0.5F*(min+max);
    printf("min = %lf max = %lf mid = %lf\n",min,max,mid);
    unsigned char *img;
    img=(unsigned char *)malloc(sizeof(unsigned char)*WID*HEI);
    for(i=0;i<WID*HEI;i++){
        // BUG FIX: pixels exactly equal to mid were left uninitialized before.
        img[i] = (img_buf[i]<mid) ? 0 : 255;
    }
    FILE *fp1;
    fp1=fopen("cgh_root_gpu.bmp","wb");
    if(fp1==NULL){
        printf("ファイルオープンエラー\n");
        free(img);
        free(img_buf);
        return 1;                      // BUG FIX: used to fall through and crash in fwrite
    }
    fwrite(&BmpFileHeader, sizeof(BmpFileHeader) , 1 ,fp1);
    fwrite(&BmpInfoHeader, sizeof(BmpInfoHeader) , 1 ,fp1);
    fwrite(&RGBQuad[0], sizeof(RGBQuad[0]) , 256 ,fp1);
    fwrite(img,sizeof(unsigned char),WID*HEI,fp1);
    free(img);
    free(img_buf);
    fclose(fp1);
    cudaFree(tensuu_d);
    cudaFree(x_d);
    cudaFree(y_d);
    cudaFree(z_d);
    cudaFree(img_buf_d);
    return 0;
}
|
43321d02df8e46a6113fe9a641413c01f27e8250.hip
|
// !!! This is a file automatically generated by hipify!!!
#include <call_kernel.h>
//xfail:BUGLE_ERROR
//--gridDim=1 --blockDim=32 --no-inline
//This kernel is racy: memset is called with variable length.
//#define memset(dst,val,len) __builtin_memset(dst,val,len)
#define N 2//32
#include <stdio.h>
#include <hip/hip_runtime.h>
#include <hip/hip_runtime_api.h>
// Returns an indeterminate value: `value` is deliberately left uninitialized
// so the verifier (see the xfail header of this testcase) can explore
// arbitrary memset lengths in the kernel below. Do not "fix".
__device__ int bar(void){
int value;
return value;
}
// Deliberately buggy verification testcase: the memset length comes from
// bar()'s indeterminate return, so the write into `vector` (and hence `out`)
// is unconstrained - this is the defect the checker is expected to report.
__global__ void kernel(uint4 *out) {
uint4 vector;
int len = bar();
memset(&vector, 5, len); /*modify manually the value of len to see the bugs*/
out[threadIdx.x] = vector;
}
// Driver for the ESBMC verification testbench: initializes N uint4 values,
// copies them to the device, and checks `kernel` through
// ESBMC_verify_kernel_u instead of a direct launch (kept commented out below).
int main(){
uint4 *a;
uint4 *dev_a;
int size = N*sizeof(uint4);
a = (uint4*)malloc(size);
/* initialization of a */
for (int i = 0; i < N; i++) {
a[i].x = i; a[i].y = i; a[i].z = i, a[i].w = i;
}
hipMalloc((void**)&dev_a, size);
hipMemcpy(dev_a,a,size, hipMemcpyHostToDevice);
/* printf("a:\n");
for (int i = 0; i < N; i++)
printf("a[%d].x : %d \ta[%d].y : %d\ta[%d].z : %d\ta[%d].w : %d\n", i, a[i].x, i, a[i].y, i, a[i].z, i, a[i].w);
*/
// kernel<<<1,N>>>(dev_a);
ESBMC_verify_kernel_u(kernel,1,N,dev_a);
hipMemcpy(a,dev_a,size,hipMemcpyDeviceToHost);
/* printf("new a:\n");
for (int i = 0; i < N; i++)
printf("a[%d].x : %d \ta[%d].y : %d\ta[%d].z : %d\ta[%d].w : %d\n", i, a[i].x, i, a[i].y, i, a[i].z, i, a[i].w);
*/
hipFree(dev_a);
free(a);
return 0;
}
|
43321d02df8e46a6113fe9a641413c01f27e8250.cu
|
#include <call_kernel.h>
//xfail:BUGLE_ERROR
//--gridDim=1 --blockDim=32 --no-inline
//This kernel is racy: memset is called with variable length.
//#define memset(dst,val,len) __builtin_memset(dst,val,len)
#define N 2//32
#include <stdio.h>
#include <cuda.h>
#include <cuda_runtime_api.h>
// Returns an indeterminate value: `value` is deliberately left uninitialized
// so the verifier (see the xfail header of this testcase) can explore
// arbitrary memset lengths in the kernel below. Do not "fix".
__device__ int bar(void){
int value;
return value;
}
// Deliberately buggy verification testcase: the memset length comes from
// bar()'s indeterminate return, so the write into `vector` (and hence `out`)
// is unconstrained - this is the defect the checker is expected to report.
__global__ void kernel(uint4 *out) {
uint4 vector;
int len = bar();
memset(&vector, 5, len); /*modify manually the value of len to see the bugs*/
out[threadIdx.x] = vector;
}
// Driver for the ESBMC verification testbench: initializes N uint4 values,
// copies them to the device, and checks `kernel` through
// ESBMC_verify_kernel_u instead of a direct launch (kept commented out below).
int main(){
uint4 *a;
uint4 *dev_a;
int size = N*sizeof(uint4);
a = (uint4*)malloc(size);
/* initialization of a */
for (int i = 0; i < N; i++) {
a[i].x = i; a[i].y = i; a[i].z = i, a[i].w = i;
}
cudaMalloc((void**)&dev_a, size);
cudaMemcpy(dev_a,a,size, cudaMemcpyHostToDevice);
/* printf("a:\n");
for (int i = 0; i < N; i++)
printf("a[%d].x : %d \ta[%d].y : %d\ta[%d].z : %d\ta[%d].w : %d\n", i, a[i].x, i, a[i].y, i, a[i].z, i, a[i].w);
*/
// kernel<<<1,N>>>(dev_a);
ESBMC_verify_kernel_u(kernel,1,N,dev_a);
cudaMemcpy(a,dev_a,size,cudaMemcpyDeviceToHost);
/* printf("new a:\n");
for (int i = 0; i < N; i++)
printf("a[%d].x : %d \ta[%d].y : %d\ta[%d].z : %d\ta[%d].w : %d\n", i, a[i].x, i, a[i].y, i, a[i].z, i, a[i].w);
*/
cudaFree(dev_a);
free(a);
return 0;
}
|
cc5ca667a022ec8045bd2ef9e60d5926cfefadb3.hip
|
// !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
#include <math.h>
#include <stdio.h>
#include <stdlib.h>
#include "timer.h"
#define BLOCK_SIZE 256
#define SOFTENING 1e-9f
typedef struct { float4 *pos, *vel; } BodySystem;
// Fill data[0..n) with uniform pseudo-random floats in [-1, 1].
void randomizeBodies(float *data, int n) {
  float *end = data + n;
  for (float *p = data; p != end; ++p) {
    float u = rand() / (float)RAND_MAX;  // uniform in [0, 1]
    *p = 2.0f * u - 1.0f;                // remap to [-1, 1]
  }
}
// All-pairs gravitational force with shared-memory tiling; accumulates the
// velocity update v[i] += dt * F(i) for each of n bodies.
// BUG FIX: the original wrapped the whole body in `if (i < n)`, which placed
// __syncthreads() inside divergent control flow (undefined behavior whenever
// n is not a multiple of blockDim.x, e.g. the default n=30000 with 256-thread
// blocks) and read p[] past the n-th body on the last tile. Barriers now run
// unconditionally and tile loads/iterations are clamped to valid bodies.
__global__
void bodyForce(float4 *p, float4 *v, float dt, int n) {
  int i = blockDim.x * blockIdx.x + threadIdx.x;
  float Fx = 0.0f; float Fy = 0.0f; float Fz = 0.0f;
  for (int tile = 0; tile < gridDim.x; tile++) {
    __shared__ float3 spos[BLOCK_SIZE];
    int src = tile * blockDim.x + threadIdx.x;
    if (src < n) {                       // clamp the staging load
      float4 tpos = p[src];
      spos[threadIdx.x] = make_float3(tpos.x, tpos.y, tpos.z);
    }
    __syncthreads();                     // all threads reach the barrier
    if (i < n) {
      int count = min(BLOCK_SIZE, n - tile * (int)blockDim.x);
      for (int j = 0; j < count; j++) {  // only valid bodies in this tile
        float dx = spos[j].x - p[i].x;
        float dy = spos[j].y - p[i].y;
        float dz = spos[j].z - p[i].z;
        float distSqr = dx*dx + dy*dy + dz*dz + SOFTENING;
        float invDist = rsqrtf(distSqr);
        float invDist3 = invDist * invDist * invDist;
        Fx += dx * invDist3; Fy += dy * invDist3; Fz += dz * invDist3;
      }
    }
    __syncthreads();
  }
  if (i < n) {
    v[i].x += dt*Fx; v[i].y += dt*Fy; v[i].z += dt*Fz;
  }
}
// N-body benchmark driver: positions then velocities share one buffer; forces
// are computed on the GPU, positions integrated on the host, and the average
// throughput of iterations 2..nIters is reported (iteration 1 is warm-up).
int main(const int argc, const char** argv) {
  int nBodies = 30000;
  if (argc > 1) nBodies = atoi(argv[1]);
  const float dt = 0.01f; // time step
  const int nIters = 10;  // simulation iterations
  int bytes = 2*nBodies*sizeof(float4);
  float *buf = (float*)malloc(bytes);
  BodySystem p = { (float4*)buf, ((float4*)buf) + nBodies };
  randomizeBodies(buf, 8*nBodies); // Init pos / vel data
  float *d_buf;
  hipMalloc(&d_buf, bytes);
  BodySystem d_p = { (float4*)d_buf, ((float4*)d_buf) + nBodies };
  int nBlocks = (nBodies + BLOCK_SIZE - 1) / BLOCK_SIZE;
  double totalTime = 0.0;
  for (int iter = 1; iter <= nIters; iter++) {
    StartTimer();
    hipMemcpy(d_buf, buf, bytes, hipMemcpyHostToDevice);
    hipLaunchKernelGGL(( bodyForce), dim3(nBlocks), dim3(BLOCK_SIZE), 0, 0, d_p.pos, d_p.vel, dt, nBodies);
    hipMemcpy(buf, d_buf, bytes, hipMemcpyDeviceToHost); // blocks until kernel done
    for (int i = 0 ; i < nBodies; i++) { // integrate position
      p.pos[i].x += p.vel[i].x*dt;
      p.pos[i].y += p.vel[i].y*dt;
      p.pos[i].z += p.vel[i].z*dt;
    }
    const double tElapsed = GetTimer() / 1000.0;
    if (iter > 1) { // First iter is warm up
      totalTime += tElapsed;
    }
#ifndef SHMOO
    printf("Iteration %d: %.3f seconds\n", iter, tElapsed);
#endif
  }
  double avgTime = totalTime / (double)(nIters-1);
#ifdef SHMOO
  printf("%d, %0.3f\n", nBodies, 1e-9 * nBodies * nBodies / avgTime);
#else
  // BUG FIX: this printf referenced an undeclared variable `rate` and had one
  // more %f specifier than arguments, so the default (non-SHMOO) build did
  // not compile. Report the measured iteration rate instead.
  printf("Average rate for iterations 2 through %d: %.3f steps per second.\n",
         nIters, 1.0 / avgTime);
  printf("%d Bodies: average %0.3f Billion Interactions / second\n", nBodies, 1e-9 * nBodies * nBodies / avgTime);
#endif
  free(buf);
  hipFree(d_buf);
  return 0;
}
|
cc5ca667a022ec8045bd2ef9e60d5926cfefadb3.cu
|
#include <math.h>
#include <stdio.h>
#include <stdlib.h>
#include "timer.h"
#define BLOCK_SIZE 256
#define SOFTENING 1e-9f
typedef struct { float4 *pos, *vel; } BodySystem;
// Fill data[0..n) with uniform pseudo-random floats in [-1, 1].
void randomizeBodies(float *data, int n) {
  float *end = data + n;
  for (float *p = data; p != end; ++p) {
    float u = rand() / (float)RAND_MAX;  // uniform in [0, 1]
    *p = 2.0f * u - 1.0f;                // remap to [-1, 1]
  }
}
// All-pairs gravitational force with shared-memory tiling; accumulates the
// velocity update v[i] += dt * F(i) for each of n bodies.
// BUG FIX: the original wrapped the whole body in `if (i < n)`, which placed
// __syncthreads() inside divergent control flow (undefined behavior whenever
// n is not a multiple of blockDim.x, e.g. the default n=30000 with 256-thread
// blocks) and read p[] past the n-th body on the last tile. Barriers now run
// unconditionally and tile loads/iterations are clamped to valid bodies.
__global__
void bodyForce(float4 *p, float4 *v, float dt, int n) {
  int i = blockDim.x * blockIdx.x + threadIdx.x;
  float Fx = 0.0f; float Fy = 0.0f; float Fz = 0.0f;
  for (int tile = 0; tile < gridDim.x; tile++) {
    __shared__ float3 spos[BLOCK_SIZE];
    int src = tile * blockDim.x + threadIdx.x;
    if (src < n) {                       // clamp the staging load
      float4 tpos = p[src];
      spos[threadIdx.x] = make_float3(tpos.x, tpos.y, tpos.z);
    }
    __syncthreads();                     // all threads reach the barrier
    if (i < n) {
      int count = min(BLOCK_SIZE, n - tile * (int)blockDim.x);
      for (int j = 0; j < count; j++) {  // only valid bodies in this tile
        float dx = spos[j].x - p[i].x;
        float dy = spos[j].y - p[i].y;
        float dz = spos[j].z - p[i].z;
        float distSqr = dx*dx + dy*dy + dz*dz + SOFTENING;
        float invDist = rsqrtf(distSqr);
        float invDist3 = invDist * invDist * invDist;
        Fx += dx * invDist3; Fy += dy * invDist3; Fz += dz * invDist3;
      }
    }
    __syncthreads();
  }
  if (i < n) {
    v[i].x += dt*Fx; v[i].y += dt*Fy; v[i].z += dt*Fz;
  }
}
// N-body benchmark driver: positions then velocities share one buffer; forces
// are computed on the GPU, positions integrated on the host, and the average
// throughput of iterations 2..nIters is reported (iteration 1 is warm-up).
int main(const int argc, const char** argv) {
  int nBodies = 30000;
  if (argc > 1) nBodies = atoi(argv[1]);
  const float dt = 0.01f; // time step
  const int nIters = 10;  // simulation iterations
  int bytes = 2*nBodies*sizeof(float4);
  float *buf = (float*)malloc(bytes);
  BodySystem p = { (float4*)buf, ((float4*)buf) + nBodies };
  randomizeBodies(buf, 8*nBodies); // Init pos / vel data
  float *d_buf;
  cudaMalloc(&d_buf, bytes);
  BodySystem d_p = { (float4*)d_buf, ((float4*)d_buf) + nBodies };
  int nBlocks = (nBodies + BLOCK_SIZE - 1) / BLOCK_SIZE;
  double totalTime = 0.0;
  for (int iter = 1; iter <= nIters; iter++) {
    StartTimer();
    cudaMemcpy(d_buf, buf, bytes, cudaMemcpyHostToDevice);
    bodyForce<<<nBlocks, BLOCK_SIZE>>>(d_p.pos, d_p.vel, dt, nBodies);
    cudaMemcpy(buf, d_buf, bytes, cudaMemcpyDeviceToHost); // blocks until kernel done
    for (int i = 0 ; i < nBodies; i++) { // integrate position
      p.pos[i].x += p.vel[i].x*dt;
      p.pos[i].y += p.vel[i].y*dt;
      p.pos[i].z += p.vel[i].z*dt;
    }
    const double tElapsed = GetTimer() / 1000.0;
    if (iter > 1) { // First iter is warm up
      totalTime += tElapsed;
    }
#ifndef SHMOO
    printf("Iteration %d: %.3f seconds\n", iter, tElapsed);
#endif
  }
  double avgTime = totalTime / (double)(nIters-1);
#ifdef SHMOO
  printf("%d, %0.3f\n", nBodies, 1e-9 * nBodies * nBodies / avgTime);
#else
  // BUG FIX: this printf referenced an undeclared variable `rate` and had one
  // more %f specifier than arguments, so the default (non-SHMOO) build did
  // not compile. Report the measured iteration rate instead.
  printf("Average rate for iterations 2 through %d: %.3f steps per second.\n",
         nIters, 1.0 / avgTime);
  printf("%d Bodies: average %0.3f Billion Interactions / second\n", nBodies, 1e-9 * nBodies * nBodies / avgTime);
#endif
  free(buf);
  cudaFree(d_buf);
  return 0;
}
|
973056ff5f5d28f8ccab2b4dc401d09530afa413.hip
|
// !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
// This is the REAL "hello world" for CUDA!
// It takes the string "Hello ", prints it, then passes it to CUDA with an array
// of offsets. Then the offsets are added in parallel to produce the string "World!"
// By Ingemar Ragnemalm 2010
#include <stdio.h>
const int N = 16;
const int blocksize = 16;
// Adds the per-character offset b[i] to a[i] in place. Launched as a single
// N-thread block, so threadIdx.x indexes both arrays directly.
__global__
void hello(char *a, int *b)
{
a[threadIdx.x] += b[threadIdx.x];
}
// Classic CUDA "hello world" (Ragnemalm): prints "Hello " (twice, once with
// and once without a newline - original sample behavior), uploads the string
// plus per-character offsets, lets the kernel add them, and prints the result.
int main()
{
char a[N] = "Hello \0\0\0\0\0\0";
int b[N] = {15, 10, 6, 0, -11, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0};
printf("%s\n", a);
char *ad;
int *bd;
const int csize = N*sizeof(char);
const int isize = N*sizeof(int);
printf("%s", a);
hipMalloc( (void**)&ad, csize );
hipMalloc( (void**)&bd, isize );
hipMemcpy( ad, a, csize, hipMemcpyHostToDevice );
hipMemcpy( bd, b, isize, hipMemcpyHostToDevice );
dim3 dimBlock( blocksize, 1 );
dim3 dimGrid( 1, 1 );
hipLaunchKernelGGL(( hello), dim3(dimGrid), dim3(dimBlock), 0, 0, ad, bd);
hipMemcpy( a, ad, csize, hipMemcpyDeviceToHost );
hipFree( ad );
hipFree( bd );
printf("%s\n", a);
return EXIT_SUCCESS;
}
|
973056ff5f5d28f8ccab2b4dc401d09530afa413.cu
|
// This is the REAL "hello world" for CUDA!
// It takes the string "Hello ", prints it, then passes it to CUDA with an array
// of offsets. Then the offsets are added in parallel to produce the string "World!"
// By Ingemar Ragnemalm 2010
#include <stdio.h>
const int N = 16;
const int blocksize = 16;
// Adds the per-character offset b[i] to a[i] in place. Launched as a single
// N-thread block, so threadIdx.x indexes both arrays directly.
__global__
void hello(char *a, int *b)
{
a[threadIdx.x] += b[threadIdx.x];
}
// Classic CUDA "hello world" (Ragnemalm): prints "Hello " (twice, once with
// and once without a newline - original sample behavior), uploads the string
// plus per-character offsets, lets the kernel add them, and prints the result.
int main()
{
char a[N] = "Hello \0\0\0\0\0\0";
int b[N] = {15, 10, 6, 0, -11, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0};
printf("%s\n", a);
char *ad;
int *bd;
const int csize = N*sizeof(char);
const int isize = N*sizeof(int);
printf("%s", a);
cudaMalloc( (void**)&ad, csize );
cudaMalloc( (void**)&bd, isize );
cudaMemcpy( ad, a, csize, cudaMemcpyHostToDevice );
cudaMemcpy( bd, b, isize, cudaMemcpyHostToDevice );
dim3 dimBlock( blocksize, 1 );
dim3 dimGrid( 1, 1 );
hello<<<dimGrid, dimBlock>>>(ad, bd);
cudaMemcpy( a, ad, csize, cudaMemcpyDeviceToHost );
cudaFree( ad );
cudaFree( bd );
printf("%s\n", a);
return EXIT_SUCCESS;
}
|
59fd1c20a2bf67441342f48c2e7dd6478ce7620c.hip
|
// !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
#ifndef uint32_t
#define uint32_t unsigned int
#endif
#define H0 0x6a09e667
#define H1 0xbb67ae85
#define H2 0x3c6ef372
#define H3 0xa54ff53a
#define H4 0x510e527f
#define H5 0x9b05688c
#define H6 0x1f83d9ab
#define H7 0x5be0cd19
__device__ __forceinline__
uint rotr(uint x, int n) {
    // Rotate x right by n bits; values of n >= 32 pass x through unchanged
    // (all SHA-256 call sites use n in 1..31).
    if (n < 32) {
        x = (x >> n) | (x << (32 - n));
    }
    return x;
}
__device__ __forceinline__
uint ch(uint x, uint y, uint z) {
    // SHA-256 "choose": bits of y where x is 1, bits of z where x is 0.
    // Classic xor form, equivalent to (x & y) ^ (~x & z).
    return z ^ (x & (y ^ z));
}
__device__ __forceinline__
uint maj(uint x, uint y, uint z) {
    // SHA-256 "majority": each bit is the majority vote of x, y, z.
    // Equivalent to (x & y) ^ (x & z) ^ (y & z).
    return (x & (y | z)) | (y & z);
}
__device__ __forceinline__
uint sigma0(uint x) {
    // SHA-256 big Sigma0: XOR of right-rotations by 2, 13 and 22.
    uint a = rotr(x, 2);
    uint b = rotr(x, 13);
    uint c = rotr(x, 22);
    return a ^ b ^ c;
}
__device__ __forceinline__
uint sigma1(uint x) {
    // SHA-256 big Sigma1: XOR of right-rotations by 6, 11 and 25.
    uint a = rotr(x, 6);
    uint b = rotr(x, 11);
    uint c = rotr(x, 25);
    return a ^ b ^ c;
}
__device__ __forceinline__
uint gamma0(uint x) {
    // SHA-256 small sigma0: rotations by 7 and 18, plus a logical shift by 3.
    uint a = rotr(x, 7);
    uint b = rotr(x, 18);
    uint c = x >> 3;
    return a ^ b ^ c;
}
__device__ __forceinline__
uint gamma1(uint x) {
    // SHA-256 small sigma1: rotations by 17 and 19, plus a logical shift by 10.
    uint a = rotr(x, 17);
    uint b = rotr(x, 19);
    uint c = x >> 10;
    return a ^ b ^ c;
}
// SHA-256 round constants K[0..63] (FIPS 180-4), used in the compression loop.
__constant__ uint K[64]={
0x428a2f98, 0x71374491, 0xb5c0fbcf, 0xe9b5dba5, 0x3956c25b, 0x59f111f1, 0x923f82a4, 0xab1c5ed5,
0xd807aa98, 0x12835b01, 0x243185be, 0x550c7dc3, 0x72be5d74, 0x80deb1fe, 0x9bdc06a7, 0xc19bf174,
0xe49b69c1, 0xefbe4786, 0x0fc19dc6, 0x240ca1cc, 0x2de92c6f, 0x4a7484aa, 0x5cb0a9dc, 0x76f988da,
0x983e5152, 0xa831c66d, 0xb00327c8, 0xbf597fc7, 0xc6e00bf3, 0xd5a79147, 0x06ca6351, 0x14292967,
0x27b70a85, 0x2e1b2138, 0x4d2c6dfc, 0x53380d13, 0x650a7354, 0x766a0abb, 0x81c2c92e, 0x92722c85,
0xa2bfe8a1, 0xa81a664b, 0xc24b8b70, 0xc76c51a3, 0xd192e819, 0xd6990624, 0xf40e3585, 0x106aa070,
0x19a4c116, 0x1e376c08, 0x2748774c, 0x34b0bcb5, 0x391c0cb3, 0x4ed8aa4a, 0x5b9cca4f, 0x682e6ff3,
0x748f82ee, 0x78a5636f, 0x84c87814, 0x8cc70208, 0x90befffa, 0xa4506ceb, 0xbef9a3f7, 0xc67178f2
};
__device__ __forceinline__
uint get_global_id() {
    // Flatten a 3-D grid of 1-D blocks into a single global thread index.
    const uint linearBlock = (blockIdx.z * gridDim.y + blockIdx.y) * gridDim.x
                           + blockIdx.x;
    return linearBlock * blockDim.x + threadIdx.x;
}
// Brute-force SHA-256 search. Each thread hashes 2^16 single-block candidate
// messages laid out as described in the comment below: the prefix words, then
// the thread id, the counter `rnd`, and the two 32-bit halves of `start`, each
// rendered as four big-endian ASCII hex digits packed into one 32-bit word.
// A candidate matches when the first digest word ANDed with `mask` is zero;
// the winning (id, rnd) pair is written to match[1..2] (last writer wins -
// NOTE(review): concurrent matches race on `match`; confirm that is intended).
__global__ void sha256_crypt_kernel(ulong start, uint *prefix, ulong plen, uint mask, uint *match){
int t;
uint W[80], rnd, id, A,B,C,D,E,F,G,H,T1,T2;   // W: only the first 64 words are used
uint Ws[16];   // the candidate message block; rebuilt per-rnd from this template
id = get_global_id();
//if (id == 0) {
// printf("%08x\n", start);
//}
// brutforce is build up as: prefix | thr_id:04x | <rnd>:04x | start:08x
#pragma unroll
for (t = 0; t < plen; ++t) {
Ws[t] = prefix[t];
// printf("%04x", prefix[t]);
}
// printf("%04x\n", id);
// Nibble -> ASCII hex trick used throughout: spread the four nibbles into
// separate bytes (T1), add '0' (0x30) to each, and add 0x27 ('a'-'0'-10)
// to exactly those bytes whose nibble is greater than 9 (flagged in T2).
T1 = (id & 0xf) | (((id >> 4) & 0xf) << 8) | (((id >> 8) & 0xf) << 16) | (((id >> 12) & 0xf) << 24);
T2 = (T1 & 0xe0e0e0e);
T2 = ((((T2 >> 1) & T2) >> 2) | (((T2 >> 2) & T2) >> 1)) & 0x1010101;
Ws[plen] = T1 + 0x30303030 + T2 * 0x27;
T1 = (uint)(start >> 32);
T1 = (T1 & 0xf) | (((T1 >> 4) & 0xf) << 8) | (((T1 >> 8) & 0xf) << 16) | (((T1 >> 12) & 0xf) << 24);
T2 = (T1 & 0xe0e0e0e);
T2 = ((((T2 >> 1) & T2) >> 2) | (((T2 >> 2) & T2) >> 1)) & 0x1010101;
Ws[plen + 2] = T1 + 0x30303030 + T2 * 0x27;
T1 = (uint)start;
T1 = (T1 & 0xf) | (((T1 >> 4) & 0xf) << 8) | (((T1 >> 8) & 0xf) << 16) | (((T1 >> 12) & 0xf) << 24);
T2 = (T1 & 0xe0e0e0e);
T2 = ((((T2 >> 1) & T2) >> 2) | (((T2 >> 2) & T2) >> 1)) & 0x1010101;
Ws[plen + 3] = T1 + 0x30303030 + T2 * 0x27;
Ws[plen + 4] = 0x80000000;   // SHA-256 padding: leading 1 bit
#pragma unroll
for (t = plen + 5; t < 15; ++t) {
Ws[t] = 0;
}
Ws[15] = 128 + 32 * plen;    // message length in bits
// preparing buffer done
/*
if (id == 0) {
printf("%016x: ", start);
for (t = 0; t < 16; ++t) {
printf("%08x", Ws[t]);
}
printf(" - %u\n", Ws[15]);
}
*/
// 2^16 candidates per thread: only the word holding `rnd` changes below.
for (rnd = 0; rnd < 0x10000; ++rnd) {
uint digest[8] = {H0, H1, H2, H3, H4, H5, H6, H7};
#pragma unroll
for (t = 0; t < 16; ++t) {
W[t] = Ws[t];
}
T1 = (rnd & 0xf) | (((rnd >> 4) & 0xf) << 8) | (((rnd >> 8) & 0xf) << 16) | (((rnd >> 12) & 0xf) << 24);
T2 = (T1 & 0xe0e0e0e);
T2 = ((((T2 >> 1) & T2) >> 2) | (((T2 >> 2) & T2) >> 1)) & 0x1010101;
W[plen + 1] = T1 + 0x30303030 + T2 * 0x27;
A = digest[0] = H0;
B = digest[1] = H1;
C = digest[2] = H2;
D = digest[3] = H3;
E = digest[4] = H4;
F = digest[5] = H5;
G = digest[6] = H6;
H = digest[7] = H7;
// Message schedule expansion.
for (t = 16; t < 64; t++) {
W[t] = gamma1(W[t - 2]) + W[t - 7] + gamma0(W[t - 15]) + W[t - 16];
}
// 64 compression rounds.
for (t = 0; t < 64; t++) {
T1 = H + sigma1(E) + ch(E, F, G) + K[t] + W[t];
T2 = sigma0(A) + maj(A, B, C);
H = G; G = F; F = E; E = D + T1; D = C; C = B; B = A; A = T1 + T2;
}
// Only the first digest word is needed for the mask test.
digest[0] += A;
if ((digest[0] & mask) == 0) {
/*
for (t = 0; t < 16; ++t) {
printf("%08x", Ws[t]);
}
printf(" - %u\n", Ws[15]);
*/
match[0] = 1;
match[1] = id;
match[2] = rnd;
}
}
}
|
59fd1c20a2bf67441342f48c2e7dd6478ce7620c.cu
|
#ifndef uint32_t
#define uint32_t unsigned int
#endif
#define H0 0x6a09e667
#define H1 0xbb67ae85
#define H2 0x3c6ef372
#define H3 0xa54ff53a
#define H4 0x510e527f
#define H5 0x9b05688c
#define H6 0x1f83d9ab
#define H7 0x5be0cd19
__device__ __forceinline__
// Rotate x right by n bits. The original wrote this as a ternary
// expression-statement with an assignment in one arm and a no-effect other
// arm, and for n == 0 it evaluated `x << 32`, which is undefined behavior
// for 32-bit uint. Guard both identity cases (n == 0 and n >= 32 leave x
// unchanged); SHA-256 only calls this with n in 2..25.
uint rotr(uint x, int n) {
    if (n > 0 && n < 32) {
        x = (x >> n) | (x << (32 - n));
    }
    return x;
}
__device__ __forceinline__
// SHA-256 "choose": for each bit, pick y where x is 1, z where x is 0.
// Single-select form, equivalent to (x & y) ^ (~x & z).
uint ch(uint x, uint y, uint z) {
    return z ^ (x & (y ^ z));
}
__device__ __forceinline__
// SHA-256 "majority": each result bit is the majority vote of x, y, z.
// Equivalent to (x & y) ^ (x & z) ^ (y & z).
uint maj(uint x, uint y, uint z) {
    return (x & y) | (z & (x | y));
}
__device__ __forceinline__
// SHA-256 big-sigma-0: XOR of three right-rotations of the working variable.
uint sigma0(uint x) {
    uint r2  = rotr(x, 2);
    uint r13 = rotr(x, 13);
    uint r22 = rotr(x, 22);
    return r2 ^ r13 ^ r22;
}
__device__ __forceinline__
// SHA-256 big-sigma-1: XOR of three right-rotations of the working variable.
uint sigma1(uint x) {
    uint r6  = rotr(x, 6);
    uint r11 = rotr(x, 11);
    uint r25 = rotr(x, 25);
    return r6 ^ r11 ^ r25;
}
__device__ __forceinline__
// SHA-256 small-sigma-0: two rotations and one plain right shift,
// used in the message-schedule expansion.
uint gamma0(uint x) {
    uint r7  = rotr(x, 7);
    uint r18 = rotr(x, 18);
    return r7 ^ r18 ^ (x >> 3);
}
__device__ __forceinline__
// SHA-256 small-sigma-1: two rotations and one plain right shift,
// used in the message-schedule expansion.
uint gamma1(uint x) {
    uint r17 = rotr(x, 17);
    uint r19 = rotr(x, 19);
    return r17 ^ r19 ^ (x >> 10);
}
// SHA-256 per-round additive constants (the standard K table), consumed as
// K[t] in the 64-round compression loop of sha256_crypt_kernel. Kept in
// constant memory: every lane of a warp reads the same K[t] per round, which
// is the broadcast access pattern constant memory is optimized for.
__constant__ uint K[64]={
0x428a2f98, 0x71374491, 0xb5c0fbcf, 0xe9b5dba5, 0x3956c25b, 0x59f111f1, 0x923f82a4, 0xab1c5ed5,
0xd807aa98, 0x12835b01, 0x243185be, 0x550c7dc3, 0x72be5d74, 0x80deb1fe, 0x9bdc06a7, 0xc19bf174,
0xe49b69c1, 0xefbe4786, 0x0fc19dc6, 0x240ca1cc, 0x2de92c6f, 0x4a7484aa, 0x5cb0a9dc, 0x76f988da,
0x983e5152, 0xa831c66d, 0xb00327c8, 0xbf597fc7, 0xc6e00bf3, 0xd5a79147, 0x06ca6351, 0x14292967,
0x27b70a85, 0x2e1b2138, 0x4d2c6dfc, 0x53380d13, 0x650a7354, 0x766a0abb, 0x81c2c92e, 0x92722c85,
0xa2bfe8a1, 0xa81a664b, 0xc24b8b70, 0xc76c51a3, 0xd192e819, 0xd6990624, 0xf40e3585, 0x106aa070,
0x19a4c116, 0x1e376c08, 0x2748774c, 0x34b0bcb5, 0x391c0cb3, 0x4ed8aa4a, 0x5b9cca4f, 0x682e6ff3,
0x748f82ee, 0x78a5636f, 0x84c87814, 0x8cc70208, 0x90befffa, 0xa4506ceb, 0xbef9a3f7, 0xc67178f2
};
__device__ __forceinline__
uint get_global_id() {
    // Linearize the 3D grid (x fastest, z slowest) into a flat block id,
    // then offset by the thread index within the block. Only valid for
    // 1-D thread blocks: threadIdx.y/z are ignored.
    uint linear_block = blockIdx.x
                      + gridDim.x * (blockIdx.y + gridDim.y * blockIdx.z);
    return threadIdx.x + blockDim.x * linear_block;
}
// Brute-force SHA-256 search kernel (single 512-bit message block).
//
// Candidate message layout (in 32-bit words, see comment below):
//   prefix[0..plen-1] | id as 4 ASCII hex chars | rnd as 4 ASCII hex chars |
//   low 16 bits of (start >> 32) as hex | low 16 bits of start as hex |
//   0x80 pad | zeros | bit length
// Each thread fixes its own word from its global id, sweeps rnd = 0..0xffff,
// and reports a hit whenever the first digest word ANDed with `mask` is zero.
//
// Parameters:
//   start  - 64-bit counter; only the low 16 bits of each 32-bit half are
//            encoded into the message.
//   prefix - plen fixed message words copied verbatim.
//   plen   - number of prefix words; message must fit one block, so
//            plen + 5 words <= 16 is assumed (not checked).
//   mask   - hit condition: (digest[0] & mask) == 0.
//   match  - out: match[0]=found flag, match[1]=thread id, match[2]=rnd.
//            NOTE(review): plain stores, no atomics - if several threads hit,
//            the last writer wins.
//
// NOTE(review): W is sized 80 (SHA-1 style) though SHA-256 only uses 64 words;
// digest[1..7] are written but never read back - only digest[0] is checked.
__global__ void sha256_crypt_kernel(ulong start, uint *prefix, ulong plen, uint mask, uint *match){
int t;
uint W[80], rnd, id, A,B,C,D,E,F,G,H,T1,T2;
uint Ws[16];
id = get_global_id();
//if (id == 0) {
// printf("%08x\n", start);
//}
// brutforce is build up as: prefix | thr_id:04x | <rnd>:04x | start:08x
#pragma unroll
for (t = 0; t < plen; ++t) {
Ws[t] = prefix[t];
// printf("%04x", prefix[t]);
}
// printf("%04x\n", id);
// Nibble-to-hex trick used throughout: T1 spreads the four nibbles of a
// 16-bit value one-per-byte; T2 becomes 1 in each byte whose nibble is > 9,
// so adding 0x30 ('0') plus T2*0x27 yields ASCII '0'-'9' / 'a'-'f'.
T1 = (id & 0xf) | (((id >> 4) & 0xf) << 8) | (((id >> 8) & 0xf) << 16) | (((id >> 12) & 0xf) << 24);
T2 = (T1 & 0xe0e0e0e);
T2 = ((((T2 >> 1) & T2) >> 2) | (((T2 >> 2) & T2) >> 1)) & 0x1010101;
Ws[plen] = T1 + 0x30303030 + T2 * 0x27;
T1 = (uint)(start >> 32);
T1 = (T1 & 0xf) | (((T1 >> 4) & 0xf) << 8) | (((T1 >> 8) & 0xf) << 16) | (((T1 >> 12) & 0xf) << 24);
T2 = (T1 & 0xe0e0e0e);
T2 = ((((T2 >> 1) & T2) >> 2) | (((T2 >> 2) & T2) >> 1)) & 0x1010101;
Ws[plen + 2] = T1 + 0x30303030 + T2 * 0x27;
T1 = (uint)start;
T1 = (T1 & 0xf) | (((T1 >> 4) & 0xf) << 8) | (((T1 >> 8) & 0xf) << 16) | (((T1 >> 12) & 0xf) << 24);
T2 = (T1 & 0xe0e0e0e);
T2 = ((((T2 >> 1) & T2) >> 2) | (((T2 >> 2) & T2) >> 1)) & 0x1010101;
Ws[plen + 3] = T1 + 0x30303030 + T2 * 0x27;
// SHA-256 padding: 0x80 byte after the data, zeros, then the bit length
// (plen + 4 data words * 32 bits = 32*plen + 128) in the last word.
Ws[plen + 4] = 0x80000000;
#pragma unroll
for (t = plen + 5; t < 15; ++t) {
Ws[t] = 0;
}
Ws[15] = 128 + 32 * plen;
// preparing buffer done
/*
if (id == 0) {
printf("%016x: ", start);
for (t = 0; t < 16; ++t) {
printf("%08x", Ws[t]);
}
printf(" - %u\n", Ws[15]);
}
*/
for (rnd = 0; rnd < 0x10000; ++rnd) {
uint digest[8] = {H0, H1, H2, H3, H4, H5, H6, H7};
#pragma unroll
for (t = 0; t < 16; ++t) {
W[t] = Ws[t];
}
// Only word plen+1 changes between rounds: re-encode rnd as hex digits.
T1 = (rnd & 0xf) | (((rnd >> 4) & 0xf) << 8) | (((rnd >> 8) & 0xf) << 16) | (((rnd >> 12) & 0xf) << 24);
T2 = (T1 & 0xe0e0e0e);
T2 = ((((T2 >> 1) & T2) >> 2) | (((T2 >> 2) & T2) >> 1)) & 0x1010101;
W[plen + 1] = T1 + 0x30303030 + T2 * 0x27;
// Working variables (digest[] re-initialization here is redundant with the
// aggregate initializer above).
A = digest[0] = H0;
B = digest[1] = H1;
C = digest[2] = H2;
D = digest[3] = H3;
E = digest[4] = H4;
F = digest[5] = H5;
G = digest[6] = H6;
H = digest[7] = H7;
// Message schedule expansion.
for (t = 16; t < 64; t++) {
W[t] = gamma1(W[t - 2]) + W[t - 7] + gamma0(W[t - 15]) + W[t - 16];
}
// 64-round compression.
for (t = 0; t < 64; t++) {
T1 = H + sigma1(E) + ch(E, F, G) + K[t] + W[t];
T2 = sigma0(A) + maj(A, B, C);
H = G; G = F; F = E; E = D + T1; D = C; C = B; B = A; A = T1 + T2;
}
digest[0] += A;
if ((digest[0] & mask) == 0) {
/*
for (t = 0; t < 16; ++t) {
printf("%08x", Ws[t]);
}
printf(" - %u\n", Ws[15]);
*/
match[0] = 1;
match[1] = id;
match[2] = rnd;
}
}
}
|
f0e078db9d747cf81870e52a3195f299b6fac478.hip
|
// !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
#define COALESCED_NUM 32
#define blockDimX 256
#define blockDimY 1
#define gridDimX (gridDim.x)
#define gridDimY (gridDim.y)
#define idx (blockIdx.x*blockDimX+threadIdx.x)
#define idy (blockIdx.y*blockDimY+threadIdx.y)
#define bidy (blockIdx.y)
#define bidx (blockIdx.x)
#define tidx (threadIdx.x)
#define tidy (threadIdx.y)
#define merger_y 1
#define coalesced_idy (bidy/(COALESCED_NUM/(merger_y*blockDimY))*COALESCED_NUM)
#define globalDimY 1
#define WIDTH_A 2048
#define A(y,x) A[(y)*WIDTH_A+(x)]
/* Transposed matrix-vector product: C[j] = sum_k A[k][j] * B[k] over the
   first `width` rows of the row-major, WIDTH_A-wide matrix A (via the A(y,x)
   macro), computed in tiles of 32. Each thread produces one output C[idx].

   Launch/layout assumptions (not checked):
   - 1-D blocks of blockDimX (256) threads; idx must stay inside a row.
   - `width` is a multiple of 32 - the tile loop has no remainder handling.
   - The first 32 threads of each block stage a 32-element tile of B into
     shared memory; all threads of the block then consume it. */
__global__ void tmv(float * A, float * B, float * C, int width)
{
__shared__ float shared_0[32];
int i;
float sum;
i=0;
sum=0;
for (i=0; i<width; i=(i+32))
{
int it_1;
if ((tidx<32))
{
shared_0[(tidx+0)]=B[(i+tidx)];
}
/* Barrier: the tile must be fully staged before any thread reads it. */
__syncthreads();
#pragma unroll
for (it_1=0; it_1<32; it_1=(it_1+1))
{
float a;
float b;
a=A((it_1+i), idx);
b=shared_0[it_1];
sum+=(a*b);
}
/* Barrier: keep the next iteration from overwriting shared_0 while
   slower threads are still reading the current tile. */
__syncthreads();
}
{
C[idx]=sum;
}
}
|
f0e078db9d747cf81870e52a3195f299b6fac478.cu
|
#define COALESCED_NUM 32
#define blockDimX 256
#define blockDimY 1
#define gridDimX (gridDim.x)
#define gridDimY (gridDim.y)
#define idx (blockIdx.x*blockDimX+threadIdx.x)
#define idy (blockIdx.y*blockDimY+threadIdx.y)
#define bidy (blockIdx.y)
#define bidx (blockIdx.x)
#define tidx (threadIdx.x)
#define tidy (threadIdx.y)
#define merger_y 1
#define coalesced_idy (bidy/(COALESCED_NUM/(merger_y*blockDimY))*COALESCED_NUM)
#define globalDimY 1
#define WIDTH_A 2048
#define A(y,x) A[(y)*WIDTH_A+(x)]
/* Transposed matrix-vector product: C[j] = sum_k A[k][j] * B[k] over the
   first `width` rows of the row-major, WIDTH_A-wide matrix A (via the A(y,x)
   macro), computed in tiles of 32. Each thread produces one output C[idx].

   Launch/layout assumptions (not checked):
   - 1-D blocks of blockDimX (256) threads; idx must stay inside a row.
   - `width` is a multiple of 32 - the tile loop has no remainder handling.
   - The first 32 threads of each block stage a 32-element tile of B into
     shared memory; all threads of the block then consume it. */
__global__ void tmv(float * A, float * B, float * C, int width)
{
__shared__ float shared_0[32];
int i;
float sum;
i=0;
sum=0;
for (i=0; i<width; i=(i+32))
{
int it_1;
if ((tidx<32))
{
shared_0[(tidx+0)]=B[(i+tidx)];
}
/* Barrier: the tile must be fully staged before any thread reads it. */
__syncthreads();
#pragma unroll
for (it_1=0; it_1<32; it_1=(it_1+1))
{
float a;
float b;
a=A((it_1+i), idx);
b=shared_0[it_1];
sum+=(a*b);
}
/* Barrier: keep the next iteration from overwriting shared_0 while
   slower threads are still reading the current tile. */
__syncthreads();
}
{
C[idx]=sum;
}
}
|
d0dd70d390fbc8d143bbeca235d09a3587189e86.hip
|
// !!! This is a file automatically generated by hipify!!!
/*
Provides an interface to the CUFFT package.
Testing examples can be found in ~src/mat/examples/tests
*/
#include <petsc/private/matimpl.h> /*I "petscmat.h" I*/
EXTERN_C_BEGIN
#include <hip/hip_runtime.h>
#include <hip/hip_runtime.h>
#include <hipfft.h>
EXTERN_C_END
/* Per-matrix context for the sequential CUFFT Mat type. */
typedef struct {
PetscInt ndim;            /* number of transform dimensions (1, 2 or 3) */
PetscInt *dim;            /* length ndim+1; dim[0..ndim-1] are the per-axis
                             sizes, dim[ndim] holds the total element count
                             (set in MatCreateSeqCUFFT) */
hipfftHandle p_forward, p_backward;  /* FFT plans, created lazily on first use */
hipfftComplex *devArray;  /* device buffer of dim[ndim] complex entries */
} Mat_CUFFT;
/*
  MatMult_SeqCUFFT - Applies the forward FFT to x, storing the result in y.

  The forward plan is created lazily on first use and cached in the
  Mat_CUFFT context; dim[ndim] holds the total number of complex entries
  (set by MatCreateSeqCUFFT) and sizes both host<->device transfers.

  Fix: the plan-creation branch previously declared a second `result`
  variable that shadowed the outer one; the shadow is removed for
  consistency with MatMultTranspose_SeqCUFFT.
*/
PetscErrorCode MatMult_SeqCUFFT(Mat A, Vec x, Vec y)
{
  Mat_CUFFT      *cufft    = (Mat_CUFFT*) A->data;
  hipfftComplex  *devArray = cufft->devArray;
  PetscInt       ndim      = cufft->ndim, *dim = cufft->dim;
  PetscScalar    *x_array, *y_array;
  hipfftResult   result;
  PetscErrorCode ierr;

  PetscFunctionBegin;
  ierr = VecGetArray(x, &x_array);CHKERRQ(ierr);
  ierr = VecGetArray(y, &y_array);CHKERRQ(ierr);
  if (!cufft->p_forward) {
    /* create a plan, then execute it */
    switch (ndim) {
    case 1:
      result = hipfftPlan1d(&cufft->p_forward, dim[0], HIPFFT_C2C, 1);CHKERRQ(result != HIPFFT_SUCCESS);
      break;
    case 2:
      result = hipfftPlan2d(&cufft->p_forward, dim[0], dim[1], HIPFFT_C2C);CHKERRQ(result != HIPFFT_SUCCESS);
      break;
    case 3:
      result = hipfftPlan3d(&cufft->p_forward, dim[0], dim[1], dim[2], HIPFFT_C2C);CHKERRQ(result != HIPFFT_SUCCESS);
      break;
    default:
      SETERRQ1(PETSC_COMM_SELF, PETSC_ERR_USER, "Cannot create plan for %d-dimensional transform", ndim);
    }
  }
  /* transfer to GPU memory (dim[ndim] = total complex entries) */
  hipMemcpy(devArray, x_array, sizeof(hipfftComplex)*dim[ndim], hipMemcpyHostToDevice);
  /* execute transform in place on the device buffer */
  result = hipfftExecC2C(cufft->p_forward, devArray, devArray, HIPFFT_FORWARD);CHKERRQ(result != HIPFFT_SUCCESS);
  /* transfer from GPU memory */
  hipMemcpy(y_array, devArray, sizeof(hipfftComplex)*dim[ndim], hipMemcpyDeviceToHost);
  ierr = VecRestoreArray(y, &y_array);CHKERRQ(ierr);
  ierr = VecRestoreArray(x, &x_array);CHKERRQ(ierr);
  PetscFunctionReturn(0);
}
/*
  MatMultTranspose_SeqCUFFT - Applies the backward (inverse-direction) FFT
  to x, storing the result in y. The backward plan is created lazily on
  first use; dim[ndim] holds the total number of complex entries.

  Bug fix: the transform previously executed with cufft->p_forward, which
  is NULL unless MatMult_SeqCUFFT had already run (and is the wrong plan in
  any case). It now executes the p_backward plan this routine creates.
*/
PetscErrorCode MatMultTranspose_SeqCUFFT(Mat A, Vec x, Vec y)
{
  Mat_CUFFT      *cufft    = (Mat_CUFFT*) A->data;
  hipfftComplex  *devArray = cufft->devArray;
  PetscInt       ndim      = cufft->ndim, *dim = cufft->dim;
  PetscScalar    *x_array, *y_array;
  hipfftResult   result;
  PetscErrorCode ierr;

  PetscFunctionBegin;
  ierr = VecGetArray(x, &x_array);CHKERRQ(ierr);
  ierr = VecGetArray(y, &y_array);CHKERRQ(ierr);
  if (!cufft->p_backward) {
    /* create a plan, then execute it */
    switch (ndim) {
    case 1:
      result = hipfftPlan1d(&cufft->p_backward, dim[0], HIPFFT_C2C, 1);CHKERRQ(result != HIPFFT_SUCCESS);
      break;
    case 2:
      result = hipfftPlan2d(&cufft->p_backward, dim[0], dim[1], HIPFFT_C2C);CHKERRQ(result != HIPFFT_SUCCESS);
      break;
    case 3:
      result = hipfftPlan3d(&cufft->p_backward, dim[0], dim[1], dim[2], HIPFFT_C2C);CHKERRQ(result != HIPFFT_SUCCESS);
      break;
    default:
      SETERRQ1(PETSC_COMM_SELF, PETSC_ERR_USER, "Cannot create plan for %d-dimensional transform", ndim);
    }
  }
  /* transfer to GPU memory (dim[ndim] = total complex entries) */
  hipMemcpy(devArray, x_array, sizeof(hipfftComplex)*dim[ndim], hipMemcpyHostToDevice);
  /* execute transform with the backward plan created above */
  result = hipfftExecC2C(cufft->p_backward, devArray, devArray, HIPFFT_BACKWARD);CHKERRQ(result != HIPFFT_SUCCESS);
  /* transfer from GPU memory */
  hipMemcpy(y_array, devArray, sizeof(hipfftComplex)*dim[ndim], hipMemcpyDeviceToHost);
  ierr = VecRestoreArray(y, &y_array);CHKERRQ(ierr);
  ierr = VecRestoreArray(x, &x_array);CHKERRQ(ierr);
  PetscFunctionReturn(0);
}
/*
  MatDestroy_SeqCUFFT - Releases the Mat_CUFFT context: the dim array, any
  lazily created FFT plans, the device buffer, and the context itself.
*/
PetscErrorCode MatDestroy_SeqCUFFT(Mat A)
{
Mat_CUFFT *cufft = (Mat_CUFFT*) A->data;
hipfftResult result;
PetscErrorCode ierr;
PetscFunctionBegin;
ierr = PetscFree(cufft->dim);CHKERRQ(ierr);
/* plans are created lazily, so either may still be null */
if (cufft->p_forward) {result = hipfftDestroy(cufft->p_forward);CHKERRQ(result != HIPFFT_SUCCESS);}
if (cufft->p_backward) {result = hipfftDestroy(cufft->p_backward);CHKERRQ(result != HIPFFT_SUCCESS);}
/* NOTE(review): hipFree return value is ignored here */
hipFree(cufft->devArray);
ierr = PetscFree(A->data);CHKERRQ(ierr);
ierr = PetscObjectChangeTypeName((PetscObject)A,0);CHKERRQ(ierr);
PetscFunctionReturn(0);
}
/*@
MatCreateSeqCUFFT - Creates a matrix object that provides sequential FFT via the external package CUFFT
Collective
Input Parameters:
+ comm - MPI communicator, set to PETSC_COMM_SELF
. ndim - the ndim-dimensional transform
- dim - array of size ndim, dim[i] contains the vector length in the i-dimension
Output Parameter:
. A - the matrix
Options Database Keys:
. -mat_cufft_plannerflags - set CUFFT planner flags
Level: intermediate
@*/
PetscErrorCode MatCreateSeqCUFFT(MPI_Comm comm, PetscInt ndim, const PetscInt dim[], Mat *A)
{
Mat_CUFFT *cufft;
PetscInt m, d;
PetscErrorCode ierr;
PetscFunctionBegin;
/* NOTE(review): the check is `ndim < 0` but the message says "must be > 0";
   ndim == 0 slips through (yielding a 1x1 matrix). Same mismatch below for
   dim[d]. Left as-is since tightening it would change accepted inputs. */
if (ndim < 0) SETERRQ1(PETSC_COMM_SELF, PETSC_ERR_USER, "ndim %d must be > 0", ndim);
ierr = MatCreate(comm, A);CHKERRQ(ierr);
/* m accumulates the total number of transform entries */
m = 1;
for (d = 0; d < ndim; ++d) {
if (dim[d] < 0) SETERRQ2(PETSC_COMM_SELF, PETSC_ERR_USER, "dim[%d]=%d must be > 0", d, dim[d]);
m *= dim[d];
}
ierr = MatSetSizes(*A, m, m, m, m);CHKERRQ(ierr);
ierr = PetscObjectChangeTypeName((PetscObject)*A, MATSEQCUFFT);CHKERRQ(ierr);
ierr = PetscNewLog(*A,&cufft);CHKERRQ(ierr);
(*A)->data = (void*) cufft;
/* dim has ndim+1 slots: the per-axis sizes plus the total in dim[ndim],
   which MatMult/MatMultTranspose use to size their transfers */
ierr = PetscMalloc1(ndim+1, &cufft->dim);CHKERRQ(ierr);
ierr = PetscArraycpy(cufft->dim, dim, ndim);CHKERRQ(ierr);
cufft->ndim = ndim;
/* plans are created lazily by the mult routines */
cufft->p_forward = 0;
cufft->p_backward = 0;
cufft->dim[ndim] = m;
/* GPU memory allocation */
hipMalloc((void**) &cufft->devArray, sizeof(hipfftComplex)*m);
(*A)->ops->mult = MatMult_SeqCUFFT;
(*A)->ops->multtranspose = MatMultTranspose_SeqCUFFT;
(*A)->assembled = PETSC_TRUE;
(*A)->ops->destroy = MatDestroy_SeqCUFFT;
/* get runtime options */
ierr = PetscOptionsBegin(comm, ((PetscObject)(*A))->prefix, "CUFFT Options", "Mat");CHKERRQ(ierr);
ierr = PetscOptionsEnd();CHKERRQ(ierr);
PetscFunctionReturn(0);
}
|
d0dd70d390fbc8d143bbeca235d09a3587189e86.cu
|
/*
Provides an interface to the CUFFT package.
Testing examples can be found in ~src/mat/examples/tests
*/
#include <petsc/private/matimpl.h> /*I "petscmat.h" I*/
EXTERN_C_BEGIN
#include <cuda.h>
#include <cuda_runtime.h>
#include <cufft.h>
EXTERN_C_END
/* Per-matrix context for the sequential CUFFT Mat type. */
typedef struct {
PetscInt ndim;           /* number of transform dimensions (1, 2 or 3) */
PetscInt *dim;           /* length ndim+1; dim[0..ndim-1] are the per-axis
                            sizes, dim[ndim] holds the total element count
                            (set in MatCreateSeqCUFFT) */
cufftHandle p_forward, p_backward;  /* FFT plans, created lazily on first use */
cufftComplex *devArray;  /* device buffer of dim[ndim] complex entries */
} Mat_CUFFT;
/*
  MatMult_SeqCUFFT - Applies the forward FFT to x, storing the result in y.

  The forward plan is created lazily on first use and cached in the
  Mat_CUFFT context; dim[ndim] holds the total number of complex entries
  (set by MatCreateSeqCUFFT) and sizes both host<->device transfers.

  Fix: the plan-creation branch previously declared a second `result`
  variable that shadowed the outer one; the shadow is removed for
  consistency with MatMultTranspose_SeqCUFFT.
*/
PetscErrorCode MatMult_SeqCUFFT(Mat A, Vec x, Vec y)
{
  Mat_CUFFT      *cufft    = (Mat_CUFFT*) A->data;
  cufftComplex   *devArray = cufft->devArray;
  PetscInt       ndim      = cufft->ndim, *dim = cufft->dim;
  PetscScalar    *x_array, *y_array;
  cufftResult    result;
  PetscErrorCode ierr;

  PetscFunctionBegin;
  ierr = VecGetArray(x, &x_array);CHKERRQ(ierr);
  ierr = VecGetArray(y, &y_array);CHKERRQ(ierr);
  if (!cufft->p_forward) {
    /* create a plan, then execute it */
    switch (ndim) {
    case 1:
      result = cufftPlan1d(&cufft->p_forward, dim[0], CUFFT_C2C, 1);CHKERRQ(result != CUFFT_SUCCESS);
      break;
    case 2:
      result = cufftPlan2d(&cufft->p_forward, dim[0], dim[1], CUFFT_C2C);CHKERRQ(result != CUFFT_SUCCESS);
      break;
    case 3:
      result = cufftPlan3d(&cufft->p_forward, dim[0], dim[1], dim[2], CUFFT_C2C);CHKERRQ(result != CUFFT_SUCCESS);
      break;
    default:
      SETERRQ1(PETSC_COMM_SELF, PETSC_ERR_USER, "Cannot create plan for %d-dimensional transform", ndim);
    }
  }
  /* transfer to GPU memory (dim[ndim] = total complex entries) */
  cudaMemcpy(devArray, x_array, sizeof(cufftComplex)*dim[ndim], cudaMemcpyHostToDevice);
  /* execute transform in place on the device buffer */
  result = cufftExecC2C(cufft->p_forward, devArray, devArray, CUFFT_FORWARD);CHKERRQ(result != CUFFT_SUCCESS);
  /* transfer from GPU memory */
  cudaMemcpy(y_array, devArray, sizeof(cufftComplex)*dim[ndim], cudaMemcpyDeviceToHost);
  ierr = VecRestoreArray(y, &y_array);CHKERRQ(ierr);
  ierr = VecRestoreArray(x, &x_array);CHKERRQ(ierr);
  PetscFunctionReturn(0);
}
/*
  MatMultTranspose_SeqCUFFT - Applies the inverse-direction FFT to x,
  storing the result in y. The backward plan is created lazily on first
  use; dim[ndim] holds the total number of complex entries.

  Bug fix: the transform previously executed with cufft->p_forward, which
  is NULL unless MatMult_SeqCUFFT had already run (and is the wrong plan in
  any case). It now executes the p_backward plan this routine creates.
*/
PetscErrorCode MatMultTranspose_SeqCUFFT(Mat A, Vec x, Vec y)
{
  Mat_CUFFT      *cufft    = (Mat_CUFFT*) A->data;
  cufftComplex   *devArray = cufft->devArray;
  PetscInt       ndim      = cufft->ndim, *dim = cufft->dim;
  PetscScalar    *x_array, *y_array;
  cufftResult    result;
  PetscErrorCode ierr;

  PetscFunctionBegin;
  ierr = VecGetArray(x, &x_array);CHKERRQ(ierr);
  ierr = VecGetArray(y, &y_array);CHKERRQ(ierr);
  if (!cufft->p_backward) {
    /* create a plan, then execute it */
    switch (ndim) {
    case 1:
      result = cufftPlan1d(&cufft->p_backward, dim[0], CUFFT_C2C, 1);CHKERRQ(result != CUFFT_SUCCESS);
      break;
    case 2:
      result = cufftPlan2d(&cufft->p_backward, dim[0], dim[1], CUFFT_C2C);CHKERRQ(result != CUFFT_SUCCESS);
      break;
    case 3:
      result = cufftPlan3d(&cufft->p_backward, dim[0], dim[1], dim[2], CUFFT_C2C);CHKERRQ(result != CUFFT_SUCCESS);
      break;
    default:
      SETERRQ1(PETSC_COMM_SELF, PETSC_ERR_USER, "Cannot create plan for %d-dimensional transform", ndim);
    }
  }
  /* transfer to GPU memory (dim[ndim] = total complex entries) */
  cudaMemcpy(devArray, x_array, sizeof(cufftComplex)*dim[ndim], cudaMemcpyHostToDevice);
  /* execute transform with the backward plan created above */
  result = cufftExecC2C(cufft->p_backward, devArray, devArray, CUFFT_INVERSE);CHKERRQ(result != CUFFT_SUCCESS);
  /* transfer from GPU memory */
  cudaMemcpy(y_array, devArray, sizeof(cufftComplex)*dim[ndim], cudaMemcpyDeviceToHost);
  ierr = VecRestoreArray(y, &y_array);CHKERRQ(ierr);
  ierr = VecRestoreArray(x, &x_array);CHKERRQ(ierr);
  PetscFunctionReturn(0);
}
/*
  MatDestroy_SeqCUFFT - Releases the Mat_CUFFT context: the dim array, any
  lazily created FFT plans, the device buffer, and the context itself.
*/
PetscErrorCode MatDestroy_SeqCUFFT(Mat A)
{
Mat_CUFFT *cufft = (Mat_CUFFT*) A->data;
cufftResult result;
PetscErrorCode ierr;
PetscFunctionBegin;
ierr = PetscFree(cufft->dim);CHKERRQ(ierr);
/* plans are created lazily, so either may still be null */
if (cufft->p_forward) {result = cufftDestroy(cufft->p_forward);CHKERRQ(result != CUFFT_SUCCESS);}
if (cufft->p_backward) {result = cufftDestroy(cufft->p_backward);CHKERRQ(result != CUFFT_SUCCESS);}
/* NOTE(review): cudaFree return value is ignored here */
cudaFree(cufft->devArray);
ierr = PetscFree(A->data);CHKERRQ(ierr);
ierr = PetscObjectChangeTypeName((PetscObject)A,0);CHKERRQ(ierr);
PetscFunctionReturn(0);
}
/*@
MatCreateSeqCUFFT - Creates a matrix object that provides sequential FFT via the external package CUFFT
Collective
Input Parameters:
+ comm - MPI communicator, set to PETSC_COMM_SELF
. ndim - the ndim-dimensional transform
- dim - array of size ndim, dim[i] contains the vector length in the i-dimension
Output Parameter:
. A - the matrix
Options Database Keys:
. -mat_cufft_plannerflags - set CUFFT planner flags
Level: intermediate
@*/
PetscErrorCode MatCreateSeqCUFFT(MPI_Comm comm, PetscInt ndim, const PetscInt dim[], Mat *A)
{
Mat_CUFFT *cufft;
PetscInt m, d;
PetscErrorCode ierr;
PetscFunctionBegin;
/* NOTE(review): the check is `ndim < 0` but the message says "must be > 0";
   ndim == 0 slips through (yielding a 1x1 matrix). Same mismatch below for
   dim[d]. Left as-is since tightening it would change accepted inputs. */
if (ndim < 0) SETERRQ1(PETSC_COMM_SELF, PETSC_ERR_USER, "ndim %d must be > 0", ndim);
ierr = MatCreate(comm, A);CHKERRQ(ierr);
/* m accumulates the total number of transform entries */
m = 1;
for (d = 0; d < ndim; ++d) {
if (dim[d] < 0) SETERRQ2(PETSC_COMM_SELF, PETSC_ERR_USER, "dim[%d]=%d must be > 0", d, dim[d]);
m *= dim[d];
}
ierr = MatSetSizes(*A, m, m, m, m);CHKERRQ(ierr);
ierr = PetscObjectChangeTypeName((PetscObject)*A, MATSEQCUFFT);CHKERRQ(ierr);
ierr = PetscNewLog(*A,&cufft);CHKERRQ(ierr);
(*A)->data = (void*) cufft;
/* dim has ndim+1 slots: the per-axis sizes plus the total in dim[ndim],
   which MatMult/MatMultTranspose use to size their transfers */
ierr = PetscMalloc1(ndim+1, &cufft->dim);CHKERRQ(ierr);
ierr = PetscArraycpy(cufft->dim, dim, ndim);CHKERRQ(ierr);
cufft->ndim = ndim;
/* plans are created lazily by the mult routines */
cufft->p_forward = 0;
cufft->p_backward = 0;
cufft->dim[ndim] = m;
/* GPU memory allocation */
cudaMalloc((void**) &cufft->devArray, sizeof(cufftComplex)*m);
(*A)->ops->mult = MatMult_SeqCUFFT;
(*A)->ops->multtranspose = MatMultTranspose_SeqCUFFT;
(*A)->assembled = PETSC_TRUE;
(*A)->ops->destroy = MatDestroy_SeqCUFFT;
/* get runtime options */
ierr = PetscOptionsBegin(comm, ((PetscObject)(*A))->prefix, "CUFFT Options", "Mat");CHKERRQ(ierr);
ierr = PetscOptionsEnd();CHKERRQ(ierr);
PetscFunctionReturn(0);
}
|
c02cf8b6c30f4756222f607daced9e568a4aa299.hip
|
// !!! This is a file automatically generated by hipify!!!
#include "main.cuh"
/*
 * LTE PUSCH transmit-chain driver (HIP port): reads data and RI bit files
 * (argv[1], argv[2]), then runs interleave -> scramble -> map (64QAM) ->
 * transform-precode (FFT) -> DMRS multiplex -> SC-FDMA modulate on the GPU,
 * timing each stage.
 *
 * NOTE(review): timing and cleanup caveats, left as-is to preserve behavior:
 *  - stopTimer after the async D2H copy has no stream synchronize, so the
 *    host buffer may not be filled when the timer stops - confirm intent.
 *  - streams, FFT plans, c_h and pusch_bb_h are never released (process
 *    exit reclaims them).
 *  - stream_default is presumably declared in main.cuh - verify.
 */
int main(int argc, char **argv) {
// Create default stream for the chain
hipStream_t stream_dmrs;
hipStreamCreate(&stream_default);
hipStreamCreate(&stream_dmrs);
//For timing purpose
timerInit();
startTimer();
int N_bits, N_ri;
const int Qm = 6; // 64QAM Modulation
const int N_l = 1; // Number of Layers
// Physical layer cell identity (we need for generation of random sequence)
int N_id_cell = 2; // assume enodeB scheduled cell 2 for the UE
int M_pusch_rb = 100; // number of resource blocks assigned to the UE
int n_s = 0; // assume UE send on time slot 0
int n_RNTI = 10; // radio network temporary identifier given to the UE by enodeB(assume 10)
// (UNUSED) int N_subfr = 0; // Subframe number within a radio frame
BYTE* inputBits_h = readBits(argc, argv[1], &N_bits); //Get input bits from the text file
BYTE* riBits_h = readBits(argc, argv[2], &N_ri); //Get RI bits from the text file
//hipMalloc & hipMemcpy for inputBits & RI_Bits to Device
Byte *inputBits_d = 0, *riBits_d = 0;
hipMalloc((void **)&inputBits_d, sizeof(Byte)*N_bits);
hipMalloc((void **)&riBits_d, sizeof(Byte)*N_ri);
Byte* c_d = 0;
hipMalloc((void **)&c_d, sizeof(Byte)*N_bits);
// Copy data to the device using different stream
hipStream_t stream_mem;
hipStreamCreate(&stream_mem);
hipMemcpyAsync(inputBits_d, inputBits_h, sizeof(Byte)*N_bits, hipMemcpyHostToDevice, stream_mem);
hipMemcpyAsync(riBits_d, riBits_h, sizeof(Byte)*N_ri, hipMemcpyHostToDevice, stream_default);
stopTimer("hipMalloc & hipMemcpy for inputBits & RI_Bits Time= %.6f ms\n", elapsed);
//Create Plans
startTimer();
// Batched FFT over one resource-block-width per batch (transform precoder)
hipfftHandle plan_transform_precoder;
int n[1] = { N_sc_rb*M_pusch_rb };
hipfftPlanMany(&plan_transform_precoder, 1, n, NULL, 1, n[0], NULL, 1, N_sc_rb*M_pusch_rb, HIPFFT_C2C, ((N_bits + N_ri) / Qm)/n[0]);
// One FFT_size IFFT per SC-FDMA symbol in the subframe
hipfftHandle plan_sc_fdma;
n[0] = { FFT_size };
hipfftPlanMany(&plan_sc_fdma, 1, n, NULL, 1, FFT_size, NULL, 1, FFT_size, HIPFFT_C2C, N_symbs_per_subframe);
stopTimer("Create Plans Time= %.6f ms\n", elapsed);
//Device data allocation
startTimer();
//timer_test << <1, 1 >> > ();
// Interleaver matrix geometry: R_mux rows x N_pusch_symbs columns of bits
int data_vec_len = Qm*N_l;
// (UNUSED) int ri_vec_len = Qm*N_l;
int N_data_bits = N_bits / data_vec_len;
int N_ri_bits = N_ri / data_vec_len;
int H_prime = N_data_bits;
// (UNUSED) int H_vec_len = data_vec_len;
int H_prime_total = H_prime + N_ri_bits;
int R_mux = (H_prime_total*Qm*N_l) / N_pusch_symbs;
int R_prime_mux = R_mux / (Qm*N_l);
Byte *y_idx_d, *y_mat_d, *interleaved_d;
hipMalloc((void **)&y_idx_d, sizeof(Byte)*(N_pusch_symbs * R_prime_mux));
hipMalloc((void **)&y_mat_d, sizeof(Byte)*(N_pusch_symbs*R_mux));
hipMalloc((void **)&interleaved_d, sizeof(Byte)*(N_pusch_symbs*R_mux));
Byte *scrambledbits_d = 0;
hipMalloc((void **)&scrambledbits_d, sizeof(Byte)*N_bits);
Byte *bits_each_Qm_d;
//float* symbols_R_d = 0, *symbols_I_d = 0;
hipMalloc((void **)&bits_each_Qm_d, sizeof(Byte)*(N_bits / Qm));
//hipMalloc((void **)&symbols_R_d, sizeof(float)*(N_bits / Qm));
//hipMalloc((void **)&symbols_I_d, sizeof(float)*(N_bits / Qm));
hipfftComplex *precoded_symbols_d = 0, *cuComplex_symbols_d = 0;
hipMalloc((void **)&cuComplex_symbols_d, sizeof(hipfftComplex)*(N_bits / Qm));
hipMalloc((void **)&precoded_symbols_d, sizeof(hipfftComplex)*(N_bits / Qm));
hipfftComplex* x_q_d;
hipfftComplex* dmrs_1_d = 0, *dmrs_2_d = 0;
hipMalloc((void **)&dmrs_1_d, sizeof(hipfftComplex)*N_sc_rb*M_pusch_rb);
hipMalloc((void **)&dmrs_2_d, sizeof(hipfftComplex)*N_sc_rb*M_pusch_rb);
hipMalloc((void **)&x_q_d, sizeof(hipfftComplex)*prime_nums[M_pusch_rb - 1]);
hipfftComplex *subframe_d = 0;
hipMalloc((void **)&subframe_d, sizeof(hipfftComplex)*N_symbs_per_subframe*N_sc_rb*M_pusch_rb);
hipfftComplex* ifft_vec_d;
hipfftComplex *pusch_bb_d = 0;
hipMalloc((void **)&ifft_vec_d, sizeof(hipfftComplex)*N_symbs_per_subframe*FFT_size);
hipMalloc((void **)&pusch_bb_d, sizeof(hipfftComplex)*modulated_subframe_length);
stopTimer("Device data allocation Time= %.6f ms\n", elapsed);
/*startTimer();
stopTimer("Overhead of timer= %.6f ms\n", elapsed);*/
int times = 1;
startTimer();
//Generate Pseudo Random Seq.
Byte *c_h = 0;
generate_psuedo_random_seq(&c_h, N_bits, n_RNTI, n_s, N_id_cell);
//Copy (c) to Device
hipMemcpyAsync(c_d, c_h, sizeof(Byte)*N_bits, hipMemcpyHostToDevice, stream_default);
//for (int i = 0; i < times; i++)
//{
//Interleaver
interleaver(inputBits_d, riBits_d, &interleaved_d, N_bits, N_ri, Qm, N_l, y_idx_d, y_mat_d);
//Generate DMRS
generate_dmrs_pusch(0, N_id_cell, 0, 0, 0, 0, 0, "fixed", M_pusch_rb, 0, &dmrs_1_d, &dmrs_2_d, x_q_d, stream_dmrs);
//Scrambler
scrambler(interleaved_d, &scrambledbits_d, c_d, N_bits + N_ri);
//Mapper
mapper(scrambledbits_d, N_bits + N_ri, Qm, M_pusch_rb, cuComplex_symbols_d, bits_each_Qm_d); // Mohammed
//Transform Precoder
transform_precoder(&precoded_symbols_d, plan_transform_precoder, cuComplex_symbols_d);
//Multiplexing the DMRS with the Data
compose_subframe(precoded_symbols_d, dmrs_1_d, dmrs_2_d, M_pusch_rb, &subframe_d);
// Generate SC-FDMA signal
sc_fdma_modulator(subframe_d, M_pusch_rb, &pusch_bb_d, plan_sc_fdma, ifft_vec_d);
//timer_test << <1, 1 >> > ();
//startTimer();
hipfftComplex *pusch_bb_h = (hipfftComplex *)malloc(sizeof(hipfftComplex)*(30720));
hipMemcpyAsync(pusch_bb_h, pusch_bb_d, sizeof(hipfftComplex)*(30720), hipMemcpyDeviceToHost, stream_default);
//}
stopTimer("Processing Time= %.6f ms\n", elapsed/ times);
//To compare with MATLAB results
//Run the file (output.m)
//int NNN = modulated_subframe_length;
//FILE *results;
//if ((results = freopen("output.m", "w+", stdout)) == NULL) {
// printf("Cannot open file.\n");
// exit(1);
//}
//printf("clear; clc;\nsymbols_real = [ ");
//for (int i = 0; i < NNN; i++)
//{
// printf("%10f", pusch_bb_h[i].x);
// if (i != (NNN -1))
// printf(",");
//}
//printf(" ];\nsymbols_imag = [ ");
//for (int i = 0; i < NNN; i++)
//{
// printf("%10f", pusch_bb_h[i].y);
// if (i != (NNN -1))
// printf(",");
//}
//printf(" ];\n");
//printf("symbols_CUDA = symbols_real + 1i * symbols_imag;\n");
////Matlab code
//printf("matlab_test");
//fclose(results);
//if ((results = freopen("matlab_test.m", "w+", stdout)) == NULL) {
// printf("Cannot open file.\n");
// exit(1);
//}
//printf("N_bits = %d; \n", N_bits);
//if(Qm == 6)
// printf("mod_type = %s; \n", "'64qam'");
//else if (Qm == 4)
// printf("mod_type = %s; \n", "'16qam'");
//else if (Qm == 2)
// printf("mod_type = %s; \n", "'qpsk'");
//else if (Qm == 1)
// printf("mod_type = %s; \n", "'bpsk'");
//
//printf("N_sc_rb = 12; %% number of subcarriers in each resource block\n");
//printf("M_pusch_rb = %d; %% number of resource blocks assigned to the UE\n", M_pusch_rb);
//printf("M_pusch_sc = M_pusch_rb*N_sc_rb; %% total number of subcarriers\n\n");
//printf("N_l = %d; \nQ_m = %d; \ndata_bits = (fread(fopen('%s')) - '0').';\nri_bits = (fread(fopen('%s'))-'0').'; \n", N_l, Qm, argv[1], argv[2]);
//printf("interleaved_bits = channel_interleaver(data_bits, ri_bits, [], Q_m, N_l); \nc_init = 10 * 2 ^ 14 + floor(0 / 2) * 2 ^ 9 + 2; \nc = generate_psuedo_random_seq(c_init, N_bits); \nb_scrampled = scrambler(interleaved_bits, c); \nmapped = mapper(b_scrampled, mod_type); \nprecoded_data = transform_precoder(mapped, M_pusch_rb); \n\ndmrs = generate_dmrs_pusch(0, 2, 0, 0, 0, 0, 0, 'fixed', M_pusch_rb, 0);\ndmrs_1 = dmrs(1:M_pusch_sc);\ndmrs_2 = dmrs(M_pusch_sc+1:2*M_pusch_sc);\nsubframe_1 = compose_subframe(precoded_data, dmrs_1, dmrs_2, M_pusch_rb);\nsymbols_MATLAB = sc_fdma_modulator(subframe_1, M_pusch_rb);\n\nsum((abs(symbols_MATLAB) - abs(symbols_CUDA)))");
//fclose(results);
// Free allocated memory
// free device arrays
hipFree(inputBits_d);
hipFree(riBits_d);
hipFree(c_d);
hipFree(y_idx_d);
hipFree(y_mat_d);
hipFree(interleaved_d);
hipFree(scrambledbits_d);
hipFree(bits_each_Qm_d);
hipFree(cuComplex_symbols_d);
hipFree(precoded_symbols_d);
hipFree(dmrs_1_d);
hipFree(dmrs_2_d);
hipFree(x_q_d);
hipFree(subframe_d);
hipFree(ifft_vec_d);
hipFree(pusch_bb_d);
// free host arrays
//free(inputBits_h);
}
|
c02cf8b6c30f4756222f607daced9e568a4aa299.cu
|
#include "main.cuh"
int main(int argc, char **argv) {
// Create default stream for the chain
cudaStream_t stream_dmrs;
cudaStreamCreate(&stream_default);
cudaStreamCreate(&stream_dmrs);
//For timing purpose
timerInit();
startTimer();
int N_bits, N_ri;
const int Qm = 6; // 64QAM Modulation
const int N_l = 1; // Number of Layers
// Physical layer cell identity (we need for generation of random sequence)
int N_id_cell = 2; // assume enodeB scheduled cell 2 for the UE
int M_pusch_rb = 100; // number of resource blocks assigned to the UE
int n_s = 0; // assume UE send on time slot 0
int n_RNTI = 10; // radio network temporary identifier given to the UE by enodeB(assume 10)
// (UNUSED) int N_subfr = 0; // Subframe number within a radio frame
BYTE* inputBits_h = readBits(argc, argv[1], &N_bits); //Get input bits from the text file
BYTE* riBits_h = readBits(argc, argv[2], &N_ri); //Get RI bits from the text file
//cudaMalloc & cudaMemcpy for inputBits & RI_Bits to Device
Byte *inputBits_d = 0, *riBits_d = 0;
cudaMalloc((void **)&inputBits_d, sizeof(Byte)*N_bits);
cudaMalloc((void **)&riBits_d, sizeof(Byte)*N_ri);
Byte* c_d = 0;
cudaMalloc((void **)&c_d, sizeof(Byte)*N_bits);
// Copy data to the device using different stream
cudaStream_t stream_mem;
cudaStreamCreate(&stream_mem);
cudaMemcpyAsync(inputBits_d, inputBits_h, sizeof(Byte)*N_bits, cudaMemcpyHostToDevice, stream_mem);
cudaMemcpyAsync(riBits_d, riBits_h, sizeof(Byte)*N_ri, cudaMemcpyHostToDevice, stream_default);
stopTimer("cudaMalloc & cudaMemcpy for inputBits & RI_Bits Time= %.6f ms\n", elapsed);
//Create Plans
startTimer();
cufftHandle plan_transform_precoder;
int n[1] = { N_sc_rb*M_pusch_rb };
cufftPlanMany(&plan_transform_precoder, 1, n, NULL, 1, n[0], NULL, 1, N_sc_rb*M_pusch_rb, CUFFT_C2C, ((N_bits + N_ri) / Qm)/n[0]);
cufftHandle plan_sc_fdma;
n[0] = { FFT_size };
cufftPlanMany(&plan_sc_fdma, 1, n, NULL, 1, FFT_size, NULL, 1, FFT_size, CUFFT_C2C, N_symbs_per_subframe);
stopTimer("Create Plans Time= %.6f ms\n", elapsed);
//Device data allocation
startTimer();
//timer_test << <1, 1 >> > ();
int data_vec_len = Qm*N_l;
// (UNUSED) int ri_vec_len = Qm*N_l;
int N_data_bits = N_bits / data_vec_len;
int N_ri_bits = N_ri / data_vec_len;
int H_prime = N_data_bits;
// (UNUSED) int H_vec_len = data_vec_len;
int H_prime_total = H_prime + N_ri_bits;
int R_mux = (H_prime_total*Qm*N_l) / N_pusch_symbs;
int R_prime_mux = R_mux / (Qm*N_l);
Byte *y_idx_d, *y_mat_d, *interleaved_d;
cudaMalloc((void **)&y_idx_d, sizeof(Byte)*(N_pusch_symbs * R_prime_mux));
cudaMalloc((void **)&y_mat_d, sizeof(Byte)*(N_pusch_symbs*R_mux));
cudaMalloc((void **)&interleaved_d, sizeof(Byte)*(N_pusch_symbs*R_mux));
Byte *scrambledbits_d = 0;
cudaMalloc((void **)&scrambledbits_d, sizeof(Byte)*N_bits);
Byte *bits_each_Qm_d;
//float* symbols_R_d = 0, *symbols_I_d = 0;
cudaMalloc((void **)&bits_each_Qm_d, sizeof(Byte)*(N_bits / Qm));
//cudaMalloc((void **)&symbols_R_d, sizeof(float)*(N_bits / Qm));
//cudaMalloc((void **)&symbols_I_d, sizeof(float)*(N_bits / Qm));
cufftComplex *precoded_symbols_d = 0, *cuComplex_symbols_d = 0;
cudaMalloc((void **)&cuComplex_symbols_d, sizeof(cufftComplex)*(N_bits / Qm));
cudaMalloc((void **)&precoded_symbols_d, sizeof(cufftComplex)*(N_bits / Qm));
cufftComplex* x_q_d;
cufftComplex* dmrs_1_d = 0, *dmrs_2_d = 0;
cudaMalloc((void **)&dmrs_1_d, sizeof(cufftComplex)*N_sc_rb*M_pusch_rb);
cudaMalloc((void **)&dmrs_2_d, sizeof(cufftComplex)*N_sc_rb*M_pusch_rb);
cudaMalloc((void **)&x_q_d, sizeof(cufftComplex)*prime_nums[M_pusch_rb - 1]);
cufftComplex *subframe_d = 0;
cudaMalloc((void **)&subframe_d, sizeof(cufftComplex)*N_symbs_per_subframe*N_sc_rb*M_pusch_rb);
cufftComplex* ifft_vec_d;
cufftComplex *pusch_bb_d = 0;
cudaMalloc((void **)&ifft_vec_d, sizeof(cufftComplex)*N_symbs_per_subframe*FFT_size);
cudaMalloc((void **)&pusch_bb_d, sizeof(cufftComplex)*modulated_subframe_length);
stopTimer("Device data allocation Time= %.6f ms\n", elapsed);
/*startTimer();
stopTimer("Overhead of timer= %.6f ms\n", elapsed);*/
int times = 1;
startTimer();
//Generate Pseudo Random Seq.
Byte *c_h = 0;
generate_psuedo_random_seq(&c_h, N_bits, n_RNTI, n_s, N_id_cell);
//Copy (c) to Device
cudaMemcpyAsync(c_d, c_h, sizeof(Byte)*N_bits, cudaMemcpyHostToDevice, stream_default);
//for (int i = 0; i < times; i++)
//{
//Interleaver
interleaver(inputBits_d, riBits_d, &interleaved_d, N_bits, N_ri, Qm, N_l, y_idx_d, y_mat_d);
//Generate DMRS
generate_dmrs_pusch(0, N_id_cell, 0, 0, 0, 0, 0, "fixed", M_pusch_rb, 0, &dmrs_1_d, &dmrs_2_d, x_q_d, stream_dmrs);
//Scrambler
scrambler(interleaved_d, &scrambledbits_d, c_d, N_bits + N_ri);
//Mapper
mapper(scrambledbits_d, N_bits + N_ri, Qm, M_pusch_rb, cuComplex_symbols_d, bits_each_Qm_d); // Mohammed
//Transform Precoder
transform_precoder(&precoded_symbols_d, plan_transform_precoder, cuComplex_symbols_d);
//Multiplexing the DMRS with the Data
compose_subframe(precoded_symbols_d, dmrs_1_d, dmrs_2_d, M_pusch_rb, &subframe_d);
// Generate SC-FDMA signal
sc_fdma_modulator(subframe_d, M_pusch_rb, &pusch_bb_d, plan_sc_fdma, ifft_vec_d);
//timer_test << <1, 1 >> > ();
//startTimer();
cufftComplex *pusch_bb_h = (cufftComplex *)malloc(sizeof(cufftComplex)*(30720));
cudaMemcpyAsync(pusch_bb_h, pusch_bb_d, sizeof(cufftComplex)*(30720), cudaMemcpyDeviceToHost, stream_default);
//}
stopTimer("Processing Time= %.6f ms\n", elapsed/ times);
//To compare with MATLAB results
//Run the file (output.m)
//int NNN = modulated_subframe_length;
//FILE *results;
//if ((results = freopen("output.m", "w+", stdout)) == NULL) {
// printf("Cannot open file.\n");
// exit(1);
//}
//printf("clear; clc;\nsymbols_real = [ ");
//for (int i = 0; i < NNN; i++)
//{
// printf("%10f", pusch_bb_h[i].x);
// if (i != (NNN -1))
// printf(",");
//}
//printf(" ];\nsymbols_imag = [ ");
//for (int i = 0; i < NNN; i++)
//{
// printf("%10f", pusch_bb_h[i].y);
// if (i != (NNN -1))
// printf(",");
//}
//printf(" ];\n");
//printf("symbols_CUDA = symbols_real + 1i * symbols_imag;\n");
////Matlab code
//printf("matlab_test");
//fclose(results);
//if ((results = freopen("matlab_test.m", "w+", stdout)) == NULL) {
// printf("Cannot open file.\n");
// exit(1);
//}
//printf("N_bits = %d; \n", N_bits);
//if(Qm == 6)
// printf("mod_type = %s; \n", "'64qam'");
//else if (Qm == 4)
// printf("mod_type = %s; \n", "'16qam'");
//else if (Qm == 2)
// printf("mod_type = %s; \n", "'qpsk'");
//else if (Qm == 1)
// printf("mod_type = %s; \n", "'bpsk'");
//
//printf("N_sc_rb = 12; %% number of subcarriers in each resource block\n");
//printf("M_pusch_rb = %d; %% number of resource blocks assigned to the UE\n", M_pusch_rb);
//printf("M_pusch_sc = M_pusch_rb*N_sc_rb; %% total number of subcarriers\n\n");
//printf("N_l = %d; \nQ_m = %d; \ndata_bits = (fread(fopen('%s')) - '0').';\nri_bits = (fread(fopen('%s'))-'0').'; \n", N_l, Qm, argv[1], argv[2]);
//printf("interleaved_bits = channel_interleaver(data_bits, ri_bits, [], Q_m, N_l); \nc_init = 10 * 2 ^ 14 + floor(0 / 2) * 2 ^ 9 + 2; \nc = generate_psuedo_random_seq(c_init, N_bits); \nb_scrampled = scrambler(interleaved_bits, c); \nmapped = mapper(b_scrampled, mod_type); \nprecoded_data = transform_precoder(mapped, M_pusch_rb); \n\ndmrs = generate_dmrs_pusch(0, 2, 0, 0, 0, 0, 0, 'fixed', M_pusch_rb, 0);\ndmrs_1 = dmrs(1:M_pusch_sc);\ndmrs_2 = dmrs(M_pusch_sc+1:2*M_pusch_sc);\nsubframe_1 = compose_subframe(precoded_data, dmrs_1, dmrs_2, M_pusch_rb);\nsymbols_MATLAB = sc_fdma_modulator(subframe_1, M_pusch_rb);\n\nsum((abs(symbols_MATLAB) - abs(symbols_CUDA)))");
//fclose(results);
// Free allocated memory
// free device arrays
cudaFree(inputBits_d);
cudaFree(riBits_d);
cudaFree(c_d);
cudaFree(y_idx_d);
cudaFree(y_mat_d);
cudaFree(interleaved_d);
cudaFree(scrambledbits_d);
cudaFree(bits_each_Qm_d);
cudaFree(cuComplex_symbols_d);
cudaFree(precoded_symbols_d);
cudaFree(dmrs_1_d);
cudaFree(dmrs_2_d);
cudaFree(x_q_d);
cudaFree(subframe_d);
cudaFree(ifft_vec_d);
cudaFree(pusch_bb_d);
// free host arrays
//free(inputBits_h);
}
|
16ac66015054fdffeaab467072a31b59f483666b.hip
|
// !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
// Copyright (c) 2009-2018 The Regents of the University of Michigan
// This file is part of the HOOMD-blue project, released under the BSD 3-Clause License.
// Maintainer: jglaser
/*! \file SFCPackUpdaterGPU.cu
\brief Defines GPU kernel code for generating the space-filling curve sorted order on the GPU. Used by SFCPackUpdaterGPU.
*/
#include "SFCPackUpdaterGPU.cuh"
#include "hoomd/extern/kernels/mergesort.cuh"
//! Kernel to bin particles
//! Assigns each particle a space-filling-curve bin key and records its index.
/*! One thread per particle. The position is converted to fractional box
    coordinates, mapped to integer cell coordinates on an n_grid^d grid,
    clamped back into range, and the resulting bin key (2D: row-major cell id;
    3D: Hilbert-curve traversal rank of the cell) is written together with the
    particle's own index so a subsequent key/value sort yields the SFC order.

    \param N Number of local particles
    \param d_pos Particle positions (w component unused here)
    \param d_particle_bins Output: bin key per particle
    \param d_traversal_order Hilbert-curve rank per 3D cell (unused when twod)
    \param n_grid Number of grid cells along one edge
    \param d_sorted_order Output: identity permutation (idx), sorted later
    \param box Box used to compute fractional coordinates
    \tparam twod If true, bin in x/y only (no Hilbert curve)
*/
template<bool twod>
__global__ void gpu_sfc_bin_particles_kernel(unsigned int N,
                                             const Scalar4 *d_pos,
                                             unsigned int *d_particle_bins,
                                             const unsigned int *d_traversal_order,
                                             unsigned int n_grid,
                                             unsigned int *d_sorted_order,
                                             const BoxDim box)
    {
    // one thread per particle; surplus threads in the last block exit
    unsigned int idx = blockDim.x * blockIdx.x + threadIdx.x;
    if (idx >= N) return;
    // fetch particle position
    Scalar4 postype = d_pos[idx];
    Scalar3 p = make_scalar3(postype.x, postype.y, postype.z);
    // fractional coordinates, ~[0,1) for particles inside the box
    Scalar3 f = box.makeFraction(p);
    // NOTE(review): for f >= 1 the unsigned modulo wraps the cell index to 0
    // rather than clamping to n_grid-1; the clamps below do not catch that
    // path -- confirm wrap-around is the intended handling of outliers
    int ib = (unsigned int)(f.x * n_grid) % n_grid;
    int jb = (unsigned int)(f.y * n_grid) % n_grid;
    int kb = (unsigned int)(f.z * n_grid) % n_grid;
    // if the particle is slightly outside, move back into grid
    if (ib < 0) ib = 0;
    if (ib >= n_grid) ib = n_grid - 1;
    if (jb < 0) jb = 0;
    if (jb >= n_grid) jb = n_grid - 1;
    if (kb < 0) kb = 0;
    if (kb >= n_grid) kb = n_grid - 1;
    // record its bin
    unsigned int bin;
    if (twod)
        {
        // do not use Hilbert curve in 2D: plain row-major cell index
        bin = ib*n_grid + jb;
        d_particle_bins[idx] = bin;
        }
    else
        {
        // 3D: look up the cell's rank along the Hilbert traversal
        bin = ib*(n_grid*n_grid) + jb * n_grid + kb;
        d_particle_bins[idx] = d_traversal_order[bin];
        }
    // store index of ptl (identity permutation, to be sorted by bin key)
    d_sorted_order[idx] = idx;
    }
/*! \param N number of local particles
\param d_pos Device array of positions
\param d_particle_bins Device array of particle bins
\param d_traversal_order Device array of Hilbert-curve bins
\param n_grid Number of grid elements along one edge
\param d_sorted_order Sorted order of particles
\param box Box dimensions
\param twod If true, bin particles in two dimensions
*/
// Bin all particles onto the space-filling-curve grid, then sort the
// (bin, particle index) pairs so d_sorted_order holds the SFC permutation.
void gpu_generate_sorted_order(unsigned int N,
                               const Scalar4 *d_pos,
                               unsigned int *d_particle_bins,
                               unsigned int *d_traversal_order,
                               unsigned int n_grid,
                               unsigned int *d_sorted_order,
                               const BoxDim& box,
                               bool twod,
                               mgpu::ContextPtr mgpu_context)
    {
    // fixed launch configuration; SFCPackUpdater runs infrequently, so
    // autotuning is not worth the effort (the +1 also keeps N==0 launchable)
    const unsigned int threads_per_block = 512;
    const unsigned int num_blocks = N / threads_per_block + 1;
    if (!twod)
        hipLaunchKernelGGL(( gpu_sfc_bin_particles_kernel<false>), dim3(num_blocks), dim3(threads_per_block), 0, 0, N, d_pos, d_particle_bins, d_traversal_order, n_grid, d_sorted_order, box);
    else
        hipLaunchKernelGGL(( gpu_sfc_bin_particles_kernel<true>), dim3(num_blocks), dim3(threads_per_block), 0, 0, N, d_pos, d_particle_bins, d_traversal_order, n_grid, d_sorted_order, box);
    // sort particle indices by bin key (skip when there is nothing to sort)
    if (N)
        mgpu::MergesortPairs(d_particle_bins, d_sorted_order, N, *mgpu_context);
    }
//! Kernel to apply sorted order
//! Gathers every per-particle array from its old index into the new sorted
//! slot. One thread per local+ghost particle; ghosts keep their position.
//! For local particles the reverse tag lookup (d_rtag) is updated as well.
__global__ void gpu_apply_sorted_order_kernel(
    unsigned int N,
    unsigned int n_ghost,
    const unsigned int *d_sorted_order,
    const Scalar4 *d_pos,
    Scalar4 *d_pos_alt,
    const Scalar4 *d_vel,
    Scalar4 *d_vel_alt,
    const Scalar3 *d_accel,
    Scalar3 *d_accel_alt,
    const Scalar *d_charge,
    Scalar *d_charge_alt,
    const Scalar *d_diameter,
    Scalar *d_diameter_alt,
    const int3 *d_image,
    int3 *d_image_alt,
    const unsigned int *d_body,
    unsigned int *d_body_alt,
    const unsigned int *d_tag,
    unsigned int *d_tag_alt,
    const Scalar4 *d_orientation,
    Scalar4 *d_orientation_alt,
    const Scalar4 *d_angmom,
    Scalar4 *d_angmom_alt,
    const Scalar3 *d_inertia,
    Scalar3 *d_inertia_alt,
    const Scalar *d_net_virial,
    Scalar *d_net_virial_alt,
    unsigned int virial_pitch,
    const Scalar4 *d_net_force,
    Scalar4 *d_net_force_alt,
    const Scalar4 *d_net_torque,
    Scalar4 *d_net_torque_alt,
    unsigned int *d_rtag)
    {
    unsigned int idx = blockIdx.x * blockDim.x + threadIdx.x;
    if (idx >= N+n_ghost) return;
    // apply sorted order only for local ptls; ghosts (idx >= N) stay in place
    unsigned int old_idx = (idx < N ? d_sorted_order[idx] : idx);
    // permute and copy over particle data (gather: new slot idx <- old_idx)
    d_pos_alt[idx] = d_pos[old_idx];
    d_vel_alt[idx] = d_vel[old_idx];
    d_accel_alt[idx] = d_accel[old_idx];
    d_charge_alt[idx] = d_charge[old_idx];
    d_diameter_alt[idx] = d_diameter[old_idx];
    d_image_alt[idx] = d_image[old_idx];
    d_body_alt[idx] = d_body[old_idx];
    unsigned int tag = d_tag[old_idx];
    d_tag_alt[idx] = tag;
    d_orientation_alt[idx] = d_orientation[old_idx];
    d_angmom_alt[idx] = d_angmom[old_idx];
    d_inertia_alt[idx] = d_inertia[old_idx];
    // the six virial components are separate rows spaced by virial_pitch
    d_net_virial_alt[0*virial_pitch+idx] = d_net_virial[0*virial_pitch+old_idx];
    d_net_virial_alt[1*virial_pitch+idx] = d_net_virial[1*virial_pitch+old_idx];
    d_net_virial_alt[2*virial_pitch+idx] = d_net_virial[2*virial_pitch+old_idx];
    d_net_virial_alt[3*virial_pitch+idx] = d_net_virial[3*virial_pitch+old_idx];
    d_net_virial_alt[4*virial_pitch+idx] = d_net_virial[4*virial_pitch+old_idx];
    d_net_virial_alt[5*virial_pitch+idx] = d_net_virial[5*virial_pitch+old_idx];
    d_net_force_alt[idx] = d_net_force[old_idx];
    d_net_torque_alt[idx] = d_net_torque[old_idx];
    if (idx < N)
        {
        // update rtag to point to particle position in new arrays
        d_rtag[tag] = idx;
        }
    }
// Host wrapper: launch one thread per local+ghost particle to scatter all
// per-particle arrays into their sorted positions (see kernel above).
void gpu_apply_sorted_order(
    unsigned int N,
    unsigned int n_ghost,
    const unsigned int *d_sorted_order,
    const Scalar4 *d_pos,
    Scalar4 *d_pos_alt,
    const Scalar4 *d_vel,
    Scalar4 *d_vel_alt,
    const Scalar3 *d_accel,
    Scalar3 *d_accel_alt,
    const Scalar *d_charge,
    Scalar *d_charge_alt,
    const Scalar *d_diameter,
    Scalar *d_diameter_alt,
    const int3 *d_image,
    int3 *d_image_alt,
    const unsigned int *d_body,
    unsigned int *d_body_alt,
    const unsigned int *d_tag,
    unsigned int *d_tag_alt,
    const Scalar4 *d_orientation,
    Scalar4 *d_orientation_alt,
    const Scalar4 *d_angmom,
    Scalar4 *d_angmom_alt,
    const Scalar3 *d_inertia,
    Scalar3 *d_inertia_alt,
    const Scalar *d_net_virial,
    Scalar *d_net_virial_alt,
    unsigned int virial_pitch,
    const Scalar4 *d_net_force,
    Scalar4 *d_net_force_alt,
    const Scalar4 *d_net_torque,
    Scalar4 *d_net_torque_alt,
    unsigned int *d_rtag
    )
    {
    // cover N + n_ghost particles; the kernel bounds-checks the overshoot
    const unsigned int threads_per_block = 512;
    const unsigned int num_blocks = (N+n_ghost)/threads_per_block + 1;
    hipLaunchKernelGGL(( gpu_apply_sorted_order_kernel), dim3(num_blocks), dim3(threads_per_block), 0, 0,
        N, n_ghost, d_sorted_order,
        d_pos, d_pos_alt,
        d_vel, d_vel_alt,
        d_accel, d_accel_alt,
        d_charge, d_charge_alt,
        d_diameter, d_diameter_alt,
        d_image, d_image_alt,
        d_body, d_body_alt,
        d_tag, d_tag_alt,
        d_orientation, d_orientation_alt,
        d_angmom, d_angmom_alt,
        d_inertia, d_inertia_alt,
        d_net_virial, d_net_virial_alt, virial_pitch,
        d_net_force, d_net_force_alt,
        d_net_torque, d_net_torque_alt,
        d_rtag);
    }
|
16ac66015054fdffeaab467072a31b59f483666b.cu
|
// Copyright (c) 2009-2018 The Regents of the University of Michigan
// This file is part of the HOOMD-blue project, released under the BSD 3-Clause License.
// Maintainer: jglaser
/*! \file SFCPackUpdaterGPU.cu
\brief Defines GPU kernel code for generating the space-filling curve sorted order on the GPU. Used by SFCPackUpdaterGPU.
*/
#include "SFCPackUpdaterGPU.cuh"
#include "hoomd/extern/kernels/mergesort.cuh"
//! Kernel to bin particles
//! Assigns each particle a space-filling-curve bin key and records its index.
/*! One thread per particle. The position is converted to fractional box
    coordinates, mapped to integer cell coordinates on an n_grid^d grid,
    clamped back into range, and the resulting bin key (2D: row-major cell id;
    3D: Hilbert-curve traversal rank of the cell) is written together with the
    particle's own index so a subsequent key/value sort yields the SFC order.

    \param N Number of local particles
    \param d_pos Particle positions (w component unused here)
    \param d_particle_bins Output: bin key per particle
    \param d_traversal_order Hilbert-curve rank per 3D cell (unused when twod)
    \param n_grid Number of grid cells along one edge
    \param d_sorted_order Output: identity permutation (idx), sorted later
    \param box Box used to compute fractional coordinates
    \tparam twod If true, bin in x/y only (no Hilbert curve)
*/
template<bool twod>
__global__ void gpu_sfc_bin_particles_kernel(unsigned int N,
                                             const Scalar4 *d_pos,
                                             unsigned int *d_particle_bins,
                                             const unsigned int *d_traversal_order,
                                             unsigned int n_grid,
                                             unsigned int *d_sorted_order,
                                             const BoxDim box)
    {
    // one thread per particle; surplus threads in the last block exit
    unsigned int idx = blockDim.x * blockIdx.x + threadIdx.x;
    if (idx >= N) return;
    // fetch particle position
    Scalar4 postype = d_pos[idx];
    Scalar3 p = make_scalar3(postype.x, postype.y, postype.z);
    // fractional coordinates, ~[0,1) for particles inside the box
    Scalar3 f = box.makeFraction(p);
    // NOTE(review): for f >= 1 the unsigned modulo wraps the cell index to 0
    // rather than clamping to n_grid-1; the clamps below do not catch that
    // path -- confirm wrap-around is the intended handling of outliers
    int ib = (unsigned int)(f.x * n_grid) % n_grid;
    int jb = (unsigned int)(f.y * n_grid) % n_grid;
    int kb = (unsigned int)(f.z * n_grid) % n_grid;
    // if the particle is slightly outside, move back into grid
    if (ib < 0) ib = 0;
    if (ib >= n_grid) ib = n_grid - 1;
    if (jb < 0) jb = 0;
    if (jb >= n_grid) jb = n_grid - 1;
    if (kb < 0) kb = 0;
    if (kb >= n_grid) kb = n_grid - 1;
    // record its bin
    unsigned int bin;
    if (twod)
        {
        // do not use Hilbert curve in 2D: plain row-major cell index
        bin = ib*n_grid + jb;
        d_particle_bins[idx] = bin;
        }
    else
        {
        // 3D: look up the cell's rank along the Hilbert traversal
        bin = ib*(n_grid*n_grid) + jb * n_grid + kb;
        d_particle_bins[idx] = d_traversal_order[bin];
        }
    // store index of ptl (identity permutation, to be sorted by bin key)
    d_sorted_order[idx] = idx;
    }
/*! \param N number of local particles
\param d_pos Device array of positions
\param d_particle_bins Device array of particle bins
\param d_traversal_order Device array of Hilbert-curve bins
\param n_grid Number of grid elements along one edge
\param d_sorted_order Sorted order of particles
\param box Box dimensions
\param twod If true, bin particles in two dimensions
*/
// Bin all particles onto the space-filling-curve grid, then sort the
// (bin, particle index) pairs so d_sorted_order holds the SFC permutation.
void gpu_generate_sorted_order(unsigned int N,
                               const Scalar4 *d_pos,
                               unsigned int *d_particle_bins,
                               unsigned int *d_traversal_order,
                               unsigned int n_grid,
                               unsigned int *d_sorted_order,
                               const BoxDim& box,
                               bool twod,
                               mgpu::ContextPtr mgpu_context)
    {
    // fixed launch configuration; SFCPackUpdater runs infrequently, so
    // autotuning is not worth the effort (the +1 also keeps N==0 launchable)
    const unsigned int threads_per_block = 512;
    const unsigned int num_blocks = N / threads_per_block + 1;
    if (!twod)
        gpu_sfc_bin_particles_kernel<false><<<num_blocks, threads_per_block>>>(N, d_pos, d_particle_bins, d_traversal_order, n_grid, d_sorted_order, box);
    else
        gpu_sfc_bin_particles_kernel<true><<<num_blocks, threads_per_block>>>(N, d_pos, d_particle_bins, d_traversal_order, n_grid, d_sorted_order, box);
    // sort particle indices by bin key (skip when there is nothing to sort)
    if (N)
        mgpu::MergesortPairs(d_particle_bins, d_sorted_order, N, *mgpu_context);
    }
//! Kernel to apply sorted order
//! Gathers every per-particle array from its old index into the new sorted
//! slot. One thread per local+ghost particle; ghosts keep their position.
//! For local particles the reverse tag lookup (d_rtag) is updated as well.
__global__ void gpu_apply_sorted_order_kernel(
    unsigned int N,
    unsigned int n_ghost,
    const unsigned int *d_sorted_order,
    const Scalar4 *d_pos,
    Scalar4 *d_pos_alt,
    const Scalar4 *d_vel,
    Scalar4 *d_vel_alt,
    const Scalar3 *d_accel,
    Scalar3 *d_accel_alt,
    const Scalar *d_charge,
    Scalar *d_charge_alt,
    const Scalar *d_diameter,
    Scalar *d_diameter_alt,
    const int3 *d_image,
    int3 *d_image_alt,
    const unsigned int *d_body,
    unsigned int *d_body_alt,
    const unsigned int *d_tag,
    unsigned int *d_tag_alt,
    const Scalar4 *d_orientation,
    Scalar4 *d_orientation_alt,
    const Scalar4 *d_angmom,
    Scalar4 *d_angmom_alt,
    const Scalar3 *d_inertia,
    Scalar3 *d_inertia_alt,
    const Scalar *d_net_virial,
    Scalar *d_net_virial_alt,
    unsigned int virial_pitch,
    const Scalar4 *d_net_force,
    Scalar4 *d_net_force_alt,
    const Scalar4 *d_net_torque,
    Scalar4 *d_net_torque_alt,
    unsigned int *d_rtag)
    {
    unsigned int idx = blockIdx.x * blockDim.x + threadIdx.x;
    if (idx >= N+n_ghost) return;
    // apply sorted order only for local ptls; ghosts (idx >= N) stay in place
    unsigned int old_idx = (idx < N ? d_sorted_order[idx] : idx);
    // permute and copy over particle data (gather: new slot idx <- old_idx)
    d_pos_alt[idx] = d_pos[old_idx];
    d_vel_alt[idx] = d_vel[old_idx];
    d_accel_alt[idx] = d_accel[old_idx];
    d_charge_alt[idx] = d_charge[old_idx];
    d_diameter_alt[idx] = d_diameter[old_idx];
    d_image_alt[idx] = d_image[old_idx];
    d_body_alt[idx] = d_body[old_idx];
    unsigned int tag = d_tag[old_idx];
    d_tag_alt[idx] = tag;
    d_orientation_alt[idx] = d_orientation[old_idx];
    d_angmom_alt[idx] = d_angmom[old_idx];
    d_inertia_alt[idx] = d_inertia[old_idx];
    // the six virial components are separate rows spaced by virial_pitch
    d_net_virial_alt[0*virial_pitch+idx] = d_net_virial[0*virial_pitch+old_idx];
    d_net_virial_alt[1*virial_pitch+idx] = d_net_virial[1*virial_pitch+old_idx];
    d_net_virial_alt[2*virial_pitch+idx] = d_net_virial[2*virial_pitch+old_idx];
    d_net_virial_alt[3*virial_pitch+idx] = d_net_virial[3*virial_pitch+old_idx];
    d_net_virial_alt[4*virial_pitch+idx] = d_net_virial[4*virial_pitch+old_idx];
    d_net_virial_alt[5*virial_pitch+idx] = d_net_virial[5*virial_pitch+old_idx];
    d_net_force_alt[idx] = d_net_force[old_idx];
    d_net_torque_alt[idx] = d_net_torque[old_idx];
    if (idx < N)
        {
        // update rtag to point to particle position in new arrays
        d_rtag[tag] = idx;
        }
    }
// Host wrapper: launch one thread per local+ghost particle to scatter all
// per-particle arrays into their sorted positions (see kernel above).
void gpu_apply_sorted_order(
    unsigned int N,
    unsigned int n_ghost,
    const unsigned int *d_sorted_order,
    const Scalar4 *d_pos,
    Scalar4 *d_pos_alt,
    const Scalar4 *d_vel,
    Scalar4 *d_vel_alt,
    const Scalar3 *d_accel,
    Scalar3 *d_accel_alt,
    const Scalar *d_charge,
    Scalar *d_charge_alt,
    const Scalar *d_diameter,
    Scalar *d_diameter_alt,
    const int3 *d_image,
    int3 *d_image_alt,
    const unsigned int *d_body,
    unsigned int *d_body_alt,
    const unsigned int *d_tag,
    unsigned int *d_tag_alt,
    const Scalar4 *d_orientation,
    Scalar4 *d_orientation_alt,
    const Scalar4 *d_angmom,
    Scalar4 *d_angmom_alt,
    const Scalar3 *d_inertia,
    Scalar3 *d_inertia_alt,
    const Scalar *d_net_virial,
    Scalar *d_net_virial_alt,
    unsigned int virial_pitch,
    const Scalar4 *d_net_force,
    Scalar4 *d_net_force_alt,
    const Scalar4 *d_net_torque,
    Scalar4 *d_net_torque_alt,
    unsigned int *d_rtag
    )
    {
    // cover N + n_ghost particles; the kernel bounds-checks the overshoot
    const unsigned int threads_per_block = 512;
    const unsigned int num_blocks = (N+n_ghost)/threads_per_block + 1;
    gpu_apply_sorted_order_kernel<<<num_blocks, threads_per_block>>>(
        N, n_ghost, d_sorted_order,
        d_pos, d_pos_alt,
        d_vel, d_vel_alt,
        d_accel, d_accel_alt,
        d_charge, d_charge_alt,
        d_diameter, d_diameter_alt,
        d_image, d_image_alt,
        d_body, d_body_alt,
        d_tag, d_tag_alt,
        d_orientation, d_orientation_alt,
        d_angmom, d_angmom_alt,
        d_inertia, d_inertia_alt,
        d_net_virial, d_net_virial_alt, virial_pitch,
        d_net_force, d_net_force_alt,
        d_net_torque, d_net_torque_alt,
        d_rtag);
    }
|
c22775abffb27f1b7c8d517e5557fcd27e7aeb36.hip
|
// !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
#include <caffepro/layers/lrn_layer.h>
#include <caffepro/utils/utils.h>
#include <caffepro/proto/caffe.pb.h>
namespace caffepro {
lrn_layer::lrn_layer(caffepro_context *context, const LayerParameter ¶m)
: caffepro_layer(context, param) {
attr_.num_inputs_min = attr_.num_inputs_max = 1;
attr_.num_outputs_min = attr_.num_outputs_max = 1;
attr_.set_constraint(
layer_attribute::CF_REQUIRE_SAME_DIMTYPE_ACROSS_DEVICES
| layer_attribute::CF_FORBID_INPLACE_USAGE_NEXT_ALWAYS
| layer_attribute::CF_REQUIRE_NDIM_4
| layer_attribute::CF_REQUIRE_FIXEDLEN_DIM // remove it in the future
);
}
	// Destructor: release all resources owned by the layer.
	lrn_layer::~lrn_layer() {
		release_all();
	}
void lrn_layer::init() {
check_input();
size_ = layer_param_.lrn_param().local_size();
pre_pad_ = (size_ - 1) / 2;
alpha_ = this->layer_param_.lrn_param().alpha();
beta_ = this->layer_param_.lrn_param().beta();
scale_.reset(new node_blob());
}
void lrn_layer::resize() {
bool init = (outputs_[0]->size() == 0);
caffepro_layer::resize();
int n_devices = (int)inputs_[0]->size();
for (int nd = 0; nd < n_devices; nd++) {
if (init) {
scale_->add_like(*inputs_[0]->get(nd));
}
else if (inputs_[0]->get(nd)->reshaped()) {
scale_->get(nd)->reshape_like(*inputs_[0]->get(nd));
}
}
}
	// Computes the LRN denominator for every element:
	//   scale[n,c,h,w] = 1 + alpha_over_size * sum_{c' in window(c)} in[n,c',h,w]^2
	// One thread walks all channels of one (n,h,w) column, maintaining a
	// sliding-window sum of squares (add the entering channel, subtract the
	// leaving one). nthreads = num*height*width (one thread per column).
	// NOTE(review): the '1.' literal promotes the arithmetic to double; confirm
	// whether that precision is intentional before changing it.
	__global__ static void lrn_fill_scale(const int nthreads, const data_type* in, const int num, const int channels, const int height,
		const int width, const int size, const data_type alpha_over_size, data_type* scale) {
		CUDA_KERNEL_LOOP(index, nthreads) {
			// find out the local offset of this (n, h, w) column
			int w = index % width;
			int h = (index / width) % height;
			int n = index / width / height;
			int offset = (n * channels * height + h) * width + w;
			int step = height * width; // distance between consecutive channels
			in += offset;
			scale += offset;
			int head = 0;
			int pre_pad = (size - 1) / 2;
			int post_pad = size - pre_pad - 1;
			data_type accum_scale = 0;
			// fill the scale at [n, :, h, w]
			// accumulate values before any output can be produced
			while (head < post_pad && head < channels) {
				accum_scale += in[head * step] * in[head * step];
				++head;
			}
			// until we reach size, nothing needs to be subtracted
			while (head < size && head < channels) {
				accum_scale += in[head * step] * in[head * step];
				scale[(head - post_pad) * step] = 1. + accum_scale * alpha_over_size;
				++head;
			}
			// both add (entering channel) and subtract (leaving channel)
			while (head < channels) {
				accum_scale += in[head * step] * in[head * step];
				accum_scale -= in[(head - size) * step] * in[(head - size) * step];
				scale[(head - post_pad) * step] = 1. + accum_scale * alpha_over_size;
				++head;
			}
			// subtract only: drain the window for the trailing channels
			while (head < channels + post_pad) {
				if (head >= size) {
					accum_scale -= in[(head - size) * step] * in[(head - size) * step];
				}
				if (head >= post_pad) {
					scale[(head - post_pad) * step] = 1. + accum_scale * alpha_over_size;
				}
				++head;
			}
		}
	}
	// Elementwise LRN forward: out = in * scale^negative_beta, where scale was
	// precomputed by lrn_fill_scale and negative_beta = -beta.
	// NOTE(review): pow() on float arguments promotes to double; powf would be
	// cheaper -- confirm precision requirements before changing.
	__global__ static void lrn_compute_output(const int nthreads, const data_type* in,
		const data_type* scale, const data_type negative_beta, data_type* out) {
		CUDA_KERNEL_LOOP(index, nthreads) {
			out[index] = in[index] * pow(scale[index], negative_beta);
		}
	}
void lrn_layer::on_forward(int device_index) {
const data_type* input_data = inputs_[0]->get(device_index)->gpu_data();
data_type* output_data = outputs_[0]->get(device_index)->mutable_gpu_data();
data_type* scale_data = scale_->get(device_index)->mutable_gpu_data();
auto &inputs = *inputs_[0]->get(device_index);
// First, compute scale
// We will launch one kernel for each pixel location, and have the kernel
// go through all the channels.
int n_threads = inputs_[0]->get(device_index)->count() / inputs_[0]->get(device_index)->channels();
KERNEL_CALL(lrn_fill_scale, n_threads)(
n_threads,
input_data,
inputs.num(),
inputs.channels(),
inputs.height(),
inputs.width(),
size_,
alpha_ / size_,
scale_data
);
// then forward
n_threads = inputs_[0]->get(device_index)->count();
KERNEL_CALL(lrn_compute_output, n_threads)(n_threads, input_data, scale_data, -beta_, output_data);
CUDA_POST_KERNEL_CHECK;
}
	// LRN backward: accumulates the input gradient for one (n,h,w) column per
	// thread, using a sliding-window sum of top_diff*top_data/scale over the
	// local window (same add/subtract structure as lrn_fill_scale).
	//   bottom_diff = bottom_diff*scale_targets
	//               + top_diff*scale^negative_beta
	//               - cache_ratio*bottom_data*accum_ratio
	// scale_targets blends into any existing gradient (0 = overwrite).
	// cache_ratio is expected to be 2*alpha*beta/size (set by the caller).
	__global__ static void lrn_compute_diff(const int nthreads, const data_type* bottom_data, const data_type* top_data, const data_type* scale, const data_type* top_diff,
		const int num, const int channels, const int height, const int width, const int size, const data_type negative_beta,
		const data_type cache_ratio, data_type* bottom_diff, const data_type scale_targets) {
		CUDA_KERNEL_LOOP(index, nthreads) {
			// find out the local offset of this (n, h, w) column
			int w = index % width;
			int h = (index / width) % height;
			int n = index / width / height;
			int offset = (n * channels * height + h) * width + w;
			int step = height * width; // distance between consecutive channels
			bottom_data += offset;
			top_data += offset;
			scale += offset;
			top_diff += offset;
			bottom_diff += offset;
			int head = 0;
			int pre_pad = size - (size + 1) / 2;
			int post_pad = size - pre_pad - 1;
			data_type accum_ratio = 0;
			// accumulate values before any output can be produced
			while (head < post_pad && head < channels) {
				accum_ratio += top_diff[head * step] * top_data[head * step] /
					scale[head * step];
				++head;
			}
			// until we reach size, nothing needs to be subtracted
			while (head < size && head < channels) {
				accum_ratio += top_diff[head * step] * top_data[head * step] /
					scale[head * step];
				bottom_diff[(head - post_pad) * step] = bottom_diff[(head - post_pad) * step] * scale_targets
					+ top_diff[(head - post_pad) * step]
					* pow(scale[(head - post_pad) * step], negative_beta) - cache_ratio *
					bottom_data[(head - post_pad) * step] * accum_ratio;
				++head;
			}
			// both add (entering channel) and subtract (leaving channel)
			while (head < channels) {
				accum_ratio += top_diff[head * step] * top_data[head * step] /
					scale[head * step];
				accum_ratio -= top_diff[(head - size) * step] *
					top_data[(head - size) * step] / scale[(head - size) * step];
				bottom_diff[(head - post_pad) * step] = bottom_diff[(head - post_pad) * step] * scale_targets
					+ top_diff[(head - post_pad) * step]
					* pow(scale[(head - post_pad) * step], negative_beta) - cache_ratio *
					bottom_data[(head - post_pad) * step] * accum_ratio;
				++head;
			}
			// subtract only: drain the window for the trailing channels
			while (head < channels + post_pad) {
				if (head >= size) {
					accum_ratio -= top_diff[(head - size) * step] *
						top_data[(head - size) * step] / scale[(head - size) * step];
				}
				if (head >= post_pad) {
					bottom_diff[(head - post_pad) * step] = bottom_diff[(head - post_pad) * step] * scale_targets
						+ top_diff[(head - post_pad) * step]
						* pow(scale[(head - post_pad) * step], negative_beta) - cache_ratio *
						bottom_data[(head - post_pad) * step] * accum_ratio;
				}
				++head;
			}
		}
	}
	// Backward pass on one device: propagate the output gradient to the input
	// via lrn_compute_diff. beta_acts controls accumulation into any existing
	// input gradient (0 = overwrite, in which case the diff is cleared first).
	// Weight selectors are unused: LRN has no learnable parameters.
	void lrn_layer::on_backward(int device_index, act_selector bp_acts, weight_selector bp_weights, act_selector clear_acts_diff, weight_selector clear_weights_diff) {
		const data_type beta_acts = get_beta(clear_acts_diff, 0);
		if (should_bp(bp_acts, 0)) {
			auto &inputs = *inputs_[0]->get(device_index);
			// one thread per (n, h, w) column; each walks all channels
			int n_threads = inputs.count() / inputs.channels();
			if (beta_acts == 0) {
				inputs.fill_diff(0.f);
			}
			KERNEL_CALL(lrn_compute_diff, n_threads)(
				n_threads,
				inputs.gpu_data(),
				outputs_[0]->get(device_index)->gpu_data(),
				scale_->get(device_index)->gpu_data(),
				outputs_[0]->get(device_index)->gpu_diff(),
				inputs.num(),
				inputs.channels(),
				inputs.height(),
				inputs.width(),
				size_,
				-beta_,
				(data_type)(2. * alpha_ * beta_ / size_), // cache_ratio
				inputs.mutable_gpu_diff(),
				beta_acts
			);
			CUDA_POST_KERNEL_CHECK;
		}
	}
}
|
c22775abffb27f1b7c8d517e5557fcd27e7aeb36.cu
|
#include <caffepro/layers/lrn_layer.h>
#include <caffepro/utils/utils.h>
#include <caffepro/proto/caffe.pb.h>
namespace caffepro {
lrn_layer::lrn_layer(caffepro_context *context, const LayerParameter ¶m)
: caffepro_layer(context, param) {
attr_.num_inputs_min = attr_.num_inputs_max = 1;
attr_.num_outputs_min = attr_.num_outputs_max = 1;
attr_.set_constraint(
layer_attribute::CF_REQUIRE_SAME_DIMTYPE_ACROSS_DEVICES
| layer_attribute::CF_FORBID_INPLACE_USAGE_NEXT_ALWAYS
| layer_attribute::CF_REQUIRE_NDIM_4
| layer_attribute::CF_REQUIRE_FIXEDLEN_DIM // remove it in the future
);
}
	// Destructor: release all resources owned by the layer.
	lrn_layer::~lrn_layer() {
		release_all();
	}
void lrn_layer::init() {
check_input();
size_ = layer_param_.lrn_param().local_size();
pre_pad_ = (size_ - 1) / 2;
alpha_ = this->layer_param_.lrn_param().alpha();
beta_ = this->layer_param_.lrn_param().beta();
scale_.reset(new node_blob());
}
void lrn_layer::resize() {
bool init = (outputs_[0]->size() == 0);
caffepro_layer::resize();
int n_devices = (int)inputs_[0]->size();
for (int nd = 0; nd < n_devices; nd++) {
if (init) {
scale_->add_like(*inputs_[0]->get(nd));
}
else if (inputs_[0]->get(nd)->reshaped()) {
scale_->get(nd)->reshape_like(*inputs_[0]->get(nd));
}
}
}
	// Computes the LRN denominator for every element:
	//   scale[n,c,h,w] = 1 + alpha_over_size * sum_{c' in window(c)} in[n,c',h,w]^2
	// One thread walks all channels of one (n,h,w) column, maintaining a
	// sliding-window sum of squares (add the entering channel, subtract the
	// leaving one). nthreads = num*height*width (one thread per column).
	// NOTE(review): the '1.' literal promotes the arithmetic to double; confirm
	// whether that precision is intentional before changing it.
	__global__ static void lrn_fill_scale(const int nthreads, const data_type* in, const int num, const int channels, const int height,
		const int width, const int size, const data_type alpha_over_size, data_type* scale) {
		CUDA_KERNEL_LOOP(index, nthreads) {
			// find out the local offset of this (n, h, w) column
			int w = index % width;
			int h = (index / width) % height;
			int n = index / width / height;
			int offset = (n * channels * height + h) * width + w;
			int step = height * width; // distance between consecutive channels
			in += offset;
			scale += offset;
			int head = 0;
			int pre_pad = (size - 1) / 2;
			int post_pad = size - pre_pad - 1;
			data_type accum_scale = 0;
			// fill the scale at [n, :, h, w]
			// accumulate values before any output can be produced
			while (head < post_pad && head < channels) {
				accum_scale += in[head * step] * in[head * step];
				++head;
			}
			// until we reach size, nothing needs to be subtracted
			while (head < size && head < channels) {
				accum_scale += in[head * step] * in[head * step];
				scale[(head - post_pad) * step] = 1. + accum_scale * alpha_over_size;
				++head;
			}
			// both add (entering channel) and subtract (leaving channel)
			while (head < channels) {
				accum_scale += in[head * step] * in[head * step];
				accum_scale -= in[(head - size) * step] * in[(head - size) * step];
				scale[(head - post_pad) * step] = 1. + accum_scale * alpha_over_size;
				++head;
			}
			// subtract only: drain the window for the trailing channels
			while (head < channels + post_pad) {
				if (head >= size) {
					accum_scale -= in[(head - size) * step] * in[(head - size) * step];
				}
				if (head >= post_pad) {
					scale[(head - post_pad) * step] = 1. + accum_scale * alpha_over_size;
				}
				++head;
			}
		}
	}
	// Elementwise LRN forward: out = in * scale^negative_beta, where scale was
	// precomputed by lrn_fill_scale and negative_beta = -beta.
	// NOTE(review): pow() on float arguments promotes to double; powf would be
	// cheaper -- confirm precision requirements before changing.
	__global__ static void lrn_compute_output(const int nthreads, const data_type* in,
		const data_type* scale, const data_type negative_beta, data_type* out) {
		CUDA_KERNEL_LOOP(index, nthreads) {
			out[index] = in[index] * pow(scale[index], negative_beta);
		}
	}
void lrn_layer::on_forward(int device_index) {
const data_type* input_data = inputs_[0]->get(device_index)->gpu_data();
data_type* output_data = outputs_[0]->get(device_index)->mutable_gpu_data();
data_type* scale_data = scale_->get(device_index)->mutable_gpu_data();
auto &inputs = *inputs_[0]->get(device_index);
// First, compute scale
// We will launch one kernel for each pixel location, and have the kernel
// go through all the channels.
int n_threads = inputs_[0]->get(device_index)->count() / inputs_[0]->get(device_index)->channels();
KERNEL_CALL(lrn_fill_scale, n_threads)(
n_threads,
input_data,
inputs.num(),
inputs.channels(),
inputs.height(),
inputs.width(),
size_,
alpha_ / size_,
scale_data
);
// then forward
n_threads = inputs_[0]->get(device_index)->count();
KERNEL_CALL(lrn_compute_output, n_threads)(n_threads, input_data, scale_data, -beta_, output_data);
CUDA_POST_KERNEL_CHECK;
}
	// LRN backward: accumulates the input gradient for one (n,h,w) column per
	// thread, using a sliding-window sum of top_diff*top_data/scale over the
	// local window (same add/subtract structure as lrn_fill_scale).
	//   bottom_diff = bottom_diff*scale_targets
	//               + top_diff*scale^negative_beta
	//               - cache_ratio*bottom_data*accum_ratio
	// scale_targets blends into any existing gradient (0 = overwrite).
	// cache_ratio is expected to be 2*alpha*beta/size (set by the caller).
	__global__ static void lrn_compute_diff(const int nthreads, const data_type* bottom_data, const data_type* top_data, const data_type* scale, const data_type* top_diff,
		const int num, const int channels, const int height, const int width, const int size, const data_type negative_beta,
		const data_type cache_ratio, data_type* bottom_diff, const data_type scale_targets) {
		CUDA_KERNEL_LOOP(index, nthreads) {
			// find out the local offset of this (n, h, w) column
			int w = index % width;
			int h = (index / width) % height;
			int n = index / width / height;
			int offset = (n * channels * height + h) * width + w;
			int step = height * width; // distance between consecutive channels
			bottom_data += offset;
			top_data += offset;
			scale += offset;
			top_diff += offset;
			bottom_diff += offset;
			int head = 0;
			int pre_pad = size - (size + 1) / 2;
			int post_pad = size - pre_pad - 1;
			data_type accum_ratio = 0;
			// accumulate values before any output can be produced
			while (head < post_pad && head < channels) {
				accum_ratio += top_diff[head * step] * top_data[head * step] /
					scale[head * step];
				++head;
			}
			// until we reach size, nothing needs to be subtracted
			while (head < size && head < channels) {
				accum_ratio += top_diff[head * step] * top_data[head * step] /
					scale[head * step];
				bottom_diff[(head - post_pad) * step] = bottom_diff[(head - post_pad) * step] * scale_targets
					+ top_diff[(head - post_pad) * step]
					* pow(scale[(head - post_pad) * step], negative_beta) - cache_ratio *
					bottom_data[(head - post_pad) * step] * accum_ratio;
				++head;
			}
			// both add (entering channel) and subtract (leaving channel)
			while (head < channels) {
				accum_ratio += top_diff[head * step] * top_data[head * step] /
					scale[head * step];
				accum_ratio -= top_diff[(head - size) * step] *
					top_data[(head - size) * step] / scale[(head - size) * step];
				bottom_diff[(head - post_pad) * step] = bottom_diff[(head - post_pad) * step] * scale_targets
					+ top_diff[(head - post_pad) * step]
					* pow(scale[(head - post_pad) * step], negative_beta) - cache_ratio *
					bottom_data[(head - post_pad) * step] * accum_ratio;
				++head;
			}
			// subtract only: drain the window for the trailing channels
			while (head < channels + post_pad) {
				if (head >= size) {
					accum_ratio -= top_diff[(head - size) * step] *
						top_data[(head - size) * step] / scale[(head - size) * step];
				}
				if (head >= post_pad) {
					bottom_diff[(head - post_pad) * step] = bottom_diff[(head - post_pad) * step] * scale_targets
						+ top_diff[(head - post_pad) * step]
						* pow(scale[(head - post_pad) * step], negative_beta) - cache_ratio *
						bottom_data[(head - post_pad) * step] * accum_ratio;
				}
				++head;
			}
		}
	}
	// Backward pass on one device: propagate the output gradient to the input
	// via lrn_compute_diff. beta_acts controls accumulation into any existing
	// input gradient (0 = overwrite, in which case the diff is cleared first).
	// Weight selectors are unused: LRN has no learnable parameters.
	void lrn_layer::on_backward(int device_index, act_selector bp_acts, weight_selector bp_weights, act_selector clear_acts_diff, weight_selector clear_weights_diff) {
		const data_type beta_acts = get_beta(clear_acts_diff, 0);
		if (should_bp(bp_acts, 0)) {
			auto &inputs = *inputs_[0]->get(device_index);
			// one thread per (n, h, w) column; each walks all channels
			int n_threads = inputs.count() / inputs.channels();
			if (beta_acts == 0) {
				inputs.fill_diff(0.f);
			}
			KERNEL_CALL(lrn_compute_diff, n_threads)(
				n_threads,
				inputs.gpu_data(),
				outputs_[0]->get(device_index)->gpu_data(),
				scale_->get(device_index)->gpu_data(),
				outputs_[0]->get(device_index)->gpu_diff(),
				inputs.num(),
				inputs.channels(),
				inputs.height(),
				inputs.width(),
				size_,
				-beta_,
				(data_type)(2. * alpha_ * beta_ / size_), // cache_ratio
				inputs.mutable_gpu_diff(),
				beta_acts
			);
			CUDA_POST_KERNEL_CHECK;
		}
	}
}
|
c337b19077dc509b50b336cb49b8049b32990784.hip
|
// !!! This is a file automatically generated by hipify!!!
/*
* Copyright (c) 2021, NVIDIA CORPORATION. All rights reserved.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
#include <cassert>
#include <cstring>
#include <hip/hip_runtime.h>
#include <vector>
#include "NvInfer.h"
#include "bertCommon.h"
#include "common_hip.cuh"
#include "plugin.h"
#include "serialize.hpp"
using namespace nvinfer1;
namespace bert
{
// Fused embedding lookup + layer normalization for variable-length batches
// (MTron variant: also writes the pre-layernorm sum to `skip`).
// Grid layout: blockIdx.x = batch index b, blockIdx.y = position s within the
// sequence; ld / VPT threads per block, each handling VPT contiguous elements.
// cuSeqlens holds the prefix offsets of the packed sequences, so sequence b
// occupies rows [cuSeqlens[b], cuSeqlens[b+1]).
template <typename T, unsigned TPB, unsigned VPT>
__global__ void embLayerNormKernelVarSeqlenMTron(int ld, const uint32_t* cuSeqlens, const int* inputIds,
    const int* segmentIds, const T* beta, const T* gamma, const T* tokEmb, const T* posEmb, const T* segEmb, T* output,
    T* skip)
{
    // block-wide reduction over (sum, sum of squares) pairs
    using BlockReduce = hipcub::BlockReduce<kvp<T>, TPB>;
    __shared__ typename BlockReduce::TempStorage temp_storage;
    const int b = blockIdx.x;
    const int s = blockIdx.y;
    const int sum_s = cuSeqlens[b];        // start row of sequence b
    const int s_b = cuSeqlens[b + 1] - sum_s; // actual length of sequence b
    // either the whole CTA has work or not
    if (s >= s_b)
        return;
    const int inOffset = (sum_s + s);      // packed row for (b, s)
    const int outOffset = (sum_s + s) * ld;
    // 1. lookup word and token of the block
    // blockIdx.x = position in the sequence
    // blockIdx.y = batch
    // gridDim.x = S
    // gridDim.y = B
    __shared__ int inputId;
    __shared__ int segmentId;
    if (threadIdx.x == 0)
    {
        inputId = inputIds[inOffset];
        segmentId = segmentIds[inOffset];
    }
    __syncthreads();
    // 2. load pos/tok/word embeddings and add them toghether
    // offset into embeddings is given by wordId * hidden_size
    const int poffset = s * ld;
    const int ioffset = inputId * ld;
    const int soffset = segmentId * ld;
    // 16B per thread: 8 elements. there should be ld / VPT threads per CTA
    // 1024: 128 threads
    // 768: 96 threads
    const int toffset = threadIdx.x * VPT;
    // 4 * 1024 * 4 * 2 Bytes = 16KB per block
    T i_local[VPT];
    T s_local[VPT];
    T p_local[VPT];
    // read embeddings (vectorized copies of VPT elements each)
    copy<sizeof(T) * VPT>(&tokEmb[ioffset + toffset], i_local);
    copy<sizeof(T) * VPT>(&segEmb[soffset + toffset], s_local);
    copy<sizeof(T) * VPT>(&posEmb[poffset + toffset], p_local);
    T local = 0.f;   // partial sum for the mean
    T local2 = 0.f;  // partial sum for the second moment
    const T rld = T(1) / T(ld);
#pragma unroll
    for (int it = 0; it < VPT; it++)
    {
        // sum the three embeddings; accumulate mean/second-moment partials
        i_local[it] += s_local[it] + p_local[it];
        const T tmp = rld * i_local[it];
        local += tmp;
        local2 += tmp * i_local[it];
    }
    // write the pre-layernorm sum out as the "skip" tensor, then reuse the
    // p_local/s_local registers for the layernorm parameters beta/gamma
    copy<sizeof(T) * VPT>(i_local, &skip[outOffset + toffset]);
    copy<sizeof(T) * VPT>(&beta[toffset], p_local);
    copy<sizeof(T) * VPT>(&gamma[toffset], s_local);
    __shared__ T mu;     // mean
    __shared__ T rsigma; // 1 / std.dev.
    const auto sumKV = BlockReduce(temp_storage).Reduce(kvp<T>(local, local2), hipcub::Sum());
    if (threadIdx.x == 0)
    {
        // NOTE(review): no epsilon inside rsqrt -- relies on the variance
        // never being exactly zero; confirm upstream guarantees this
        mu = sumKV.key;
        rsigma = rsqrt(sumKV.value - mu * mu);
    }
    __syncthreads();
    // normalize: gamma * (x - mu) / sigma + beta
#pragma unroll
    for (int it = 0; it < VPT; it++)
    {
        i_local[it] = s_local[it] * (i_local[it] - mu) * rsigma + p_local[it];
    }
    copy<sizeof(T) * VPT>(i_local, &output[outOffset + toffset]);
}
template <typename T>
int embSkipLayerNormVarSeqlenMTron(hipStream_t stream, int ld, int B, int S, const uint32_t* cuSeqlens,
const int* inputIds, const int* token_ids, const T* beta, const T* gamma, const T* wordEmb, const T* posEmb,
const T* tokEmb, T* output, T* skip)
{
const dim3 grid(B, S, 1);
if (ld == 1024)
{
constexpr int VPT = 16 / sizeof(T);
constexpr int TPB = 1024 / VPT;
const dim3 block(TPB, 1, 1);
hipLaunchKernelGGL(( embLayerNormKernelVarSeqlenMTron<T, TPB, VPT>), dim3(grid), dim3(block), 0, stream,
ld, cuSeqlens, inputIds, token_ids, beta, gamma, wordEmb, posEmb, tokEmb, output, skip);
}
else if (ld == 768)
{
constexpr int VPT = 16 / sizeof(T);
constexpr int TPB = 768 / VPT;
const dim3 block(TPB, 1, 1);
hipLaunchKernelGGL(( embLayerNormKernelVarSeqlenMTron<T, TPB, VPT>), dim3(grid), dim3(block), 0, stream,
ld, cuSeqlens, inputIds, token_ids, beta, gamma, wordEmb, posEmb, tokEmb, output, skip);
}
else
{
assert(false && "Unsupported hidden dimension");
}
CHECK(hipPeekAtLastError());
return 0;
}
template int embSkipLayerNormVarSeqlenMTron<float>(hipStream_t, int, int, int, const uint32_t*, const int*, const int*,
const float*, const float*, const float*, const float*, const float*, float*, float*);
template int embSkipLayerNormVarSeqlenMTron<half>(hipStream_t, int, int, int, const uint32_t*, const int*, const int*,
const half*, const half*, const half*, const half*, const half*, half*, half*);
/// REDO BASED ON OLD KERNEL TO REPRODUCE EXACT RESULTS
template <typename T, unsigned TPB>
__global__ void embLayerNormKernelMTron(int ld, const int* inputIds, const int* tokenIds, const int* cuSeqlens,
const float* beta, const float* gamma, const T* wordEmb, const T* posEmb, const T* tokEmb, T* output, T* skip)
{
// this code currently assumes the input shape is SxB, row-major => seqPos = s * B + b
// instead we want BxS, row-major => seqPos = b * S + s
hipcub::Sum pairSum;
// 1. lookup word and token of the block
// blockIdx.x = position in the sequence
// blockIdx.y = batch
// gridDim.x = S
// gridDim.y = B
const int s = blockIdx.x;
const int b = blockIdx.y;
const int sumS = cuSeqlens[b];
const int s_b = cuSeqlens[b + 1] - sumS;
if (s >= s_b)
return; // This CTA has nothing to do
__shared__ int wordId;
__shared__ int tokenId;
const T rld = T(1.f) / T(ld);
// seqPos = b + s * B
// const int seqPos = blockIdx.y + blockIdx.x * gridDim.y;
// const int seqPos = s * B + s;
const int seqPos = sumS + s;
if (threadIdx.x == 0)
{
wordId = inputIds[seqPos];
tokenId = tokenIds[seqPos];
}
__syncthreads();
// 2. load pos/tok/word embeddings and add them toghether
// offset into embeddings is given by wordId * hidden_size
const int poffset = blockIdx.x * ld;
const int woffset = wordId * ld;
const int toffset = tokenId * ld;
// the output offset is given by b * (S*hidden_size) + s * hidden_size
const int outOffset = seqPos * ld;
kvp<T> threadData(0, 0);
for (int it = threadIdx.x; it < ld; it += TPB)
{
const T w(wordEmb[woffset + it]);
const T t(tokEmb[toffset + it]);
const T p(posEmb[poffset + it]);
const T val = w + t + p;
output[outOffset + it] = val;
skip[outOffset + it] = val;
const T rldval = rld * val;
threadData = pairSum(threadData, kvp<T>(rldval, rldval * val));
}
// 3. layer norm on the sum
layerNorm<T, T, float, TPB>(threadData, ld, outOffset, beta, gamma, output);
}
template <typename T>
int embSkipLayerNormMTron(hipStream_t stream, int ld, int B, int S, const int* inputIds, const int* tokenIds,
const int* cuSeqlens, const float* beta, const float* gamma, const T* wordEmb, const T* posEmb, const T* tokEmb,
T* output, T* skip)
{
constexpr int tpb = 256;
const dim3 grid(S, B, 1);
const dim3 block(tpb, 1, 1);
hipLaunchKernelGGL(( embLayerNormKernelMTron<T, tpb>), dim3(grid), dim3(block), 0, stream,
ld, inputIds, tokenIds, cuSeqlens, beta, gamma, wordEmb, posEmb, tokEmb, output, skip);
return hipPeekAtLastError();
}
template int embSkipLayerNormMTron<float>(hipStream_t, int, int, int, const int*, const int*, const int*, const float*,
const float*, const float*, const float*, const float*, float*, float*);
template int embSkipLayerNormMTron<half>(hipStream_t, int, int, int, const int*, const int*, const int*, const float*,
const float*, const half*, const half*, const half*, half*, half*);
} // namespace bert
|
c337b19077dc509b50b336cb49b8049b32990784.cu
|
/*
* Copyright (c) 2021, NVIDIA CORPORATION. All rights reserved.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
#include <cassert>
#include <cstring>
#include <cuda.h>
#include <vector>
#include "NvInfer.h"
#include "bertCommon.h"
#include "common.cuh"
#include "plugin.h"
#include "serialize.hpp"
using namespace nvinfer1;
namespace bert
{
template <typename T, unsigned TPB, unsigned VPT>
__global__ void embLayerNormKernelVarSeqlenMTron(int ld, const uint32_t* cuSeqlens, const int* inputIds,
const int* segmentIds, const T* beta, const T* gamma, const T* tokEmb, const T* posEmb, const T* segEmb, T* output,
T* skip)
{
using BlockReduce = cub::BlockReduce<kvp<T>, TPB>;
__shared__ typename BlockReduce::TempStorage temp_storage;
const int b = blockIdx.x;
const int s = blockIdx.y;
const int sum_s = cuSeqlens[b];
const int s_b = cuSeqlens[b + 1] - sum_s;
// either the whole CTA has work or not
if (s >= s_b)
return;
const int inOffset = (sum_s + s);
const int outOffset = (sum_s + s) * ld;
// 1. lookup word and token of the block
// blockIdx.x = position in the sequence
// blockIdx.y = batch
// gridDim.x = S
// gridDim.y = B
__shared__ int inputId;
__shared__ int segmentId;
if (threadIdx.x == 0)
{
inputId = inputIds[inOffset];
segmentId = segmentIds[inOffset];
}
__syncthreads();
// 2. load pos/tok/word embeddings and add them toghether
// offset into embeddings is given by wordId * hidden_size
const int poffset = s * ld;
const int ioffset = inputId * ld;
const int soffset = segmentId * ld;
// 16B per thread: 8 elements. there should be ld / VPT threads per CTA
// 1024: 128 threads
// 768: 96 threads
const int toffset = threadIdx.x * VPT;
// 4 * 1024 * 4 * 2 Bytes = 16KB per block
T i_local[VPT];
T s_local[VPT];
T p_local[VPT];
// read embeddings
copy<sizeof(T) * VPT>(&tokEmb[ioffset + toffset], i_local);
copy<sizeof(T) * VPT>(&segEmb[soffset + toffset], s_local);
copy<sizeof(T) * VPT>(&posEmb[poffset + toffset], p_local);
T local = 0.f;
T local2 = 0.f;
const T rld = T(1) / T(ld);
#pragma unroll
for (int it = 0; it < VPT; it++)
{
i_local[it] += s_local[it] + p_local[it];
const T tmp = rld * i_local[it];
local += tmp;
local2 += tmp * i_local[it];
}
// load params
copy<sizeof(T) * VPT>(i_local, &skip[outOffset + toffset]);
copy<sizeof(T) * VPT>(&beta[toffset], p_local);
copy<sizeof(T) * VPT>(&gamma[toffset], s_local);
__shared__ T mu; // mean
__shared__ T rsigma; // 1 / std.dev.
const auto sumKV = BlockReduce(temp_storage).Reduce(kvp<T>(local, local2), cub::Sum());
if (threadIdx.x == 0)
{
mu = sumKV.key;
rsigma = rsqrt(sumKV.value - mu * mu);
}
__syncthreads();
///*
#pragma unroll
for (int it = 0; it < VPT; it++)
{
i_local[it] = s_local[it] * (i_local[it] - mu) * rsigma + p_local[it];
}
/* */
copy<sizeof(T) * VPT>(i_local, &output[outOffset + toffset]);
}
template <typename T>
int embSkipLayerNormVarSeqlenMTron(cudaStream_t stream, int ld, int B, int S, const uint32_t* cuSeqlens,
const int* inputIds, const int* token_ids, const T* beta, const T* gamma, const T* wordEmb, const T* posEmb,
const T* tokEmb, T* output, T* skip)
{
const dim3 grid(B, S, 1);
if (ld == 1024)
{
constexpr int VPT = 16 / sizeof(T);
constexpr int TPB = 1024 / VPT;
const dim3 block(TPB, 1, 1);
embLayerNormKernelVarSeqlenMTron<T, TPB, VPT><<<grid, block, 0, stream>>>(
ld, cuSeqlens, inputIds, token_ids, beta, gamma, wordEmb, posEmb, tokEmb, output, skip);
}
else if (ld == 768)
{
constexpr int VPT = 16 / sizeof(T);
constexpr int TPB = 768 / VPT;
const dim3 block(TPB, 1, 1);
embLayerNormKernelVarSeqlenMTron<T, TPB, VPT><<<grid, block, 0, stream>>>(
ld, cuSeqlens, inputIds, token_ids, beta, gamma, wordEmb, posEmb, tokEmb, output, skip);
}
else
{
assert(false && "Unsupported hidden dimension");
}
CHECK(cudaPeekAtLastError());
return 0;
}
template int embSkipLayerNormVarSeqlenMTron<float>(cudaStream_t, int, int, int, const uint32_t*, const int*, const int*,
const float*, const float*, const float*, const float*, const float*, float*, float*);
template int embSkipLayerNormVarSeqlenMTron<half>(cudaStream_t, int, int, int, const uint32_t*, const int*, const int*,
const half*, const half*, const half*, const half*, const half*, half*, half*);
/// REDO BASED ON OLD KERNEL TO REPRODUCE EXACT RESULTS
template <typename T, unsigned TPB>
__global__ void embLayerNormKernelMTron(int ld, const int* inputIds, const int* tokenIds, const int* cuSeqlens,
const float* beta, const float* gamma, const T* wordEmb, const T* posEmb, const T* tokEmb, T* output, T* skip)
{
// this code currently assumes the input shape is SxB, row-major => seqPos = s * B + b
// instead we want BxS, row-major => seqPos = b * S + s
cub::Sum pairSum;
// 1. lookup word and token of the block
// blockIdx.x = position in the sequence
// blockIdx.y = batch
// gridDim.x = S
// gridDim.y = B
const int s = blockIdx.x;
const int b = blockIdx.y;
const int sumS = cuSeqlens[b];
const int s_b = cuSeqlens[b + 1] - sumS;
if (s >= s_b)
return; // This CTA has nothing to do
__shared__ int wordId;
__shared__ int tokenId;
const T rld = T(1.f) / T(ld);
// seqPos = b + s * B
// const int seqPos = blockIdx.y + blockIdx.x * gridDim.y;
// const int seqPos = s * B + s;
const int seqPos = sumS + s;
if (threadIdx.x == 0)
{
wordId = inputIds[seqPos];
tokenId = tokenIds[seqPos];
}
__syncthreads();
// 2. load pos/tok/word embeddings and add them toghether
// offset into embeddings is given by wordId * hidden_size
const int poffset = blockIdx.x * ld;
const int woffset = wordId * ld;
const int toffset = tokenId * ld;
// the output offset is given by b * (S*hidden_size) + s * hidden_size
const int outOffset = seqPos * ld;
kvp<T> threadData(0, 0);
for (int it = threadIdx.x; it < ld; it += TPB)
{
const T w(wordEmb[woffset + it]);
const T t(tokEmb[toffset + it]);
const T p(posEmb[poffset + it]);
const T val = w + t + p;
output[outOffset + it] = val;
skip[outOffset + it] = val;
const T rldval = rld * val;
threadData = pairSum(threadData, kvp<T>(rldval, rldval * val));
}
// 3. layer norm on the sum
layerNorm<T, T, float, TPB>(threadData, ld, outOffset, beta, gamma, output);
}
template <typename T>
int embSkipLayerNormMTron(cudaStream_t stream, int ld, int B, int S, const int* inputIds, const int* tokenIds,
const int* cuSeqlens, const float* beta, const float* gamma, const T* wordEmb, const T* posEmb, const T* tokEmb,
T* output, T* skip)
{
constexpr int tpb = 256;
const dim3 grid(S, B, 1);
const dim3 block(tpb, 1, 1);
embLayerNormKernelMTron<T, tpb><<<grid, block, 0, stream>>>(
ld, inputIds, tokenIds, cuSeqlens, beta, gamma, wordEmb, posEmb, tokEmb, output, skip);
return cudaPeekAtLastError();
}
template int embSkipLayerNormMTron<float>(cudaStream_t, int, int, int, const int*, const int*, const int*, const float*,
const float*, const float*, const float*, const float*, float*, float*);
template int embSkipLayerNormMTron<half>(cudaStream_t, int, int, int, const int*, const int*, const int*, const float*,
const float*, const half*, const half*, const half*, half*, half*);
} // namespace bert
|
7a958920747ab34ec19cebcf0cf717a0c017b57c.hip
|
// !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
// add by Binbin Xu
// declanxu@gmail.com or declanxu@126.com
// Zhejiang University, State Key Lab of CAD&CG.
#include "caffe/util/io.hpp"
#include "caffe/util/math_functions.hpp"
#include "caffe/vision_layers.hpp"
namespace caffe {
template <typename Dtype>
__global__ void subAndDot(const int N, const int len, const Dtype* a, const Dtype* b, Dtype* out) {
int i = blockDim.x * blockIdx.x + threadIdx.x;
if (i < N){
// int index = i%len;
Dtype tmp = a[i] - b[i%len];
out[i] = tmp*tmp;
}
}
template <typename Dtype>
void TripletLossLayer<Dtype>::Forward_gpu(const vector<Blob<Dtype>*>& bottom,
const vector<Blob<Dtype>*>& top) {
//LOG(INFO) << "enter triplet Forward_gpu";
//LOG(INFO) << "inner_num_: " << inner_num_ << ", label_separator_: " << label_separator_;
Dtype loss = Dtype(0);
int batch_size = bottom[0]->num(); // get the batch_size
//CHECK_EQ(batch_size, bottom[1]->count());
//LOG(INFO) << batch_size << ":" << bottom[1]->num();
const Dtype* bottom_data = bottom[0]->gpu_data();
const Dtype* bottom_label = bottom[1]->cpu_data();
Dtype* diff_mutable = diff_.mutable_gpu_data();
Dtype* sub_mutable = sub_.mutable_gpu_data();
Dtype* diff_diff = diff_.mutable_gpu_diff(); // store the diff
caffe_gpu_set(diff_.count(), Dtype(0), diff_mutable);
caffe_gpu_set(diff_.count(), Dtype(0), diff_diff);
vector<int> labels(batch_size, 0);
for (int i = 0; i < batch_size; i++){
labels[i] = static_cast<int>(bottom_label[i]);
//std::cout << labels[i] << " ";
}
//std::cout << "\n";
int count = diff_.count();
//Dtype** mat = new Dtype*[batch_size];
Dtype* val = new Dtype[batch_size];
Dtype* device_scalar;
Dtype* device_tmp;
Dtype* middle;
//Dtype* middle_tmp = new Dtype[inner_num_];
CUDA_CHECK(hipMalloc((void**)&middle, inner_num_*sizeof(Dtype)));
CUDA_CHECK(hipMalloc((void**)&device_scalar, inner_num_*sizeof(Dtype)));
CUDA_CHECK(hipMalloc((void**)&device_tmp, batch_size*sizeof(Dtype)));
caffe_gpu_set(inner_num_, Dtype(1.0), device_scalar);
int N = batch_size*inner_num_;
for (int i = 0; i < batch_size; i++){
int label = labels[i];
// mat[i] = new Dtype[batch_size];
if (label < label_separator_) {
sub_mutable = sub_.mutable_gpu_data();
hipLaunchKernelGGL(( subAndDot), dim3(CAFFE_GET_BLOCKS(count)), dim3(CAFFE_CUDA_NUM_THREADS), 0, 0, N, inner_num_, bottom_data, bottom_data+i*inner_num_, sub_mutable);
caffe_gpu_gemv(CblasNoTrans, inner_num_, batch_size, Dtype(1.0), sub_mutable, device_scalar, Dtype(0.0), device_tmp);
// hipblasDgemv(handle, batch_size, inner_num_, &Dtype(1.0), sub_mutable, batch_size, device_scalar, 1,&Dtype(0), mat[i], 1);
hipMemcpy(val, device_tmp, batch_size*sizeof(Dtype), hipMemcpyDeviceToHost);
// Dtype* val = mat[i];
// bool flag = true;
Dtype margin = Dtype(10000.0);
int tmp_k = -1;
int tmp_j = -1;
for (int j = 0; j < batch_size; j++){
if (j != i && labels[j] == label){ // j is the positive
for (int k = 0; k < batch_size; k++){
if (labels[k] != label) { // k is the negative
if (val[j] >= val[k]) {
loss += val[j] + alpha_ - val[k];
caffe_gpu_sub(inner_num_, bottom_data+k*inner_num_, bottom_data+j*inner_num_, middle);
caffe_gpu_axpy(inner_num_, Dtype(1.0), middle, diff_diff+i*inner_num_);
//caffe_gpu_sub(inner_num_, bottom_data+j*inner_num_, bottom_data+i*inner_num_, middle);
// caffe_gpu_axpy(inner_num_, Dtype(1.0), middle, diff_diff+j*inner_num_);
// caffe_gpu_sub(inner_num_, bottom_data+i*inner_num_, bottom_data+k*inner_num_, middle);
// caffe_gpu_axpy(inner_num_, Dtype(1.0), middle, diff_diff+k*inner_num_);
break;
}
else {
if (val[k] - val[j] <= 0.2) {
loss += val[j] + alpha_ - val[k];
caffe_gpu_sub(inner_num_, bottom_data+k*inner_num_, bottom_data+j*inner_num_, middle);
caffe_gpu_axpy(inner_num_, Dtype(1.0), middle, diff_diff+i*inner_num_);
// caffe_gpu_sub(inner_num_, bottom_data+j*inner_num_, bottom_data+i*inner_num_, middle);
// caffe_gpu_axpy(inner_num_, Dtype(1.0), middle, diff_diff+j*inner_num_);
// caffe_gpu_sub(inner_num_, bottom_data+i*inner_num_, bottom_data+k*inner_num_, middle);
// caffe_gpu_axpy(inner_num_, Dtype(1.0), middle, diff_diff+k*inner_num_);
break;
}
if (val[k] - val[j] < margin) {
tmp_k = k;
tmp_j = j;
margin = val[k] - val[j];
}
}
}
}
if (margin < alpha_ && tmp_k != -1) {
loss += val[tmp_j] + alpha_ - val[tmp_k];
caffe_gpu_sub(inner_num_, bottom_data+tmp_k*inner_num_, bottom_data+tmp_j*inner_num_, middle);
caffe_gpu_axpy(inner_num_, Dtype(1.0), middle, diff_diff+i*inner_num_);
// caffe_gpu_sub(inner_num_, bottom_data+tmp_j*inner_num_, bottom_data+i*inner_num_, middle);
// caffe_gpu_axpy(inner_num_, Dtype(1.0), middle, diff_diff+tmp_j*inner_num_);
// caffe_gpu_sub(inner_num_, bottom_data+i*inner_num_, bottom_data+tmp_k*inner_num_, middle);
// caffe_gpu_axpy(inner_num_, Dtype(1.0), middle, diff_diff+tmp_k*inner_num_);
}
}
}
}
}
top[0]->mutable_cpu_data()[0] = loss/(Dtype(2)*bottom[0]->num());
delete[] val;
// for (int i = 0; i < batch_size; i++) {
// delete[] mat[i];
// }
// delete[] mat;
}
template <typename Dtype>
void TripletLossLayer<Dtype>::Backward_gpu(const vector<Blob<Dtype>*>& top,
const vector<bool>& propagate_down, const vector<Blob<Dtype>*>& bottom) {
if (propagate_down[0]){
if (top[0]->cpu_diff()[0] != Dtype(1.0)){
LOG(INFO) << "Triplet.cu top cpu_diff is not 1.0 is " << top[0]->cpu_diff()[0];
}
Dtype scale = Dtype(2.0)*top[0]->cpu_diff()[0]/bottom[0]->num();
caffe_gpu_scale(
bottom[0]->count(), // count
scale, // scale
diff_.gpu_diff(), // input
bottom[0]->mutable_gpu_diff() // output
);
/*
const Dtype* ptr = bottom[0]->cpu_diff();
for (int i = 0; i < bottom[0]->num(); i++) {
int tmp = i*128;
std::cout << i << ": ";
for (int j = 0; j < 128; j++) {
std::cout << ptr[tmp++] << " ";
}
std::cout << "\n";
}
bottom[0]->gpu_diff();
CHECK_EQ(1,2);
*/
}
else {
LOG(ERROR) << "should be back propagate to prev-layer AT TripletLossLayer::Backward_cpu" << std::endl;
}
}
INSTANTIATE_LAYER_GPU_FUNCS(TripletLossLayer);
} // namespace caffe
|
7a958920747ab34ec19cebcf0cf717a0c017b57c.cu
|
// add by Binbin Xu
// declanxu@gmail.com or declanxu@126.com
// Zhejiang University, State Key Lab of CAD&CG.
#include "caffe/util/io.hpp"
#include "caffe/util/math_functions.hpp"
#include "caffe/vision_layers.hpp"
namespace caffe {
template <typename Dtype>
__global__ void subAndDot(const int N, const int len, const Dtype* a, const Dtype* b, Dtype* out) {
int i = blockDim.x * blockIdx.x + threadIdx.x;
if (i < N){
// int index = i%len;
Dtype tmp = a[i] - b[i%len];
out[i] = tmp*tmp;
}
}
template <typename Dtype>
void TripletLossLayer<Dtype>::Forward_gpu(const vector<Blob<Dtype>*>& bottom,
const vector<Blob<Dtype>*>& top) {
//LOG(INFO) << "enter triplet Forward_gpu";
//LOG(INFO) << "inner_num_: " << inner_num_ << ", label_separator_: " << label_separator_;
Dtype loss = Dtype(0);
int batch_size = bottom[0]->num(); // get the batch_size
//CHECK_EQ(batch_size, bottom[1]->count());
//LOG(INFO) << batch_size << ":" << bottom[1]->num();
const Dtype* bottom_data = bottom[0]->gpu_data();
const Dtype* bottom_label = bottom[1]->cpu_data();
Dtype* diff_mutable = diff_.mutable_gpu_data();
Dtype* sub_mutable = sub_.mutable_gpu_data();
Dtype* diff_diff = diff_.mutable_gpu_diff(); // store the diff
caffe_gpu_set(diff_.count(), Dtype(0), diff_mutable);
caffe_gpu_set(diff_.count(), Dtype(0), diff_diff);
vector<int> labels(batch_size, 0);
for (int i = 0; i < batch_size; i++){
labels[i] = static_cast<int>(bottom_label[i]);
//std::cout << labels[i] << " ";
}
//std::cout << "\n";
int count = diff_.count();
//Dtype** mat = new Dtype*[batch_size];
Dtype* val = new Dtype[batch_size];
Dtype* device_scalar;
Dtype* device_tmp;
Dtype* middle;
//Dtype* middle_tmp = new Dtype[inner_num_];
CUDA_CHECK(cudaMalloc((void**)&middle, inner_num_*sizeof(Dtype)));
CUDA_CHECK(cudaMalloc((void**)&device_scalar, inner_num_*sizeof(Dtype)));
CUDA_CHECK(cudaMalloc((void**)&device_tmp, batch_size*sizeof(Dtype)));
caffe_gpu_set(inner_num_, Dtype(1.0), device_scalar);
int N = batch_size*inner_num_;
for (int i = 0; i < batch_size; i++){
int label = labels[i];
// mat[i] = new Dtype[batch_size];
if (label < label_separator_) {
sub_mutable = sub_.mutable_gpu_data();
subAndDot<<<CAFFE_GET_BLOCKS(count), CAFFE_CUDA_NUM_THREADS>>>(N, inner_num_, bottom_data, bottom_data+i*inner_num_, sub_mutable);
caffe_gpu_gemv(CblasNoTrans, inner_num_, batch_size, Dtype(1.0), sub_mutable, device_scalar, Dtype(0.0), device_tmp);
// cublasDgemv(handle, batch_size, inner_num_, &Dtype(1.0), sub_mutable, batch_size, device_scalar, 1,&Dtype(0), mat[i], 1);
cudaMemcpy(val, device_tmp, batch_size*sizeof(Dtype), cudaMemcpyDeviceToHost);
// Dtype* val = mat[i];
// bool flag = true;
Dtype margin = Dtype(10000.0);
int tmp_k = -1;
int tmp_j = -1;
for (int j = 0; j < batch_size; j++){
if (j != i && labels[j] == label){ // j is the positive
for (int k = 0; k < batch_size; k++){
if (labels[k] != label) { // k is the negative
if (val[j] >= val[k]) {
loss += val[j] + alpha_ - val[k];
caffe_gpu_sub(inner_num_, bottom_data+k*inner_num_, bottom_data+j*inner_num_, middle);
caffe_gpu_axpy(inner_num_, Dtype(1.0), middle, diff_diff+i*inner_num_);
//caffe_gpu_sub(inner_num_, bottom_data+j*inner_num_, bottom_data+i*inner_num_, middle);
// caffe_gpu_axpy(inner_num_, Dtype(1.0), middle, diff_diff+j*inner_num_);
// caffe_gpu_sub(inner_num_, bottom_data+i*inner_num_, bottom_data+k*inner_num_, middle);
// caffe_gpu_axpy(inner_num_, Dtype(1.0), middle, diff_diff+k*inner_num_);
break;
}
else {
if (val[k] - val[j] <= 0.2) {
loss += val[j] + alpha_ - val[k];
caffe_gpu_sub(inner_num_, bottom_data+k*inner_num_, bottom_data+j*inner_num_, middle);
caffe_gpu_axpy(inner_num_, Dtype(1.0), middle, diff_diff+i*inner_num_);
// caffe_gpu_sub(inner_num_, bottom_data+j*inner_num_, bottom_data+i*inner_num_, middle);
// caffe_gpu_axpy(inner_num_, Dtype(1.0), middle, diff_diff+j*inner_num_);
// caffe_gpu_sub(inner_num_, bottom_data+i*inner_num_, bottom_data+k*inner_num_, middle);
// caffe_gpu_axpy(inner_num_, Dtype(1.0), middle, diff_diff+k*inner_num_);
break;
}
if (val[k] - val[j] < margin) {
tmp_k = k;
tmp_j = j;
margin = val[k] - val[j];
}
}
}
}
if (margin < alpha_ && tmp_k != -1) {
loss += val[tmp_j] + alpha_ - val[tmp_k];
caffe_gpu_sub(inner_num_, bottom_data+tmp_k*inner_num_, bottom_data+tmp_j*inner_num_, middle);
caffe_gpu_axpy(inner_num_, Dtype(1.0), middle, diff_diff+i*inner_num_);
// caffe_gpu_sub(inner_num_, bottom_data+tmp_j*inner_num_, bottom_data+i*inner_num_, middle);
// caffe_gpu_axpy(inner_num_, Dtype(1.0), middle, diff_diff+tmp_j*inner_num_);
// caffe_gpu_sub(inner_num_, bottom_data+i*inner_num_, bottom_data+tmp_k*inner_num_, middle);
// caffe_gpu_axpy(inner_num_, Dtype(1.0), middle, diff_diff+tmp_k*inner_num_);
}
}
}
}
}
top[0]->mutable_cpu_data()[0] = loss/(Dtype(2)*bottom[0]->num());
delete[] val;
// for (int i = 0; i < batch_size; i++) {
// delete[] mat[i];
// }
// delete[] mat;
}
template <typename Dtype>
void TripletLossLayer<Dtype>::Backward_gpu(const vector<Blob<Dtype>*>& top,
const vector<bool>& propagate_down, const vector<Blob<Dtype>*>& bottom) {
if (propagate_down[0]){
if (top[0]->cpu_diff()[0] != Dtype(1.0)){
LOG(INFO) << "Triplet.cu top cpu_diff is not 1.0 is " << top[0]->cpu_diff()[0];
}
Dtype scale = Dtype(2.0)*top[0]->cpu_diff()[0]/bottom[0]->num();
caffe_gpu_scale(
bottom[0]->count(), // count
scale, // scale
diff_.gpu_diff(), // input
bottom[0]->mutable_gpu_diff() // output
);
/*
const Dtype* ptr = bottom[0]->cpu_diff();
for (int i = 0; i < bottom[0]->num(); i++) {
int tmp = i*128;
std::cout << i << ": ";
for (int j = 0; j < 128; j++) {
std::cout << ptr[tmp++] << " ";
}
std::cout << "\n";
}
bottom[0]->gpu_diff();
CHECK_EQ(1,2);
*/
}
else {
LOG(ERROR) << "should be back propagate to prev-layer AT TripletLossLayer::Backward_cpu" << std::endl;
}
}
INSTANTIATE_LAYER_GPU_FUNCS(TripletLossLayer);
} // namespace caffe
|
0ffe0333f6360cba17de5ce2b49e180d55d02307.hip
|
// !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
/* Copyright (c) 2011-2017, NVIDIA CORPORATION. All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions
* are met:
* * Redistributions of source code must retain the above copyright
* notice, this list of conditions and the following disclaimer.
* * Redistributions in binary form must reproduce the above copyright
* notice, this list of conditions and the following disclaimer in the
* documentation and/or other materials provided with the distribution.
* * Neither the name of NVIDIA CORPORATION nor the names of its
* contributors may be used to endorse or promote products derived
* from this software without specific prior written permission.
*
* THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS ``AS IS'' AND ANY
* EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
* IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
* PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR
* CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
* EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
* PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
* PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY
* OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
* (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
* OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
*/
#include <multiply.h>
#include <basic_types.h>
#include <texture.h>
#include <util.h>
#include <cutil.h>
#ifdef _WIN32
#pragma warning (push)
#pragma warning (disable : 4244 4267 4521)
#endif
#include <cusp/multiply.h>
#include <matrix.h>
#include <matrix_cusp.h>
#include <amgx_cusparse.h>
#ifdef _WIN32
#pragma warning (pop)
#endif
#include <sm_utils.inl>
#include <amgx_types/math.h>
#include <amgx_types/util.h>
namespace amgx
{
#define USE_EXPERIMENTAL_4x4
template <class Matrix, class Vector>
class Multiply_1x1;
template <class Matrix, class Vector, class IVector>
class Multiply_1x1_masked;
template <class Matrix, class Vector>
class Multiply_1x1_with_mask;
template <class Matrix, class Vector>
class Multiply_1x1_with_mask_restriction;
template <class Matrix, class Vector>
class Multiply_3x3;
template <class Matrix, class Vector>
class Multiply_4x4;
template <class Matrix, class Vector>
class Multiply_bxb;
template <typename TConfig>
void multiply_block_size(Matrix<TConfig> &A, Vector<TConfig> &B, Vector<TConfig> &C, ViewType view)
{
typedef Matrix<TConfig> TMatrix;
typedef Vector<TConfig> TVector;
if (A.get_block_size() == 1)
{
Multiply_1x1<TMatrix, TVector>::multiply_1x1(A, B, C, view);
}
else if (A.get_block_dimy() == 3 && A.get_block_dimx() == 3)
{
Multiply_3x3<TMatrix, TVector>::multiply_3x3(A, B, C, view);
}
else if (A.get_block_dimy() == 4 && A.get_block_dimx() == 4)
{
Multiply_4x4<TMatrix, TVector>::multiply_4x4(A, B, C, view);
}
else
{
Multiply_bxb<TMatrix, TVector>::multiply_bxb(A, B, C, view);
}
}
template <typename TConfig>
void multiply(Matrix<TConfig> &A, Vector<TConfig> &B, Vector<TConfig> &C, ViewType view)
{
typedef Matrix<TConfig> TMatrix;
typedef Vector<TConfig> TVector;
if (!A.is_initialized())
{
FatalError("Trying to multiply uninitialized matrix", AMGX_ERR_BAD_PARAMETERS);
}
if (A.get_block_dimx() != B.get_block_dimy())
{
std::stringstream ss;
ss << "Matrix and vector dimensions don't match: A.dimx == " << A.get_block_dimx() << ", B.dimy == " << B.get_block_dimy();
FatalError(ss.str().c_str(), AMGX_ERR_BAD_PARAMETERS);
}
typedef Matrix<TConfig> TMatrix;
typedef Vector<TConfig> TVector;
bool latencyHiding = (view == A.getViewExterior() && A.getViewInterior() != A.getViewExterior() && !A.is_matrix_singleGPU() && B.dirtybit != 0);
if (latencyHiding)
{
A.manager->exchange_halo_split_gather(B, B.tag);
// Multiply interior rows
multiply_block_size(A, B, C, A.getViewInterior());
// Finish halo exchange
A.manager->exchange_halo_split_finish(B, B.tag);
// Multiply rows with halo dependencies
ViewType bnd_view = (ViewType)(~(A.getViewInterior()) & A.getViewExterior());
multiply_block_size(A, B, C, bnd_view);
}
else
{
if (view != INTERIOR && !A.is_matrix_singleGPU() && B.dirtybit != 0)
{
A.manager->exchange_halo_v2(B, B.tag);
}
multiply_block_size(A, B, C, view);
}
C.dirtybit = 1;
C.set_block_dimy(A.get_block_dimx());
}
template <class TConfig>
void multiply_masked(Matrix<TConfig> &A, Vector<TConfig> &B, Vector<TConfig> &C, typename Matrix<TConfig>::IVector &mask, ViewType view)
{
typedef Matrix<TConfig> TMatrix;
typedef Vector<TConfig> TVector;
typedef typename Matrix<TConfig>::IVector TIVector;
if (!A.is_initialized())
{
FatalError("Trying to multiply uninitialized matrix", AMGX_ERR_BAD_PARAMETERS);
}
if(A.get_block_size() != 1)
{
FatalError("Unsupported blocksize for multiply_masked()", AMGX_ERR_BAD_PARAMETERS);
}
if (A.get_block_dimx() != B.get_block_dimy())
{
std::stringstream ss;
ss << "Matrix and vector dimensions don't match: A.dimx == " << A.get_block_dimx() << ", B.dimy == " << B.get_block_dimy();
FatalError(ss.str().c_str(), AMGX_ERR_BAD_PARAMETERS);
}
Multiply_1x1_masked<TMatrix, TVector, TIVector>::multiply_1x1_masked(A, B, C, mask, view);
C.set_block_dimy(A.get_block_dimx());
}
template<class Matrix, class Vector>
void multiply_with_mask(Matrix &A, Vector &B, Vector &C)
{
if (!A.is_initialized())
{
FatalError("Trying to multiply uninitialized matrix", AMGX_ERR_BAD_PARAMETERS);
}
if (A.get_block_dimx() != B.get_block_dimy())
{
std::stringstream ss;
ss << "Matrix and vector dimensions don't match: A.dimx == " << A.get_block_dimx() << ", B.dimy == " << B.get_block_dimy();
FatalError(ss.str().c_str(), AMGX_ERR_BAD_PARAMETERS);
}
if (A.get_block_size() == 1)
{
Multiply_1x1_with_mask<Matrix, Vector>::multiply_1x1(A, B, C);
}
else
{
FatalError("multiply with mask not supported for bsize != 1", AMGX_ERR_NOT_IMPLEMENTED);
}
C.set_block_dimy(A.get_block_dimx());
C.dirtybit = 1;
//if (!A.is_matrix_singleGPU() && C.size() == B.size() && C.delayed_send==0)
// A.manager->exchange_halo_async(C, C.tag);
}
template<class Matrix, class Vector>
void multiply_with_mask_restriction(Matrix &A, Vector &B, Vector &C, Matrix &P)
{
if (!A.is_initialized())
{
FatalError("Trying to multiply uninitialized matrix", AMGX_ERR_BAD_PARAMETERS);
}
if (A.get_block_dimx() != B.get_block_dimy())
{
std::stringstream ss;
ss << "Matrix and vector dimensions don't match: A.dimx == " << A.get_block_dimx() << ", B.dimy == " << B.get_block_dimy();
FatalError(ss.str().c_str(), AMGX_ERR_BAD_PARAMETERS);
}
if (A.get_block_size() == 1)
{
Multiply_1x1_with_mask_restriction<Matrix, Vector>::multiply_1x1(A, B, C, P);
}
else
{
FatalError("multiply with mask not supported for bsize != 1", AMGX_ERR_NOT_IMPLEMENTED);
}
C.set_block_dimy(A.get_block_dimx());
C.dirtybit = 1;
}
// Sparse matrix-matrix product C = A * B for scalar (block_size == 1)
// matrices.  Host-only: the device memory space is rejected.
// Classic two-pass Gustavson SpGEMM:
//   pass 1 counts the nonzeros of each row of C using a column mask,
//   pass 2 accumulates each row through a singly linked list of touched
//   columns ('next') plus a dense accumulator ('sums').
// Explicit zeros produced by cancellation are kept (see commented code).
template<class TConfig>
void multiplyMM(const Matrix<TConfig> &A, const Matrix<TConfig> &B, Matrix<TConfig> &C)
{
    if (!A.is_initialized())
    {
        FatalError("Trying to multiply uninitialized matrix", AMGX_ERR_BAD_PARAMETERS);
    }
    if (A.get_block_dimx() != B.get_block_dimx() || A.get_block_dimy() != B.get_block_dimy())
    {
        FatalError("Matrices dimensions do not match", AMGX_ERR_BAD_PARAMETERS);
    }
    if (TConfig::memSpace == AMGX_device)
    {
        FatalError("Error, multiplyMM not implemented on device", AMGX_ERR_BAD_PARAMETERS);
    }
    else
    {
        if (A.get_block_size() != 1)
        {
            FatalError("multiplyMM only works for block_size ==1", AMGX_ERR_NOT_IMPLEMENTED);
        }
        else
        {
            typedef typename TConfig::IndPrec IndexType;
            typedef typename TConfig::MatPrec ValueType;
            typedef typename Matrix<TConfig>::IVector IVector;
            typedef typename Matrix<TConfig>::MVector MVector;
            C.set_initialized(0);
            IndexType num_nonzeros = 0;
            // mask[k] == i means column k has already been counted for row i.
            IVector mask(B.get_num_cols(), IndexType (-1));
            // Compute nnz in C (including explicit zeros)
            // NOTE(review): size_t i is compared against IndexType entries of
            // 'mask' below; assumed safe for realistic row counts.
            for (size_t i = 0; i < A.get_num_rows(); i++)
            {
                for (IndexType jj = A.row_offsets[i]; jj < A.row_offsets[i + 1]; jj++)
                {
                    IndexType j = A.col_indices[jj];
                    for (IndexType kk = B.row_offsets[j]; kk < B.row_offsets[j + 1]; kk++)
                    {
                        IndexType k = B.col_indices[kk];
                        if (mask[k] != i)
                        {
                            mask[k] = i;
                            num_nonzeros++;
                        }
                    }
                }
            }
            // Resize output
            C.resize(A.get_num_rows(), B.get_num_cols(), num_nonzeros);
            const IndexType unseen = static_cast<IndexType>(-1);
            const IndexType init = static_cast<IndexType>(-2);
            // Compute entries of C
            // 'next' threads the touched columns of the current row into a
            // list headed by 'head'; 'sums' holds the dense partial sums.
            IVector next(B.get_num_cols(), unseen);
            MVector sums(B.get_num_cols(), types::util<ValueType>::get_zero());
            num_nonzeros = 0;
            C.row_offsets[0] = 0;
            for (size_t i = 0; i < A.get_num_rows(); i++)
            {
                IndexType head = init;
                IndexType length = 0;
                IndexType jj_start = A.row_offsets[i];
                IndexType jj_end = A.row_offsets[i + 1];
                for (IndexType jj = jj_start; jj < jj_end; jj++)
                {
                    IndexType j = A.col_indices[jj];
                    ValueType v = A.values[jj];
                    IndexType kk_start = B.row_offsets[j];
                    IndexType kk_end = B.row_offsets[j + 1];
                    for (IndexType kk = kk_start; kk < kk_end; kk++)
                    {
                        IndexType k = B.col_indices[kk];
                        sums[k] = sums[k] + v * B.values[kk];
                        if (next[k] == unseen)
                        {
                            next[k] = head;
                            head = k;
                            length++;
                        }
                    }
                }
                // Drain the list: emit one C entry per touched column and
                // reset the scratch arrays for the next row.
                // Column order within a row follows list order (reverse of
                // first touch), not sorted order.
                for (IndexType jj = 0; jj < length; jj++)
                {
                    //if(sums[head] != ValueType(0))
                    //{
                    C.col_indices[num_nonzeros] = head;
                    C.values[num_nonzeros] = sums[head];
                    num_nonzeros++;
                    //}
                    IndexType temp = head;
                    head = next[head];
                    // clear arrays
                    next[temp] = unseen;
                    sums[temp] = types::util<ValueType>::get_zero();
                }
                C.row_offsets[i + 1] = num_nonzeros;
            }
            // Resize output again since pass2 omits explict zeros
            //C.resize(A.num_rows, B.num_cols, num_nonzeros);
            C.set_initialized(1);
        }
    }
}
// --------------------------------
// KERNELS
// --------------------------------
// Block-CSR SpMV, C = A * B, for bsize x bsize blocks with NO separately
// stored diagonal.  One "eighth warp" (a group of bsize consecutive threads)
// processes one block row; each thread owns one entry of the output block
// (vec_entry_index).  The B sub-vector of the current column is staged in
// volatile shared memory so all bsize threads of the group can read it.
// Grid-strided over block rows starting at 'row_offset'.
template<typename IndexType, typename ValueTypeA, typename ValueTypeB, int eighthwarps_per_block, int bsize, int log_bsize, int half_bsize, bool ROW_MAJOR>
__global__
void blockDiaCsrMultiplyKernel(const IndexType *row_offsets,
                               const IndexType *column_indices,
                               const ValueTypeA *nonzero_values,
                               const ValueTypeB *B,
                               ValueTypeB *C,
                               const IndexType num_block_rows,
                               const IndexType row_offset)
{
    const int tid = blockDim.x * blockIdx.x + threadIdx.x;
    int eighthwarp_id = row_offset + (tid >> log_bsize);
    const int block_eighthwarp_id = threadIdx.x >> log_bsize;
    const int vec_entry_index = threadIdx.x & (bsize - 1);
    // One bsize-wide slot of B per thread group; volatile because groups are
    // sub-warp and no explicit barrier is used between write and read.
    volatile __shared__ ValueTypeB s_xtemp[ bsize * eighthwarps_per_block ];
    ValueTypeB C_temp;
    int offset, s_offset;
    ValueTypeB temp[bsize];
    while (eighthwarp_id < num_block_rows)
    {
        //i = eighthwarp_id;
        C_temp = types::util<ValueTypeB>::get_zero();
        // Contribution from each nonzero column
        int jmax = row_offsets[eighthwarp_id + 1];
        for (int jind = row_offsets[eighthwarp_id]; jind < jmax; jind++)
        {
            IndexType jcol = column_indices[jind];
            offset = jcol * bsize + vec_entry_index;
            // Stage this thread's entry of the B block into shared memory.
            types::util<ValueTypeB>::volcast(__cachingLoad(&B[offset]), s_xtemp + threadIdx.x);
            // Load nonzero_values: one row (ROW_MAJOR) or one column of the
            // block into 'temp', then do the dense block-vector product.
            s_offset = block_eighthwarp_id * bsize;
            if (ROW_MAJOR)
            {
                offset = jind * bsize * bsize + vec_entry_index * bsize;
                loadAsVector<bsize>(nonzero_values + offset, temp);
            }
            else
            {
                offset = jind * bsize * bsize + vec_entry_index;
#pragma unroll
                for (int m = 0; m < bsize; m++)
                {
                    types::util<ValueTypeA>::to_uptype(nonzero_values[offset + bsize * m], temp[m]);
                }
            }
#pragma unroll
            for (int m = 0; m < bsize; m++)
            {
                C_temp = C_temp + temp[m] * types::util<ValueTypeB>::volcast(s_xtemp[s_offset + m]);
            }
        }
        C[eighthwarp_id * bsize + vec_entry_index] = C_temp;
        eighthwarp_id += gridDim.x * blockDim.x >> log_bsize;
    }
}
#ifdef USE_EXPERIMENTAL_4x4
// Experimental 4x4-block SpMV with externally stored diagonal (DIAG
// property): C = (D + A_offdiag) * B.
// One half-warp (16 threads) processes one 4x4 block row; each thread owns
// one of the 16 entries of a block.  x-blocks are staged through shared
// memory, column indices are exchanged with warp shuffles, and the four
// partial products per output entry are reduced with shfl_xor.
template< typename IndexType, typename ValueTypeA, typename ValueTypeB, int CTA_SIZE, bool ROW_MAJOR >
__global__
#if defined(__CUDA_ARCH__) && __CUDA_ARCH__ >= 700
__launch_bounds__( CTA_SIZE, 16 )
#elif defined(__CUDA_ARCH__)
// NOTE(review): both arch branches currently request identical bounds.
__launch_bounds__( CTA_SIZE, 16 )
#endif
void blockDiaCsrMultiplyKernelDiaProps_4x4( const IndexType *row_offsets,
        const IndexType *column_indices,
        const IndexType *dia_ptr,
        const ValueTypeA *nonzero_values,
        const ValueTypeB *B,
        ValueTypeB *C,
        const int num_block_rows,
        const int row_offset )
{
    const int nHalfWarps = CTA_SIZE / 16; // Number of half warps per CTA.
    const int laneId = threadIdx.x % warpSize;
    const int halfWarpId = threadIdx.x / 16;
    const int halfLaneId = threadIdx.x % 16;
    const int halfLaneId_div_4 = halfLaneId / 4;
    const int halfLaneId_mod_4 = halfLaneId % 4;
    const int laneId_div_16 = laneId / 16;
    const int upperHalf = 16 * laneId_div_16;
    // Shared memory needed to exchange X and delta.
    __shared__ volatile ValueTypeB s_mem[CTA_SIZE];
    // Each thread keeps its own pointer to shared memory to avoid some extra computations.
    volatile ValueTypeB *my_s_mem = &s_mem[16 * halfWarpId];
    // Iterate over the rows of the matrix. One warp per two rows.
    for ( int aRowId = blockIdx.x * nHalfWarps + halfWarpId ; aRowId < num_block_rows ; aRowId += gridDim.x * nHalfWarps )
    {
        unsigned int active_mask = utils::activemask();
        // Load one block of B.
        ValueTypeB my_Ax = types::util<ValueTypeB>::get_zero();
        // The diagonal.
        if ( halfLaneId_div_4 == 0 )
        {
            types::util<ValueTypeB>::volcast( B[4 * aRowId + halfLaneId_mod_4], my_s_mem + halfLaneId);
        }
        // Load the diagonal.
        int diagId = dia_ptr[aRowId];
        // Update my values.
        ValueTypeA my_val = nonzero_values[16 * diagId + halfLaneId];
        if ( ROW_MAJOR )
        {
            my_Ax = my_Ax + my_val * types::util<ValueTypeB>::volcast(my_s_mem[halfLaneId_mod_4]);
        }
        else
        {
            my_Ax = my_Ax + my_val * types::util<ValueTypeB>::volcast(my_s_mem[halfLaneId_div_4]);
        }
        // The range of the rows.
        int aColBegin = row_offsets[aRowId + 0];
        int aColEnd = row_offsets[aRowId + 1];
        // Each warp load column indices of 16 nonzero blocks
        for ( ; utils::any( aColBegin < aColEnd, active_mask ) ; aColBegin += 16 )
        {
            int aColIt = aColBegin + halfLaneId;
            // Get the ID of the column.
            int aColId = -1;
            if ( aColIt < aColEnd )
            {
                aColId = column_indices[aColIt];
            }
            // Count the number of active columns.
            int vote = utils::ballot(aColId != -1, active_mask);
            // The number of iterations.
            int nCols = max( __popc( vote & 0x0000ffff ), __popc( vote & 0xffff0000 ) );
            // Loop over columns. We compute 8 columns per iteration.
            for ( int k = 0 ; k < nCols ; k += 4 )
            {
                int my_k = k + halfLaneId_div_4;
                // Exchange column indices.
                int waColId = utils::shfl( aColId, upperHalf + my_k, warpSize, active_mask );
                // Load 8 blocks of X if needed.
                ValueTypeB my_x = types::util<ValueTypeB>::get_zero();
                if ( waColId != -1 )
                {
                    my_x = B[4 * waColId + halfLaneId_mod_4];
                }
                types::util<ValueTypeB>::volcast( my_x, my_s_mem + halfLaneId);
                // Load 8 blocks of A.
#pragma unroll
                for ( int i = 0 ; i < 4 ; ++i )
                {
                    int w_aColTmp = aColBegin + k + i, w_aColIt = -1;
                    if ( w_aColTmp < aColEnd )
                    {
                        w_aColIt = w_aColTmp;
                    }
                    ValueTypeA my_val = types::util<ValueTypeA>::get_zero();
                    if ( w_aColIt != -1 )
                    {
                        my_val = nonzero_values[16 * w_aColIt + halfLaneId];
                    }
                    if ( ROW_MAJOR )
                    {
                        my_Ax = my_Ax + my_val * types::util<ValueTypeB>::volcast(my_s_mem[4 * i + halfLaneId_mod_4]);
                    }
                    else
                    {
                        my_Ax = my_Ax + my_val * types::util<ValueTypeB>::volcast(my_s_mem[4 * i + halfLaneId_div_4]);
                    }
                }
            } // Loop over k
        } // Loop over aColIt
        // Reduce bmAx terms.
        if ( ROW_MAJOR )
        {
            my_Ax = my_Ax + utils::shfl_xor( my_Ax, 1, warpSize, active_mask );
            my_Ax = my_Ax + utils::shfl_xor( my_Ax, 2, warpSize, active_mask );
        }
        else
        {
            my_Ax = my_Ax + utils::shfl_xor( my_Ax, 4, warpSize, active_mask );
            my_Ax = my_Ax + utils::shfl_xor( my_Ax, 8, warpSize, active_mask );
        }
        // Store the results.
        if ( ROW_MAJOR )
        {
            if ( halfLaneId_mod_4 == 0 )
            {
                C[4 * aRowId + halfLaneId_div_4] = my_Ax;
            }
        }
        else
        {
            if ( halfLaneId_div_4 == 0 )
            {
                C[4 * aRowId + halfLaneId_mod_4] = my_Ax;
            }
        }
    }
}
#else
// Fallback 4x4-block SpMV with externally stored diagonal (DIAG property):
// C = (D + A_offdiag) * B.  Same eighth-warp scheme as
// blockDiaCsrMultiplyKernel, plus an extra diagonal-block contribution
// located via dia_ptr.  Compiled when USE_EXPERIMENTAL_4x4 is not defined.
template<typename IndexType, typename ValueTypeA, typename ValueTypeB, int eighthwarps_per_block, int bsize, int log_bsize, int half_bsize, int bsize_sq, bool ROW_MAJOR>
__global__
void blockDiaCsrMultiplyKernelDiaProps_4x4(const IndexType *row_offsets,
        const IndexType *column_indices,
        const IndexType *dia_ptr,
        const ValueTypeA *nonzero_values,
        const ValueTypeB *B,
        ValueTypeB *C,
        const int num_block_rows,
        const int row_offset)
{
    const int tid = blockDim.x * blockIdx.x + threadIdx.x;
    int eighthwarp_id = row_offset + (tid >> log_bsize);
    const int block_eighthwarp_id = threadIdx.x >> log_bsize;
    const int vec_entry_index = threadIdx.x & (bsize - 1);
    // B sub-vector staging area; volatile because groups are sub-warp and no
    // explicit barrier separates the write from the reads.
    volatile __shared__ ValueTypeB s_xtemp[ bsize * eighthwarps_per_block ];
    ValueTypeB C_temp;
    int offset, s_offset;
    while (eighthwarp_id < num_block_rows)
    {
        //i = eighthwarp_id;
        C_temp = types::util<ValueTypeB>::get_zero();
        // Contribution from diagonal
        offset = eighthwarp_id * bsize + vec_entry_index;
        types::util<ValueTypeB>::volcast(__cachingLoad(&B[offset]), s_xtemp + threadIdx.x);
        // Load dia_values and do matrix multiply
        s_offset = block_eighthwarp_id * bsize;
        ValueTypeA temp[bsize];
        if (ROW_MAJOR)
        {
            loadAsVector<bsize>(nonzero_values + bsize_sq * dia_ptr[eighthwarp_id] + vec_entry_index * bsize, temp);
        }
        else
        {
#pragma unroll
            for (int m = 0; m < bsize; m++)
            {
                temp[m] = nonzero_values[bsize_sq * dia_ptr[eighthwarp_id] + vec_entry_index + bsize * m];
            }
        }
#pragma unroll
        for (int m = 0; m < bsize; m++)
        {
            C_temp = C_temp + temp[m] * types::util<ValueTypeB>::volcast(s_xtemp[s_offset + m]);
        }
        // Contribution from each nonzero column
        int jmax = row_offsets[eighthwarp_id + 1];
        for (int jind = row_offsets[eighthwarp_id]; jind < jmax; jind++)
        {
            IndexType jcol = column_indices[jind];
            offset = jcol * bsize + vec_entry_index;
            types::util<ValueTypeB>::volcast(__cachingLoad(&B[offset]), s_xtemp + threadIdx.x);
            // Load nonzero_values
            s_offset = block_eighthwarp_id * bsize;
            if (ROW_MAJOR)
            {
                offset = jind * bsize_sq + vec_entry_index * bsize;
                loadAsVector<bsize>(nonzero_values + offset, temp);
            }
            else
            {
                offset = jind * bsize_sq + vec_entry_index;
#pragma unroll
                for (int m = 0; m < bsize; m++)
                {
                    temp[m] = nonzero_values[offset + bsize * m];
                }
            }
#pragma unroll
            for (int m = 0; m < bsize; m++)
            {
                C_temp = C_temp + temp[m] * types::util<ValueTypeB>::volcast(s_xtemp[s_offset + m]);
            }
        }
        C[eighthwarp_id * bsize + vec_entry_index] = C_temp;
        eighthwarp_id += gridDim.x * blockDim.x >> log_bsize;
    }
}
#endif
// implementation for arbitrary block size
// Block-CSR SpMV for an ARBITRARY block size 'bsize':
// C = A * B, optionally adding a separately stored diagonal block when the
// 'diag' template flag is set.  One group of bsize threads handles one block
// row.  Because bsize need not divide 32, each warp is padded: lanes beyond
// blockrows_per_warp * bsize retire immediately, and thread ids are
// recomputed so groups never straddle a warp boundary.
template<typename IndexType, typename ValueTypeA, typename ValueTypeB, int blockrows_per_cta, int blockrows_per_warp, int bsize, int diag, bool ROW_MAJOR>
__global__
void blockDiaCsrMultiplyKernelDiaProps(const IndexType *row_offsets,
                                       const IndexType *column_indices,
                                       const IndexType *dia_ptr,
                                       const ValueTypeA *nonzero_values,
                                       const ValueTypeB *B,
                                       ValueTypeB *C,
                                       const int num_block_rows,
                                       const int row_offset)
{
    int warp_id = threadIdx.x / 32;
    int warp_thread_id = threadIdx.x & 31;
    // padding row blocks to fit in a single warp
    if ( warp_thread_id >= blockrows_per_warp * bsize ) { return; }
    // new thread id with padding
    int tid = warp_id * blockrows_per_warp * bsize + warp_thread_id;
    int eighthwarp_id = row_offset + blockIdx.x * blockrows_per_cta + tid / bsize;
    const int block_eighthwarp_id = tid / bsize;
    const int vec_entry_index = tid % bsize;
    const int bsize_sq = bsize * bsize;
    // B sub-vector staging area; volatile because groups are sub-warp and no
    // explicit barrier separates the write from the reads.
    volatile __shared__ ValueTypeB s_xtemp[ bsize * blockrows_per_cta ];
    ValueTypeB C_temp;
    int offset, s_offset;
    while (eighthwarp_id < num_block_rows)
    {
        C_temp = types::util<ValueTypeB>::get_zero();
        ValueTypeB temp[bsize];
        if ( diag )
        {
            // Contribution from diagonal
            offset = eighthwarp_id * bsize + vec_entry_index;
            types::util<ValueTypeB>::volcast( __cachingLoad(&B[offset]), s_xtemp + tid);
            // Load dia_values and do matrix multiply
            s_offset = block_eighthwarp_id * bsize;
            if (ROW_MAJOR)
            {
                loadAsVector<bsize>(nonzero_values + bsize_sq * dia_ptr[eighthwarp_id] + vec_entry_index * bsize, temp);
            }
            else
            {
                offset = dia_ptr[eighthwarp_id] * bsize_sq + vec_entry_index;
#pragma unroll
                for (int m = 0; m < bsize; m++)
                {
                    types::util<ValueTypeA>::to_uptype(nonzero_values[offset + bsize * m], temp[m]);
                }
            }
#pragma unroll
            for (int m = 0; m < bsize; m++)
            {
                C_temp = C_temp + temp[m] * types::util<ValueTypeB>::volcast(s_xtemp[s_offset + m]);
            }
        }
        // Contribution from each nonzero column
        int jmax = row_offsets[eighthwarp_id + 1];
        for (int jind = row_offsets[eighthwarp_id]; jind < jmax; jind++)
        {
            IndexType jcol = column_indices[jind];
            offset = jcol * bsize + vec_entry_index;
            types::util<ValueTypeB>::volcast( __cachingLoad(&B[offset]), s_xtemp + tid);
            // Load nonzero_values
            s_offset = block_eighthwarp_id * bsize;
            if (ROW_MAJOR)
            {
                offset = jind * bsize_sq + vec_entry_index * bsize;
                loadAsVector<bsize>(nonzero_values + offset, temp);
            }
            else
            {
                offset = jind * bsize_sq + vec_entry_index;
#pragma unroll
                for (int m = 0; m < bsize; m++)
                {
                    types::util<ValueTypeA>::to_uptype(nonzero_values[offset + bsize * m], temp[m]);
                }
            }
#pragma unroll
            for (int m = 0; m < bsize; m++)
            {
                C_temp = C_temp + temp[m] * types::util<ValueTypeB>::volcast(s_xtemp[s_offset + m]);
            }
        }
        C[eighthwarp_id * bsize + vec_entry_index] = C_temp;
        eighthwarp_id += gridDim.x * blockrows_per_cta;
    }
}
// --------------------------------------
// Methods
// -------------------------------------
// Method to perform BSPmV on host using block_dia_csr_matrix format
// Host-side reference SpMV for square-block matrices whose diagonal blocks
// are stored separately (DIAG property): C = A * B.
// Naive triple loop; correctness reference only (see TODO below).
template <class Matrix, class Vector>
void multiply_common_sqblock_host_diag(const Matrix &A, const Vector &B, Vector &C)
{
    typedef typename Matrix::TConfig TConfig;
    if (TConfig::memSpace == AMGX_device)
    {
        FatalError("Executrion path error: device matrix in host path", AMGX_ERR_NOT_IMPLEMENTED);
    }
    else
    {
        //TODO:: This implementation is very inneficient, Use BLAS
        typedef typename TConfig::IndPrec IndexType;
        typedef typename TConfig::MatPrec ValueType;
        typedef typename Vector::value_type ValueTypeB;
        IndexType bsize = A.get_block_dimy();
        ValueTypeB b_val;
        for (int row = 0; row < A.get_num_rows(); row++)
        {
            // Clear the output block of this row.
            for (int r = 0; r < bsize; r++)
            {
                C[row * bsize + r] = types::util<ValueTypeB>::get_zero();
            }
            // Diagonal block: C_row += D_row * B_row.
            for (int c = 0; c < bsize; c++)
            {
                b_val = B[row * bsize + c];
                for (int r = 0; r < bsize; r++)
                {
                    C[row * bsize + r] = C[row * bsize + r] + A.values[A.diag[row] * bsize * bsize + r * bsize + c] * b_val;
                }
            }
            // Off-diagonal blocks: C_row += A_block * B_col for each nonzero.
            for (int nz = A.row_offsets[row]; nz < A.row_offsets[row + 1]; nz++)
            {
                IndexType col = A.col_indices[nz];
                for (int c = 0; c < bsize; c++)
                {
                    b_val = B[col * bsize + c];
                    for (int r = 0; r < bsize; r++)
                    {
                        C[row * bsize + r] = C[row * bsize + r] + A.values[nz * bsize * bsize + r * bsize + c] * b_val;
                    }
                }
            }
        }
    }
}
// Host-side reference SpMV for square-block matrices with the diagonal
// stored inline among the nonzero blocks (no DIAG property): C = A * B.
// Naive triple loop; correctness reference only (see TODO below).
template <class Matrix, class Vector>
void multiply_common_sqblock_host_nodiag(const Matrix &A, const Vector &B, Vector &C)
{
    typedef typename Matrix::TConfig TConfig;
    if (TConfig::memSpace == AMGX_device)
    {
        FatalError("Executrion path error: device matrix in host path", AMGX_ERR_NOT_IMPLEMENTED);
    }
    else
    {
        //TODO:: This implementation is very inneficient, Use BLAS
        typedef typename TConfig::IndPrec IndexType;
        typedef typename TConfig::MatPrec ValueType;
        typedef typename Vector::value_type ValueTypeB;
        IndexType bsize = A.get_block_dimy();
        ValueTypeB b_val;
        for (int row = 0; row < A.get_num_rows(); row++)
        {
            // Clear the output block of this row.
            for (int r = 0; r < bsize; r++)
            {
                C[row * bsize + r] = types::util<ValueTypeB>::get_zero();
            }
            // Every nonzero block (diagonal included): C_row += A_block * B_col.
            for (int nz = A.row_offsets[row]; nz < A.row_offsets[row + 1]; nz++)
            {
                IndexType col = A.col_indices[nz];
                for (int c = 0; c < bsize; c++)
                {
                    b_val = B[col * bsize + c];
                    for (int r = 0; r < bsize; r++)
                    {
                        C[row * bsize + r] = C[row * bsize + r] + A.values[nz * bsize * bsize + r * bsize + c] * b_val;
                    }
                }
            }
        }
    }
}
// Dispatcher for scalar (1x1 block) SpMV: host falls back to the naive
// reference loops, device uses the cuSPARSE bsrmv wrapper.
template <class Matrix, class Vector>
class Multiply_1x1
{
    public:
        typedef typename Matrix::TConfig TConfig;
        // C = A * B restricted to the rows selected by 'view'.
        static void multiply_1x1(Matrix &A, Vector &B, Vector &C, ViewType view)
        {
            if (TConfig::memSpace == AMGX_host)
            {
                // NOTE(review): the host path ignores 'view' and processes
                // all rows.
                if (A.hasProps(DIAG))
                {
                    multiply_common_sqblock_host_diag(A, B, C);
                }
                else
                {
                    multiply_common_sqblock_host_nodiag(A, B, C);
                }
            }
            else
            {
                typedef typename TConfig::VecPrec ValueTypeB;
                // y = 1*A*x + 0*y via cuSPARSE.
                Cusparse::bsrmv<TConfig>(types::util<ValueTypeB>::get_one(), A, B, types::util<ValueTypeB>::get_zero(), C, view);
                cudaCheckError();
                // FatalError("Mixed precision is not supported for scalar matrix type", AMGX_ERR_NOT_IMPLEMENTED);
            }
        }
};
// Masked scalar SpMV: computes C = A * B only for the rows selected by
// 'mask', via the cuSPARSE bsrxmv wrapper.  Device-only.
template <class Matrix, class Vector, class IVector>
class Multiply_1x1_masked
{
    public:
        typedef typename Matrix::TConfig TConfig;
        static void multiply_1x1_masked(Matrix &A, Vector &B, Vector &C, IVector mask, ViewType view)
        {
            if (TConfig::memSpace == AMGX_host)
            {
                FatalError("Masked multiply is not supported on host", AMGX_ERR_NOT_IMPLEMENTED);
            }
            else
            {
                typedef typename TConfig::VecPrec ValueTypeB;
                // y = 1*A*x + 0*y on the masked row set.
                Cusparse::bsrxmv<TConfig>(types::util<ValueTypeB>::get_one(), A, B, types::util<ValueTypeB>::get_zero(), C, mask, view);
                cudaCheckError();
            }
        }
};
// Scalar SpMV through the bsrmv_with_mask wrapper (mask state carried by the
// matrix/vector objects rather than passed explicitly).  Device-only.
template <class Matrix, class Vector>
class Multiply_1x1_with_mask
{
    public:
        typedef typename Matrix::TConfig TConfig;
        static void multiply_1x1(Matrix &A, Vector &B, Vector &C)
        {
            if (TConfig::memSpace == AMGX_host)
            {
                FatalError("multiply with mask not supported on host", AMGX_ERR_NOT_IMPLEMENTED);
            }
            else
            {
                typedef typename TConfig::VecPrec ValueTypeB;
                // y = 1*A*x + 0*y.
                Cusparse::bsrmv_with_mask<TConfig>(types::util<ValueTypeB>::get_one(), A, B, types::util<ValueTypeB>::get_zero(), C );
                cudaCheckError();
                // FatalError("Mixed precision is not supported for scalar matrix type", AMGX_ERR_NOT_IMPLEMENTED);
            }
        }
};
// Scalar SpMV for the restriction path: forwards the interpolation operator
// P to the bsrmv_with_mask_restriction wrapper.  Device-only.
template <class Matrix, class Vector>
class Multiply_1x1_with_mask_restriction
{
    public:
        typedef typename Matrix::TConfig TConfig;
        static void multiply_1x1(Matrix &A, Vector &B, Vector &C, Matrix &P)
        {
            if (TConfig::memSpace == AMGX_host)
            {
                FatalError("multiply with mask not supported on host", AMGX_ERR_NOT_IMPLEMENTED);
            }
            else
            {
                typedef typename TConfig::VecPrec ValueTypeB;
                // y = 1*A*x + 0*y.
                Cusparse::bsrmv_with_mask_restriction<TConfig>(types::util<ValueTypeB>::get_one(), A, B, types::util<ValueTypeB>::get_zero(), C, P);
                cudaCheckError();
                // FatalError("Mixed precision is not supported for scalar matrix type", AMGX_ERR_NOT_IMPLEMENTED);
            }
        }
};
// Method to perform BSPmV on device using block_dia_csr_matrix format
// 4x4-block SpMV dispatcher.  Host falls back to the reference loops;
// device selects the kernel variant by DIAG property and block storage
// order (row- vs column-major), launching one kernel over the row range
// described by 'view' ([offset, offset + num_rows)).
template <class Matrix, class Vector>
class Multiply_4x4
{
    public:
        typedef typename Matrix::TConfig TConfig;
        static void multiply_4x4(const Matrix &A, const Vector &B, Vector &C, ViewType view)
        {
            if (TConfig::memSpace == AMGX_host)
            {
                if (A.hasProps(DIAG))
                {
                    multiply_common_sqblock_host_diag(A, B, C);
                }
                else
                {
                    multiply_common_sqblock_host_nodiag(A, B, C);
                }
            }
            else
            {
                //TODO: compare with cublas
                typedef typename TConfig::IndPrec IndexType;
                typedef typename TConfig::MatPrec ValueTypeA;
                typedef typename TConfig::VecPrec ValueTypeB;
                int num_rows, offset;
                A.getOffsetAndSizeForView(view, &offset, &num_rows);
                const IndexType *A_row_offsets_ptr = A.row_offsets.raw();
                const IndexType *A_column_indices_ptr = A.col_indices.raw();
                const IndexType *A_dia_ind_ptr = A.diag.raw();
                const ValueTypeA *A_nonzero_values_ptr = A.values.raw();
                const ValueTypeB *B_ptr = B.raw();
                ValueTypeB *C_ptr = C.raw();
                cudaCheckError();
                const unsigned int threads_per_block = 128;
                const int eightwarps_per_block = threads_per_block / 4;
                const int num_warps_per_cta = threads_per_block / 32;
                // Grid sized so each warp gets at least one row, clamped to
                // the hardware grid limit (kernels grid-stride past it).
                const int num_blocks = ::min(AMGX_GRID_MAX_SIZE, (int) (num_rows + num_warps_per_cta - 1) / num_warps_per_cta); // (int) (A.get_num_rows()-1)/eightwarps_per_block + 1;
                if (!A.hasProps(DIAG))
                {
                    // Diagonal stored inline: plain block-CSR kernel.
                    if (A.getBlockFormat() == ROW_MAJOR)
                    {
                        hipFuncSetCacheConfig(blockDiaCsrMultiplyKernel<IndexType, ValueTypeA, ValueTypeB, eightwarps_per_block, 4, 2, 2, true>, hipFuncCachePreferL1);
                        hipLaunchKernelGGL(( blockDiaCsrMultiplyKernel<IndexType, ValueTypeA, ValueTypeB, eightwarps_per_block, 4, 2, 2, true>) , dim3(num_blocks), dim3(threads_per_block), 0, 0, A_row_offsets_ptr, A_column_indices_ptr, A_nonzero_values_ptr, B_ptr, C_ptr, offset + num_rows, offset);
                    }
                    else
                    {
                        hipFuncSetCacheConfig(blockDiaCsrMultiplyKernel<IndexType, ValueTypeA, ValueTypeB, eightwarps_per_block, 4, 2, 2, false>, hipFuncCachePreferL1);
                        hipLaunchKernelGGL(( blockDiaCsrMultiplyKernel<IndexType, ValueTypeA, ValueTypeB, eightwarps_per_block, 4, 2, 2, false>) , dim3(num_blocks), dim3(threads_per_block), 0, 0, A_row_offsets_ptr, A_column_indices_ptr, A_nonzero_values_ptr, B_ptr, C_ptr, offset + num_rows, offset);
                    }
                }
                else
                {
                    // Diagonal stored separately: DIAG-aware kernel.
#ifdef USE_EXPERIMENTAL_4x4
                    if ( A.getBlockFormat() == ROW_MAJOR )
                    {
                        hipLaunchKernelGGL(( blockDiaCsrMultiplyKernelDiaProps_4x4<IndexType, ValueTypeA, ValueTypeB, threads_per_block, true >) , dim3(num_blocks), dim3(threads_per_block), 0, 0, A_row_offsets_ptr, A_column_indices_ptr, A_dia_ind_ptr, A_nonzero_values_ptr, B_ptr, C_ptr, offset + num_rows, offset);
                    }
                    else
                    {
                        hipLaunchKernelGGL(( blockDiaCsrMultiplyKernelDiaProps_4x4<IndexType, ValueTypeA, ValueTypeB, threads_per_block, false >) , dim3(num_blocks), dim3(threads_per_block), 0, 0, A_row_offsets_ptr, A_column_indices_ptr, A_dia_ind_ptr, A_nonzero_values_ptr, B_ptr, C_ptr, offset + num_rows, offset);
                    }
#else
                    if (A.getBlockFormat() == ROW_MAJOR)
                    {
                        hipFuncSetCacheConfig(blockDiaCsrMultiplyKernelDiaProps_4x4<IndexType, ValueTypeA, ValueTypeB, eightwarps_per_block, 4, 2, 2, 16, true>, hipFuncCachePreferL1);
                        hipLaunchKernelGGL(( blockDiaCsrMultiplyKernelDiaProps_4x4<IndexType, ValueTypeA, ValueTypeB, eightwarps_per_block, 4, 2, 2, 16, true>) , dim3(num_blocks), dim3(threads_per_block), 0, 0, A_row_offsets_ptr, A_column_indices_ptr, A_dia_ind_ptr, A_nonzero_values_ptr, B_ptr, C_ptr, offset + num_rows, offset);
                    }
                    else
                    {
                        hipFuncSetCacheConfig(blockDiaCsrMultiplyKernelDiaProps_4x4<IndexType, ValueTypeA, ValueTypeB, eightwarps_per_block, 4, 2, 2, 16, false>, hipFuncCachePreferL1);
                        hipLaunchKernelGGL(( blockDiaCsrMultiplyKernelDiaProps_4x4<IndexType, ValueTypeA, ValueTypeB, eightwarps_per_block, 4, 2, 2, 16, false>) , dim3(num_blocks), dim3(threads_per_block), 0, 0, A_row_offsets_ptr, A_column_indices_ptr, A_dia_ind_ptr, A_nonzero_values_ptr, B_ptr, C_ptr, offset + num_rows, offset);
                    }
#endif
                }
                cudaCheckError();
            }
        }
};
// Method to perform BSPmV on device using block_dia_csr_matrix format
// Generic-block-size SpMV dispatcher (fallback for sizes without a
// specialized kernel): host uses the reference loops, device defers to the
// cuSPARSE bsrmv wrapper.
template <class Matrix, class Vector>
class Multiply_bxb
{
    public:
        typedef typename Matrix::TConfig TConfig;
        static void multiply_bxb(Matrix &A, Vector &B, Vector &C, ViewType view)
        {
            if (TConfig::memSpace == AMGX_host)
            {
                if (A.hasProps(DIAG))
                {
                    multiply_common_sqblock_host_diag(A, B, C);
                }
                else
                {
                    multiply_common_sqblock_host_nodiag(A, B, C);
                }
            }
            else
            {
                typedef typename TConfig::VecPrec ValueTypeB;
                // y = 1*A*x + 0*y via cuSPARSE.
                Cusparse::bsrmv(types::util<ValueTypeB>::get_one(), A, B, types::util<ValueTypeB>::get_zero(), C, view);
                cudaCheckError();
            }
        }
};
// Method to perform BSPmV on device using block_dia_csr_matrix format
// 3x3-block SpMV dispatcher.  Host falls back to the reference loops;
// device launches the arbitrary-block-size kernel specialized for bsize=3,
// with the 'diag' template flag chosen from the matrix's DIAG property.
// Since 3 does not divide 32, the launch math pads warps: only
// blockrows_per_warp rows fit per warp (see kernel).
template <class Matrix, class Vector>
class Multiply_3x3
{
    public:
        typedef typename Matrix::TConfig TConfig;
        static void multiply_3x3(const Matrix &A, const Vector &B, Vector &C, ViewType view)
        {
            if (TConfig::memSpace == AMGX_host)
            {
                if (A.hasProps(DIAG))
                {
                    multiply_common_sqblock_host_diag(A, B, C);
                }
                else
                {
                    multiply_common_sqblock_host_nodiag(A, B, C);
                }
            }
            else
            {
                //TODO: compare with cublas
                typedef typename TConfig::IndPrec IndexType;
                typedef typename TConfig::MatPrec ValueTypeA;
                typedef typename TConfig::VecPrec ValueTypeB;
                int num_rows, offset;
                A.getOffsetAndSizeForView(view, &offset, &num_rows);
                const IndexType *A_row_offsets_ptr = A.row_offsets.raw();
                const IndexType *A_column_indices_ptr = A.col_indices.raw();
                const IndexType *A_dia_ind_ptr = A.diag.raw();
                const ValueTypeA *A_nonzero_values_ptr = A.values.raw();
                const ValueTypeB *B_ptr = B.raw();
                ValueTypeB *C_ptr = C.raw();
                cudaCheckError();
                const int threads_per_block = 64 * 3;
                const int blockrows_per_warp = 32 / 3;
                const int blockrows_per_cta = (threads_per_block / 32) * blockrows_per_warp;
                const int num_blocks = ::min(AMGX_GRID_MAX_SIZE, (int) (num_rows - 1) / blockrows_per_cta + 1); // (int) (A.get_num_rows()-1)/eightwarps_per_block + 1;
                if (!A.hasProps(DIAG))
                {
                    // Diagonal stored inline with the nonzeros (diag flag = 0).
                    if (A.getBlockFormat() == ROW_MAJOR)
                    {
                        hipFuncSetCacheConfig(blockDiaCsrMultiplyKernelDiaProps<IndexType, ValueTypeA, ValueTypeB, blockrows_per_cta, blockrows_per_warp, 3, 0, true>, hipFuncCachePreferL1);
                        hipLaunchKernelGGL(( blockDiaCsrMultiplyKernelDiaProps<IndexType, ValueTypeA, ValueTypeB, blockrows_per_cta, blockrows_per_warp, 3, 0, true>) , dim3(num_blocks), dim3(threads_per_block), 0, 0, A_row_offsets_ptr, A_column_indices_ptr, A_dia_ind_ptr, A_nonzero_values_ptr, B_ptr, C_ptr, offset + num_rows, offset);
                    }
                    else
                    {
                        hipFuncSetCacheConfig(blockDiaCsrMultiplyKernelDiaProps<IndexType, ValueTypeA, ValueTypeB, blockrows_per_cta, blockrows_per_warp, 3, 0, false>, hipFuncCachePreferL1);
                        hipLaunchKernelGGL(( blockDiaCsrMultiplyKernelDiaProps<IndexType, ValueTypeA, ValueTypeB, blockrows_per_cta, blockrows_per_warp, 3, 0, false>) , dim3(num_blocks), dim3(threads_per_block), 0, 0, A_row_offsets_ptr, A_column_indices_ptr, A_dia_ind_ptr, A_nonzero_values_ptr, B_ptr, C_ptr, offset + num_rows, offset);
                    }
                }
                else
                {
                    // Diagonal stored separately (diag flag = 1).
                    if (A.getBlockFormat() == ROW_MAJOR)
                    {
                        hipFuncSetCacheConfig(blockDiaCsrMultiplyKernelDiaProps<IndexType, ValueTypeA, ValueTypeB, blockrows_per_cta, blockrows_per_warp, 3, 1, true>, hipFuncCachePreferL1);
                        hipLaunchKernelGGL(( blockDiaCsrMultiplyKernelDiaProps<IndexType, ValueTypeA, ValueTypeB, blockrows_per_cta, blockrows_per_warp, 3, 1, true>) , dim3(num_blocks), dim3(threads_per_block), 0, 0, A_row_offsets_ptr, A_column_indices_ptr, A_dia_ind_ptr, A_nonzero_values_ptr, B_ptr, C_ptr, offset + num_rows, offset);
                    }
                    else
                    {
                        hipFuncSetCacheConfig(blockDiaCsrMultiplyKernelDiaProps<IndexType, ValueTypeA, ValueTypeB, blockrows_per_cta, blockrows_per_warp, 3, 1, false>, hipFuncCachePreferL1);
                        hipLaunchKernelGGL(( blockDiaCsrMultiplyKernelDiaProps<IndexType, ValueTypeA, ValueTypeB, blockrows_per_cta, blockrows_per_warp, 3, 1, false>) , dim3(num_blocks), dim3(threads_per_block), 0, 0, A_row_offsets_ptr, A_column_indices_ptr, A_dia_ind_ptr, A_nonzero_values_ptr, B_ptr, C_ptr, offset + num_rows, offset);
                    }
                }
                cudaCheckError();
            }
        }
};
// -------------------------------
// Explicit instantiations
// -------------------------------
// Each AMGX_CASE_LINE expansion instantiates the templated entry point for
// one build configuration; AMGX_FORALL_BUILDS / AMGX_FORCOMPLEX_BUILDS
// enumerate the real and complex configurations respectively.
#define AMGX_CASE_LINE(CASE) template void multiplyMM(const Matrix<TemplateMode<CASE>::Type>&, const Matrix<TemplateMode<CASE>::Type>&, Matrix<TemplateMode<CASE>::Type>&);
AMGX_FORALL_BUILDS(AMGX_CASE_LINE)
AMGX_FORCOMPLEX_BUILDS(AMGX_CASE_LINE)
#undef AMGX_CASE_LINE
#define AMGX_CASE_LINE(CASE) template void multiply(Matrix<TemplateMode<CASE>::Type> &, Vector<TemplateMode<CASE>::Type>&, Vector<TemplateMode<CASE>::Type> &, ViewType);
AMGX_FORALL_BUILDS(AMGX_CASE_LINE)
AMGX_FORCOMPLEX_BUILDS(AMGX_CASE_LINE)
#undef AMGX_CASE_LINE
#define AMGX_CASE_LINE(CASE) template void multiply_masked(Matrix<TemplateMode<CASE>::Type> &, Vector<TemplateMode<CASE>::Type> &, Vector<TemplateMode<CASE>::Type> &, typename Matrix<TemplateMode<CASE>::Type>::IVector &, ViewType);
AMGX_FORALL_BUILDS(AMGX_CASE_LINE)
AMGX_FORCOMPLEX_BUILDS(AMGX_CASE_LINE)
#undef AMGX_CASE_LINE
#define AMGX_CASE_LINE(CASE) template void multiply_with_mask(Matrix<TemplateMode<CASE>::Type> &, Vector<TemplateMode<CASE>::Type>&, Vector<TemplateMode<CASE>::Type> &);
AMGX_FORALL_BUILDS(AMGX_CASE_LINE)
AMGX_FORCOMPLEX_BUILDS(AMGX_CASE_LINE)
#undef AMGX_CASE_LINE
#define AMGX_CASE_LINE(CASE) template void multiply_with_mask_restriction(Matrix<TemplateMode<CASE>::Type> &, Vector<TemplateMode<CASE>::Type>&, Vector<TemplateMode<CASE>::Type> &, Matrix<TemplateMode<CASE>::Type> & );
AMGX_FORALL_BUILDS(AMGX_CASE_LINE)
AMGX_FORCOMPLEX_BUILDS(AMGX_CASE_LINE)
#undef AMGX_CASE_LINE
} // namespace amgx
|
0ffe0333f6360cba17de5ce2b49e180d55d02307.cu
|
/* Copyright (c) 2011-2017, NVIDIA CORPORATION. All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions
* are met:
* * Redistributions of source code must retain the above copyright
* notice, this list of conditions and the following disclaimer.
* * Redistributions in binary form must reproduce the above copyright
* notice, this list of conditions and the following disclaimer in the
* documentation and/or other materials provided with the distribution.
* * Neither the name of NVIDIA CORPORATION nor the names of its
* contributors may be used to endorse or promote products derived
* from this software without specific prior written permission.
*
* THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS ``AS IS'' AND ANY
* EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
* IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
* PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR
* CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
* EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
* PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
* PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY
* OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
* (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
* OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
*/
#include <multiply.h>
#include <basic_types.h>
#include <texture.h>
#include <util.h>
#include <cutil.h>
#ifdef _WIN32
#pragma warning (push)
#pragma warning (disable : 4244 4267 4521)
#endif
#include <cusp/multiply.h>
#include <matrix.h>
#include <matrix_cusp.h>
#include <amgx_cusparse.h>
#ifdef _WIN32
#pragma warning (pop)
#endif
#include <sm_utils.inl>
#include <amgx_types/math.h>
#include <amgx_types/util.h>
namespace amgx
{
#define USE_EXPERIMENTAL_4x4
// Forward declarations of the per-block-size multiply dispatchers, all
// defined later in this translation unit.
template <class Matrix, class Vector>
class Multiply_1x1;
template <class Matrix, class Vector, class IVector>
class Multiply_1x1_masked;
template <class Matrix, class Vector>
class Multiply_1x1_with_mask;
template <class Matrix, class Vector>
class Multiply_1x1_with_mask_restriction;
template <class Matrix, class Vector>
class Multiply_3x3;
template <class Matrix, class Vector>
class Multiply_4x4;
template <class Matrix, class Vector>
class Multiply_bxb;
// Dispatch C = A * B to the implementation specialized for A's block size:
// 1x1, 3x3, 4x4, or the generic bxb fallback.
template <typename TConfig>
void multiply_block_size(Matrix<TConfig> &A, Vector<TConfig> &B, Vector<TConfig> &C, ViewType view)
{
    typedef Matrix<TConfig> TMatrix;
    typedef Vector<TConfig> TVector;
    if (A.get_block_size() == 1)
    {
        Multiply_1x1<TMatrix, TVector>::multiply_1x1(A, B, C, view);
    }
    else if (A.get_block_dimy() == 3 && A.get_block_dimx() == 3)
    {
        Multiply_3x3<TMatrix, TVector>::multiply_3x3(A, B, C, view);
    }
    else if (A.get_block_dimy() == 4 && A.get_block_dimx() == 4)
    {
        Multiply_4x4<TMatrix, TVector>::multiply_4x4(A, B, C, view);
    }
    else
    {
        Multiply_bxb<TMatrix, TVector>::multiply_bxb(A, B, C, view);
    }
}
// Top-level SpMV entry point: C = A * B over the rows selected by 'view'.
// On multi-GPU runs with distinct interior/exterior views it overlaps the
// halo exchange of B with the interior multiply (latency hiding); otherwise
// it exchanges the halo (if dirty) and multiplies in one shot.
template <typename TConfig>
void multiply(Matrix<TConfig> &A, Vector<TConfig> &B, Vector<TConfig> &C, ViewType view)
{
    typedef Matrix<TConfig> TMatrix;
    typedef Vector<TConfig> TVector;
    if (!A.is_initialized())
    {
        FatalError("Trying to multiply uninitialized matrix", AMGX_ERR_BAD_PARAMETERS);
    }
    if (A.get_block_dimx() != B.get_block_dimy())
    {
        std::stringstream ss;
        ss << "Matrix and vector dimensions don't match: A.dimx == " << A.get_block_dimx() << ", B.dimy == " << B.get_block_dimy();
        FatalError(ss.str().c_str(), AMGX_ERR_BAD_PARAMETERS);
    }
    // NOTE(review): duplicate typedefs (already declared above); harmless.
    typedef Matrix<TConfig> TMatrix;
    typedef Vector<TConfig> TVector;
    bool latencyHiding = (view == A.getViewExterior() && A.getViewInterior() != A.getViewExterior() && !A.is_matrix_singleGPU() && B.dirtybit != 0);
    if (latencyHiding)
    {
        // Start the halo exchange of B asynchronously ...
        A.manager->exchange_halo_split_gather(B, B.tag);
        // Multiply interior rows
        multiply_block_size(A, B, C, A.getViewInterior());
        // Finish halo exchange
        A.manager->exchange_halo_split_finish(B, B.tag);
        // Multiply rows with halo dependencies
        ViewType bnd_view = (ViewType)(~(A.getViewInterior()) & A.getViewExterior());
        multiply_block_size(A, B, C, bnd_view);
    }
    else
    {
        // Synchronous path: refresh B's halo first when needed.
        if (view != INTERIOR && !A.is_matrix_singleGPU() && B.dirtybit != 0)
        {
            A.manager->exchange_halo_v2(B, B.tag);
        }
        multiply_block_size(A, B, C, view);
    }
    // The result's halo is now stale; record it and propagate block shape.
    C.dirtybit = 1;
    C.set_block_dimy(A.get_block_dimx());
}
template <class TConfig>
void multiply_masked(Matrix<TConfig> &A, Vector<TConfig> &B, Vector<TConfig> &C, typename Matrix<TConfig>::IVector &mask, ViewType view)
{
    // Masked sparse matrix-vector product: C = A * B computed only on the rows
    // selected by 'mask'. Supported for scalar (1x1 block) matrices only.
    typedef Matrix<TConfig> TMatrix;
    typedef Vector<TConfig> TVector;
    typedef typename Matrix<TConfig>::IVector TIVector;

    if (!A.is_initialized())
    {
        FatalError("Trying to multiply uninitialized matrix", AMGX_ERR_BAD_PARAMETERS);
    }

    if (A.get_block_size() != 1)
    {
        FatalError("Unsupported blocksize for multiply_masked()", AMGX_ERR_BAD_PARAMETERS);
    }

    if (A.get_block_dimx() != B.get_block_dimy())
    {
        std::stringstream err;
        err << "Matrix and vector dimensions don't match: A.dimx == " << A.get_block_dimx() << ", B.dimy == " << B.get_block_dimy();
        FatalError(err.str().c_str(), AMGX_ERR_BAD_PARAMETERS);
    }

    Multiply_1x1_masked<TMatrix, TVector, TIVector>::multiply_1x1_masked(A, B, C, mask, view);
    C.set_block_dimy(A.get_block_dimx());
}
template<class Matrix, class Vector>
void multiply_with_mask(Matrix &A, Vector &B, Vector &C)
{
    // Masked sparse matrix-vector product C = A * B (scalar matrices only).
    if (!A.is_initialized())
    {
        FatalError("Trying to multiply uninitialized matrix", AMGX_ERR_BAD_PARAMETERS);
    }

    if (A.get_block_dimx() != B.get_block_dimy())
    {
        std::stringstream err;
        err << "Matrix and vector dimensions don't match: A.dimx == " << A.get_block_dimx() << ", B.dimy == " << B.get_block_dimy();
        FatalError(err.str().c_str(), AMGX_ERR_BAD_PARAMETERS);
    }

    if (A.get_block_size() != 1)
    {
        FatalError("multiply with mask not supported for bsize != 1", AMGX_ERR_NOT_IMPLEMENTED);
    }
    else
    {
        Multiply_1x1_with_mask<Matrix, Vector>::multiply_1x1(A, B, C);
    }

    C.set_block_dimy(A.get_block_dimx());
    C.dirtybit = 1;
    // Asynchronous halo exchange of C intentionally left disabled here:
    //if (!A.is_matrix_singleGPU() && C.size() == B.size() && C.delayed_send==0)
    //    A.manager->exchange_halo_async(C, C.tag);
}
template<class Matrix, class Vector>
void multiply_with_mask_restriction(Matrix &A, Vector &B, Vector &C, Matrix &P)
{
    // Masked restriction product (scalar matrices only); P supplies the
    // restriction structure used by the cuSPARSE-backed implementation.
    if (!A.is_initialized())
    {
        FatalError("Trying to multiply uninitialized matrix", AMGX_ERR_BAD_PARAMETERS);
    }

    if (A.get_block_dimx() != B.get_block_dimy())
    {
        std::stringstream err;
        err << "Matrix and vector dimensions don't match: A.dimx == " << A.get_block_dimx() << ", B.dimy == " << B.get_block_dimy();
        FatalError(err.str().c_str(), AMGX_ERR_BAD_PARAMETERS);
    }

    if (A.get_block_size() != 1)
    {
        FatalError("multiply with mask not supported for bsize != 1", AMGX_ERR_NOT_IMPLEMENTED);
    }
    else
    {
        Multiply_1x1_with_mask_restriction<Matrix, Vector>::multiply_1x1(A, B, C, P);
    }

    C.set_block_dimy(A.get_block_dimx());
    C.dirtybit = 1;
}
template<class TConfig>
void multiplyMM(const Matrix<TConfig> &A, const Matrix<TConfig> &B, Matrix<TConfig> &C)
{
    // Host-only sparse matrix-matrix product C = A * B for scalar (block size 1)
    // CSR matrices. Two-pass Gustavson-style SpGEMM:
    //   pass 1 counts the nonzeros of each row of C using a per-row column mask;
    //   pass 2 accumulates values into a dense scratch row, tracking the touched
    //   columns in an intrusive singly linked list ('next').
    if (!A.is_initialized())
    {
        FatalError("Trying to multiply uninitialized matrix", AMGX_ERR_BAD_PARAMETERS);
    }
    if (A.get_block_dimx() != B.get_block_dimx() || A.get_block_dimy() != B.get_block_dimy())
    {
        FatalError("Matrices dimensions do not match", AMGX_ERR_BAD_PARAMETERS);
    }
    if (TConfig::memSpace == AMGX_device)
    {
        FatalError("Error, multiplyMM not implemented on device", AMGX_ERR_BAD_PARAMETERS);
    }
    else
    {
        if (A.get_block_size() != 1)
        {
            FatalError("multiplyMM only works for block_size ==1", AMGX_ERR_NOT_IMPLEMENTED);
        }
        else
        {
            typedef typename TConfig::IndPrec IndexType;
            typedef typename TConfig::MatPrec ValueType;
            typedef typename Matrix<TConfig>::IVector IVector;
            typedef typename Matrix<TConfig>::MVector MVector;
            C.set_initialized(0);
            IndexType num_nonzeros = 0;
            // mask[k] == i means column k has already been counted for row i.
            IVector mask(B.get_num_cols(), IndexType (-1));
            // Compute nnz in C (including explicit zeros)
            for (size_t i = 0; i < A.get_num_rows(); i++)
            {
                for (IndexType jj = A.row_offsets[i]; jj < A.row_offsets[i + 1]; jj++)
                {
                    IndexType j = A.col_indices[jj];
                    for (IndexType kk = B.row_offsets[j]; kk < B.row_offsets[j + 1]; kk++)
                    {
                        IndexType k = B.col_indices[kk];
                        if (mask[k] != i)
                        {
                            mask[k] = i;
                            num_nonzeros++;
                        }
                    }
                }
            }
            // Resize output
            C.resize(A.get_num_rows(), B.get_num_cols(), num_nonzeros);
            // Sentinels for the linked list of touched columns:
            // 'unseen' marks an untouched column, 'init' terminates the list.
            const IndexType unseen = static_cast<IndexType>(-1);
            const IndexType init = static_cast<IndexType>(-2);
            // Compute entries of C. 'sums' is the dense accumulator for the
            // current row; 'next' threads the touched columns head -> ... -> init.
            IVector next(B.get_num_cols(), unseen);
            MVector sums(B.get_num_cols(), types::util<ValueType>::get_zero());
            num_nonzeros = 0;
            C.row_offsets[0] = 0;
            for (size_t i = 0; i < A.get_num_rows(); i++)
            {
                IndexType head = init;
                IndexType length = 0;
                IndexType jj_start = A.row_offsets[i];
                IndexType jj_end = A.row_offsets[i + 1];
                for (IndexType jj = jj_start; jj < jj_end; jj++)
                {
                    IndexType j = A.col_indices[jj];
                    ValueType v = A.values[jj];
                    IndexType kk_start = B.row_offsets[j];
                    IndexType kk_end = B.row_offsets[j + 1];
                    for (IndexType kk = kk_start; kk < kk_end; kk++)
                    {
                        IndexType k = B.col_indices[kk];
                        sums[k] = sums[k] + v * B.values[kk];
                        // First touch of column k in this row: push onto the list.
                        if (next[k] == unseen)
                        {
                            next[k] = head;
                            head = k;
                            length++;
                        }
                    }
                }
                // Drain the list: emit one C entry per touched column and reset
                // next[]/sums[] for the following row.
                // NOTE(review): columns within each row of C are emitted in
                // reverse discovery order, i.e. NOT sorted by column index.
                for (IndexType jj = 0; jj < length; jj++)
                {
                    //if(sums[head] != ValueType(0))
                    //{
                    C.col_indices[num_nonzeros] = head;
                    C.values[num_nonzeros] = sums[head];
                    num_nonzeros++;
                    //}
                    IndexType temp = head;
                    head = next[head];
                    // clear arrays
                    next[temp] = unseen;
                    sums[temp] = types::util<ValueType>::get_zero();
                }
                C.row_offsets[i + 1] = num_nonzeros;
            }
            // Resize output again since pass2 omits explict zeros
            //C.resize(A.num_rows, B.num_cols, num_nonzeros);
            C.set_initialized(1);
        }
    }
}
// --------------------------------
// KERNELS
// --------------------------------
// BSR SpMV kernel (no separate diagonal): each group of 'bsize' consecutive
// threads (an "eighth warp" when bsize == 4) cooperates on one block row,
// iterating rows grid-stride. Shared memory stages the bsize entries of B for
// the current column block; each thread accumulates one entry of the output
// block row.
// NOTE(review): the group writes s_xtemp and immediately reads its neighbours'
// entries without a barrier — this relies on implicit intra-warp synchrony
// (volatile shared memory, bsize threads within one warp); confirm for
// independent-thread-scheduling architectures.
template<typename IndexType, typename ValueTypeA, typename ValueTypeB, int eighthwarps_per_block, int bsize, int log_bsize, int half_bsize, bool ROW_MAJOR>
__global__
void blockDiaCsrMultiplyKernel(const IndexType *row_offsets,
                               const IndexType *column_indices,
                               const ValueTypeA *nonzero_values,
                               const ValueTypeB *B,
                               ValueTypeB *C,
                               const IndexType num_block_rows,
                               const IndexType row_offset)
{
    const int tid = blockDim.x * blockIdx.x + threadIdx.x;
    // Global index of this thread's bsize-thread group (= block row to process).
    int eighthwarp_id = row_offset + (tid >> log_bsize);
    const int block_eighthwarp_id = threadIdx.x >> log_bsize;
    // Which entry of the block row this thread owns (0..bsize-1).
    const int vec_entry_index = threadIdx.x & (bsize - 1);
    volatile __shared__ ValueTypeB s_xtemp[ bsize * eighthwarps_per_block ];
    ValueTypeB C_temp;
    int offset, s_offset;
    ValueTypeB temp[bsize];
    // Grid-stride loop over block rows in [row_offset, num_block_rows).
    while (eighthwarp_id < num_block_rows)
    {
        //i = eighthwarp_id;
        C_temp = types::util<ValueTypeB>::get_zero();
        // Contribution from each nonzero column
        int jmax = row_offsets[eighthwarp_id + 1];
        for (int jind = row_offsets[eighthwarp_id]; jind < jmax; jind++)
        {
            IndexType jcol = column_indices[jind];
            // Stage the bsize entries of B for column block 'jcol'.
            offset = jcol * bsize + vec_entry_index;
            types::util<ValueTypeB>::volcast(__cachingLoad(&B[offset]), s_xtemp + threadIdx.x);
            // Load nonzero_values
            s_offset = block_eighthwarp_id * bsize;
            if (ROW_MAJOR)
            {
                // Row-major block: this thread owns one full row of the block.
                offset = jind * bsize * bsize + vec_entry_index * bsize;
                loadAsVector<bsize>(nonzero_values + offset, temp);
            }
            else
            {
                // Column-major block: gather the row with stride bsize.
                offset = jind * bsize * bsize + vec_entry_index;
#pragma unroll
                for (int m = 0; m < bsize; m++)
                {
                    types::util<ValueTypeA>::to_uptype(nonzero_values[offset + bsize * m], temp[m]);
                }
            }
            // block-row x block-vector dot product for this thread's entry.
#pragma unroll
            for (int m = 0; m < bsize; m++)
            {
                C_temp = C_temp + temp[m] * types::util<ValueTypeB>::volcast(s_xtemp[s_offset + m]);
            }
        }
        C[eighthwarp_id * bsize + vec_entry_index] = C_temp;
        // Advance by the total number of bsize-thread groups in the grid.
        eighthwarp_id += gridDim.x * blockDim.x >> log_bsize;
    }
}
#ifdef USE_EXPERIMENTAL_4x4
// Experimental 4x4 BSR SpMV for matrices storing the diagonal separately
// (DIAG property). One half warp (16 threads) processes one block row; each
// half-lane owns one element of a 4x4 block. Column indices are exchanged via
// warp shuffles, X blocks are staged through shared memory, and the per-lane
// partial products are reduced with shuffles at the end.
// NOTE(review): 'row_offset' is accepted but never applied to the row loop
// below (aRowId starts at blockIdx.x * nHalfWarps + halfWarpId), so for a
// boundary view with a nonzero offset the kernel re-processes rows
// [0, row_offset) — redundant work at minimum; confirm whether intentional.
// NOTE(review): both __launch_bounds__ branches below are identical; the
// #if/#elif split looks vestigial.
template< typename IndexType, typename ValueTypeA, typename ValueTypeB, int CTA_SIZE, bool ROW_MAJOR >
__global__
#if defined(__CUDA_ARCH__) && __CUDA_ARCH__ >= 700
__launch_bounds__( CTA_SIZE, 16 )
#elif defined(__CUDA_ARCH__)
__launch_bounds__( CTA_SIZE, 16 )
#endif
void blockDiaCsrMultiplyKernelDiaProps_4x4( const IndexType *row_offsets,
                                            const IndexType *column_indices,
                                            const IndexType *dia_ptr,
                                            const ValueTypeA *nonzero_values,
                                            const ValueTypeB *B,
                                            ValueTypeB *C,
                                            const int num_block_rows,
                                            const int row_offset )
{
    const int nHalfWarps = CTA_SIZE / 16; // Number of half warps per CTA.
    const int laneId = threadIdx.x % warpSize;
    const int halfWarpId = threadIdx.x / 16;
    const int halfLaneId = threadIdx.x % 16;
    const int halfLaneId_div_4 = halfLaneId / 4;   // row of the 4x4 block owned by this lane
    const int halfLaneId_mod_4 = halfLaneId % 4;   // column of the 4x4 block owned by this lane
    const int laneId_div_16 = laneId / 16;
    const int upperHalf = 16 * laneId_div_16;
    // Shared memory needed to exchange X and delta.
    __shared__ volatile ValueTypeB s_mem[CTA_SIZE];
    // Each thread keeps its own pointer to shared memory to avoid some extra computations.
    volatile ValueTypeB *my_s_mem = &s_mem[16 * halfWarpId];
    // Iterate over the rows of the matrix. One warp per two rows.
    for ( int aRowId = blockIdx.x * nHalfWarps + halfWarpId ; aRowId < num_block_rows ; aRowId += gridDim.x * nHalfWarps )
    {
        unsigned int active_mask = utils::activemask();
        // Load one block of B.
        ValueTypeB my_Ax = types::util<ValueTypeB>::get_zero();
        // The diagonal.
        if ( halfLaneId_div_4 == 0 )
        {
            // Lanes 0..3 of the half warp stage the 4 entries of B for this row.
            types::util<ValueTypeB>::volcast( B[4 * aRowId + halfLaneId_mod_4], my_s_mem + halfLaneId);
        }
        // Load the diagonal.
        int diagId = dia_ptr[aRowId];
        // Update my values.
        ValueTypeA my_val = nonzero_values[16 * diagId + halfLaneId];
        if ( ROW_MAJOR )
        {
            my_Ax = my_Ax + my_val * types::util<ValueTypeB>::volcast(my_s_mem[halfLaneId_mod_4]);
        }
        else
        {
            my_Ax = my_Ax + my_val * types::util<ValueTypeB>::volcast(my_s_mem[halfLaneId_div_4]);
        }
        // The range of the rows.
        int aColBegin = row_offsets[aRowId + 0];
        int aColEnd = row_offsets[aRowId + 1];
        // Each warp load column indices of 16 nonzero blocks
        for ( ; utils::any( aColBegin < aColEnd, active_mask ) ; aColBegin += 16 )
        {
            int aColIt = aColBegin + halfLaneId;
            // Get the ID of the column.
            int aColId = -1;
            if ( aColIt < aColEnd )
            {
                aColId = column_indices[aColIt];
            }
            // Count the number of active columns.
            int vote = utils::ballot(aColId != -1, active_mask);
            // The number of iterations.
            int nCols = max( __popc( vote & 0x0000ffff ), __popc( vote & 0xffff0000 ) );
            // Loop over columns. We compute 8 columns per iteration.
            for ( int k = 0 ; k < nCols ; k += 4 )
            {
                int my_k = k + halfLaneId_div_4;
                // Exchange column indices.
                int waColId = utils::shfl( aColId, upperHalf + my_k, warpSize, active_mask );
                // Load 8 blocks of X if needed.
                ValueTypeB my_x = types::util<ValueTypeB>::get_zero();
                if ( waColId != -1 )
                {
                    my_x = B[4 * waColId + halfLaneId_mod_4];
                }
                types::util<ValueTypeB>::volcast( my_x, my_s_mem + halfLaneId);
                // Load 8 blocks of A.
#pragma unroll
                for ( int i = 0 ; i < 4 ; ++i )
                {
                    int w_aColTmp = aColBegin + k + i, w_aColIt = -1;
                    if ( w_aColTmp < aColEnd )
                    {
                        w_aColIt = w_aColTmp;
                    }
                    ValueTypeA my_val = types::util<ValueTypeA>::get_zero();
                    if ( w_aColIt != -1 )
                    {
                        my_val = nonzero_values[16 * w_aColIt + halfLaneId];
                    }
                    if ( ROW_MAJOR )
                    {
                        my_Ax = my_Ax + my_val * types::util<ValueTypeB>::volcast(my_s_mem[4 * i + halfLaneId_mod_4]);
                    }
                    else
                    {
                        my_Ax = my_Ax + my_val * types::util<ValueTypeB>::volcast(my_s_mem[4 * i + halfLaneId_div_4]);
                    }
                }
            } // Loop over k
        } // Loop over aColIt
        // Reduce bmAx terms.
        // Each lane holds one of 4 partial products for its output entry; the
        // shuffle pattern depends on whether lanes own rows or columns.
        if ( ROW_MAJOR )
        {
            my_Ax = my_Ax + utils::shfl_xor( my_Ax, 1, warpSize, active_mask );
            my_Ax = my_Ax + utils::shfl_xor( my_Ax, 2, warpSize, active_mask );
        }
        else
        {
            my_Ax = my_Ax + utils::shfl_xor( my_Ax, 4, warpSize, active_mask );
            my_Ax = my_Ax + utils::shfl_xor( my_Ax, 8, warpSize, active_mask );
        }
        // Store the results.
        if ( ROW_MAJOR )
        {
            if ( halfLaneId_mod_4 == 0 )
            {
                C[4 * aRowId + halfLaneId_div_4] = my_Ax;
            }
        }
        else
        {
            if ( halfLaneId_div_4 == 0 )
            {
                C[4 * aRowId + halfLaneId_mod_4] = my_Ax;
            }
        }
    }
}
#else
// Fallback 4x4 BSR SpMV with separately stored diagonal (compiled only when
// USE_EXPERIMENTAL_4x4 is NOT defined). Same cooperative scheme as
// blockDiaCsrMultiplyKernel — one bsize-thread group per block row, grid-stride
// over rows — but first adds the diagonal block contribution via dia_ptr,
// then the off-diagonal blocks.
template<typename IndexType, typename ValueTypeA, typename ValueTypeB, int eighthwarps_per_block, int bsize, int log_bsize, int half_bsize, int bsize_sq, bool ROW_MAJOR>
__global__
void blockDiaCsrMultiplyKernelDiaProps_4x4(const IndexType *row_offsets,
                                           const IndexType *column_indices,
                                           const IndexType *dia_ptr,
                                           const ValueTypeA *nonzero_values,
                                           const ValueTypeB *B,
                                           ValueTypeB *C,
                                           const int num_block_rows,
                                           const int row_offset)
{
    const int tid = blockDim.x * blockIdx.x + threadIdx.x;
    // Global index of this thread's bsize-thread group (= block row to process).
    int eighthwarp_id = row_offset + (tid >> log_bsize);
    const int block_eighthwarp_id = threadIdx.x >> log_bsize;
    const int vec_entry_index = threadIdx.x & (bsize - 1);
    volatile __shared__ ValueTypeB s_xtemp[ bsize * eighthwarps_per_block ];
    ValueTypeB C_temp;
    int offset, s_offset;
    while (eighthwarp_id < num_block_rows)
    {
        //i = eighthwarp_id;
        C_temp = types::util<ValueTypeB>::get_zero();
        // Contribution from diagonal
        offset = eighthwarp_id * bsize + vec_entry_index;
        types::util<ValueTypeB>::volcast(__cachingLoad(&B[offset]), s_xtemp + threadIdx.x);
        // Load dia_values and do matrix multiply
        s_offset = block_eighthwarp_id * bsize;
        ValueTypeA temp[bsize];
        if (ROW_MAJOR)
        {
            loadAsVector<bsize>(nonzero_values + bsize_sq * dia_ptr[eighthwarp_id] + vec_entry_index * bsize, temp);
        }
        else
        {
#pragma unroll
            for (int m = 0; m < bsize; m++)
            {
                temp[m] = nonzero_values[bsize_sq * dia_ptr[eighthwarp_id] + vec_entry_index + bsize * m];
            }
        }
#pragma unroll
        for (int m = 0; m < bsize; m++)
        {
            C_temp = C_temp + temp[m] * types::util<ValueTypeB>::volcast(s_xtemp[s_offset + m]);
        }
        // Contribution from each nonzero column
        int jmax = row_offsets[eighthwarp_id + 1];
        for (int jind = row_offsets[eighthwarp_id]; jind < jmax; jind++)
        {
            IndexType jcol = column_indices[jind];
            offset = jcol * bsize + vec_entry_index;
            types::util<ValueTypeB>::volcast(__cachingLoad(&B[offset]), s_xtemp + threadIdx.x);
            // Load nonzero_values
            s_offset = block_eighthwarp_id * bsize;
            if (ROW_MAJOR)
            {
                offset = jind * bsize_sq + vec_entry_index * bsize;
                loadAsVector<bsize>(nonzero_values + offset, temp);
            }
            else
            {
                offset = jind * bsize_sq + vec_entry_index;
#pragma unroll
                for (int m = 0; m < bsize; m++)
                {
                    temp[m] = nonzero_values[offset + bsize * m];
                }
            }
#pragma unroll
            for (int m = 0; m < bsize; m++)
            {
                C_temp = C_temp + temp[m] * types::util<ValueTypeB>::volcast(s_xtemp[s_offset + m]);
            }
        }
        C[eighthwarp_id * bsize + vec_entry_index] = C_temp;
        // Advance by the total number of bsize-thread groups in the grid.
        eighthwarp_id += gridDim.x * blockDim.x >> log_bsize;
    }
}
#endif
// implementation for arbitrary block size
// Generic BSR SpMV for arbitrary block size 'bsize', with optional separately
// stored diagonal (compile-time 'diag' flag). A warp handles 'blockrows_per_warp'
// block rows; lanes beyond blockrows_per_warp * bsize are padding and exit
// immediately so each group of bsize threads stays within a single warp.
template<typename IndexType, typename ValueTypeA, typename ValueTypeB, int blockrows_per_cta, int blockrows_per_warp, int bsize, int diag, bool ROW_MAJOR>
__global__
void blockDiaCsrMultiplyKernelDiaProps(const IndexType *row_offsets,
                                       const IndexType *column_indices,
                                       const IndexType *dia_ptr,
                                       const ValueTypeA *nonzero_values,
                                       const ValueTypeB *B,
                                       ValueTypeB *C,
                                       const int num_block_rows,
                                       const int row_offset)
{
    int warp_id = threadIdx.x / 32;
    int warp_thread_id = threadIdx.x & 31;
    // padding row blocks to fit in a single warp
    if ( warp_thread_id >= blockrows_per_warp * bsize ) { return; }
    // new thread id with padding
    int tid = warp_id * blockrows_per_warp * bsize + warp_thread_id;
    // Global index of the block row this bsize-thread group processes.
    int eighthwarp_id = row_offset + blockIdx.x * blockrows_per_cta + tid / bsize;
    const int block_eighthwarp_id = tid / bsize;
    const int vec_entry_index = tid % bsize;
    const int bsize_sq = bsize * bsize;
    volatile __shared__ ValueTypeB s_xtemp[ bsize * blockrows_per_cta ];
    ValueTypeB C_temp;
    int offset, s_offset;
    // Grid-stride loop over block rows.
    while (eighthwarp_id < num_block_rows)
    {
        C_temp = types::util<ValueTypeB>::get_zero();
        ValueTypeB temp[bsize];
        if ( diag )
        {
            // Contribution from diagonal
            offset = eighthwarp_id * bsize + vec_entry_index;
            types::util<ValueTypeB>::volcast( __cachingLoad(&B[offset]), s_xtemp + tid);
            // Load dia_values and do matrix multiply
            s_offset = block_eighthwarp_id * bsize;
            if (ROW_MAJOR)
            {
                loadAsVector<bsize>(nonzero_values + bsize_sq * dia_ptr[eighthwarp_id] + vec_entry_index * bsize, temp);
            }
            else
            {
                offset = dia_ptr[eighthwarp_id] * bsize_sq + vec_entry_index;
#pragma unroll
                for (int m = 0; m < bsize; m++)
                {
                    types::util<ValueTypeA>::to_uptype(nonzero_values[offset + bsize * m], temp[m]);
                }
            }
#pragma unroll
            for (int m = 0; m < bsize; m++)
            {
                C_temp = C_temp + temp[m] * types::util<ValueTypeB>::volcast(s_xtemp[s_offset + m]);
            }
        }
        // Contribution from each nonzero column
        int jmax = row_offsets[eighthwarp_id + 1];
        for (int jind = row_offsets[eighthwarp_id]; jind < jmax; jind++)
        {
            IndexType jcol = column_indices[jind];
            offset = jcol * bsize + vec_entry_index;
            types::util<ValueTypeB>::volcast( __cachingLoad(&B[offset]), s_xtemp + tid);
            // Load nonzero_values
            s_offset = block_eighthwarp_id * bsize;
            if (ROW_MAJOR)
            {
                offset = jind * bsize_sq + vec_entry_index * bsize;
                loadAsVector<bsize>(nonzero_values + offset, temp);
            }
            else
            {
                offset = jind * bsize_sq + vec_entry_index;
#pragma unroll
                for (int m = 0; m < bsize; m++)
                {
                    types::util<ValueTypeA>::to_uptype(nonzero_values[offset + bsize * m], temp[m]);
                }
            }
#pragma unroll
            for (int m = 0; m < bsize; m++)
            {
                C_temp = C_temp + temp[m] * types::util<ValueTypeB>::volcast(s_xtemp[s_offset + m]);
            }
        }
        C[eighthwarp_id * bsize + vec_entry_index] = C_temp;
        eighthwarp_id += gridDim.x * blockrows_per_cta;
    }
}
// --------------------------------------
// Methods
// -------------------------------------
// Method to perform BSPmV on host using block_dia_csr_matrix format
template <class Matrix, class Vector>
void multiply_common_sqblock_host_diag(const Matrix &A, const Vector &B, Vector &C)
{
    // Host reference SpMV C = A * B for square-block matrices that store the
    // diagonal blocks separately (DIAG property):
    //   C_i = D_i * B_i + sum_j A_ij * B_j.
    // Fixes: typo in the runtime error message ("Executrion" -> "Execution")
    // and removal of an unused local 'ValueType' typedef.
    typedef typename Matrix::TConfig TConfig;

    if (TConfig::memSpace == AMGX_device)
    {
        FatalError("Execution path error: device matrix in host path", AMGX_ERR_NOT_IMPLEMENTED);
    }
    else
    {
        //TODO:: This implementation is very inefficient, use BLAS
        typedef typename TConfig::IndPrec IndexType;
        typedef typename Vector::value_type ValueTypeB;
        IndexType bsize = A.get_block_dimy();
        ValueTypeB temp;

        for (int i = 0; i < A.get_num_rows(); i++)
        {
            // Initialize the output block row to zero.
            for (int m = 0; m < bsize; m++)
            {
                C[i * bsize + m] = types::util<ValueTypeB>::get_zero();
            }

            // Contribution from the separately stored diagonal block
            // (row-major bsize x bsize block at A.values[A.diag[i] * bsize^2]).
            for (int n = 0; n < bsize; n++)
            {
                temp = B[i * bsize + n];

                for (int m = 0; m < bsize; m++)
                {
                    C[i * bsize + m] = C[i * bsize + m] + A.values[A.diag[i] * bsize * bsize + m * bsize + n] * temp;
                }
            }

            // Contribution from nonzero off-diagonal blocks
            for (int j = A.row_offsets[i]; j < A.row_offsets[i + 1]; j++)
            {
                IndexType jcol = A.col_indices[j];

                for (int n = 0; n < bsize; n++)
                {
                    temp = B[jcol * bsize + n];

                    for (int m = 0; m < bsize; m++)
                    {
                        C[i * bsize + m] = C[i * bsize + m] + A.values[j * bsize * bsize + m * bsize + n] * temp;
                    }
                }
            }
        }
    }
}
template <class Matrix, class Vector>
void multiply_common_sqblock_host_nodiag(const Matrix &A, const Vector &B, Vector &C)
{
    // Host reference SpMV C = A * B for square-block matrices whose diagonal is
    // stored inline with the other nonzero blocks: C_i = sum_j A_ij * B_j.
    // Fixes: typo in the runtime error message ("Executrion" -> "Execution")
    // and removal of an unused local 'ValueType' typedef.
    typedef typename Matrix::TConfig TConfig;

    if (TConfig::memSpace == AMGX_device)
    {
        FatalError("Execution path error: device matrix in host path", AMGX_ERR_NOT_IMPLEMENTED);
    }
    else
    {
        //TODO:: This implementation is very inefficient, use BLAS
        typedef typename TConfig::IndPrec IndexType;
        typedef typename Vector::value_type ValueTypeB;
        IndexType bsize = A.get_block_dimy();
        ValueTypeB temp;

        for (int i = 0; i < A.get_num_rows(); i++)
        {
            // Initialize the output block row to zero.
            for (int m = 0; m < bsize; m++)
            {
                C[i * bsize + m] = types::util<ValueTypeB>::get_zero();
            }

            // Contribution from nonzero blocks (row-major bsize x bsize each).
            for (int j = A.row_offsets[i]; j < A.row_offsets[i + 1]; j++)
            {
                IndexType jcol = A.col_indices[j];

                for (int n = 0; n < bsize; n++)
                {
                    temp = B[jcol * bsize + n];

                    for (int m = 0; m < bsize; m++)
                    {
                        C[i * bsize + m] = C[i * bsize + m] + A.values[j * bsize * bsize + m * bsize + n] * temp;
                    }
                }
            }
        }
    }
}
// Scalar (1x1 block) SpMV dispatcher: reference loops on the host, cuSPARSE
// BSR SpMV (C = 1*A*B + 0*C) on the device.
template <class Matrix, class Vector>
class Multiply_1x1
{
    public:
        typedef typename Matrix::TConfig TConfig;
        static void multiply_1x1(Matrix &A, Vector &B, Vector &C, ViewType view)
        {
            if (TConfig::memSpace == AMGX_host)
            {
                // Host reference path: pick the variant matching A's diagonal storage.
                if (A.hasProps(DIAG))
                {
                    multiply_common_sqblock_host_diag(A, B, C);
                }
                else
                {
                    multiply_common_sqblock_host_nodiag(A, B, C);
                }
            }
            else
            {
                typedef typename TConfig::VecPrec ValueTypeB;
                // Device path: y = alpha*A*x + beta*y with alpha = 1, beta = 0.
                Cusparse::bsrmv<TConfig>(types::util<ValueTypeB>::get_one(), A, B, types::util<ValueTypeB>::get_zero(), C, view);
                cudaCheckError();
            }
        }
};
// Masked scalar SpMV: C = A * B computed only on the rows selected by 'mask'.
// Device only; the host path is not implemented.
template <class Matrix, class Vector, class IVector>
class Multiply_1x1_masked
{
    public:
        typedef typename Matrix::TConfig TConfig;
        // 'mask' is now taken by reference: the previous by-value parameter
        // deep-copied a (potentially device-resident) index vector on every
        // call. The only caller passes an lvalue, so this is backward-compatible.
        static void multiply_1x1_masked(Matrix &A, Vector &B, Vector &C, IVector &mask, ViewType view)
        {
            if (TConfig::memSpace == AMGX_host)
            {
                FatalError("Masked multiply is not supported on host", AMGX_ERR_NOT_IMPLEMENTED);
            }
            else
            {
                typedef typename TConfig::VecPrec ValueTypeB;
                Cusparse::bsrxmv<TConfig>(types::util<ValueTypeB>::get_one(), A, B, types::util<ValueTypeB>::get_zero(), C, mask, view);
                cudaCheckError();
            }
        }
};
// Device-only masked scalar SpMV (C = 1*A*B + 0*C) via cuSPARSE; no host path.
template <class Matrix, class Vector>
class Multiply_1x1_with_mask
{
    public:
        typedef typename Matrix::TConfig TConfig;
        static void multiply_1x1(Matrix &A, Vector &B, Vector &C)
        {
            if (TConfig::memSpace == AMGX_host)
            {
                FatalError("multiply with mask not supported on host", AMGX_ERR_NOT_IMPLEMENTED);
            }
            else
            {
                typedef typename TConfig::VecPrec ValueTypeB;
                const ValueTypeB alpha = types::util<ValueTypeB>::get_one();
                const ValueTypeB beta = types::util<ValueTypeB>::get_zero();
                Cusparse::bsrmv_with_mask<TConfig>(alpha, A, B, beta, C);
                cudaCheckError();
            }
        }
};
// Device-only masked restriction product via cuSPARSE; P supplies the
// restriction structure. No host path.
template <class Matrix, class Vector>
class Multiply_1x1_with_mask_restriction
{
    public:
        typedef typename Matrix::TConfig TConfig;
        static void multiply_1x1(Matrix &A, Vector &B, Vector &C, Matrix &P)
        {
            if (TConfig::memSpace == AMGX_host)
            {
                FatalError("multiply with mask not supported on host", AMGX_ERR_NOT_IMPLEMENTED);
            }
            else
            {
                typedef typename TConfig::VecPrec ValueTypeB;
                const ValueTypeB alpha = types::util<ValueTypeB>::get_one();
                const ValueTypeB beta = types::util<ValueTypeB>::get_zero();
                Cusparse::bsrmv_with_mask_restriction<TConfig>(alpha, A, B, beta, C, P);
                cudaCheckError();
            }
        }
};
// Method to perform BSPmV on device using block_dia_csr_matrix format
template <class Matrix, class Vector>
class Multiply_4x4
{
    public:
        typedef typename Matrix::TConfig TConfig;
        // 4x4-block SpMV: host reference loops on the host, custom BSR kernels
        // on the device. The kernel variant depends on whether A stores the
        // diagonal separately (DIAG) and on the block storage order.
        static void multiply_4x4(const Matrix &A, const Vector &B, Vector &C, ViewType view)
        {
            if (TConfig::memSpace == AMGX_host)
            {
                if (A.hasProps(DIAG))
                {
                    multiply_common_sqblock_host_diag(A, B, C);
                }
                else
                {
                    multiply_common_sqblock_host_nodiag(A, B, C);
                }
            }
            else
            {
                //TODO: compare with cublas
                typedef typename TConfig::IndPrec IndexType;
                typedef typename TConfig::MatPrec ValueTypeA;
                typedef typename TConfig::VecPrec ValueTypeB;
                int num_rows, offset;
                // Resolve the row range [offset, offset + num_rows) for 'view'.
                A.getOffsetAndSizeForView(view, &offset, &num_rows);
                const IndexType *A_row_offsets_ptr = A.row_offsets.raw();
                const IndexType *A_column_indices_ptr = A.col_indices.raw();
                const IndexType *A_dia_ind_ptr = A.diag.raw();
                const ValueTypeA *A_nonzero_values_ptr = A.values.raw();
                const ValueTypeB *B_ptr = B.raw();
                ValueTypeB *C_ptr = C.raw();
                cudaCheckError();
                const unsigned int threads_per_block = 128;
                const int eightwarps_per_block = threads_per_block / 4;   // 4 threads per block row
                const int num_warps_per_cta = threads_per_block / 32;
                // Grid size is capped; the kernels loop grid-stride over any remainder.
                const int num_blocks = std::min(AMGX_GRID_MAX_SIZE, (int) (num_rows + num_warps_per_cta - 1) / num_warps_per_cta); // (int) (A.get_num_rows()-1)/eightwarps_per_block + 1;
                if (!A.hasProps(DIAG))
                {
                    // Diagonal stored inline: plain BSR SpMV kernel.
                    if (A.getBlockFormat() == ROW_MAJOR)
                    {
                        cudaFuncSetCacheConfig(blockDiaCsrMultiplyKernel<IndexType, ValueTypeA, ValueTypeB, eightwarps_per_block, 4, 2, 2, true>, cudaFuncCachePreferL1);
                        blockDiaCsrMultiplyKernel<IndexType, ValueTypeA, ValueTypeB, eightwarps_per_block, 4, 2, 2, true> <<< num_blocks, threads_per_block>>>(A_row_offsets_ptr, A_column_indices_ptr, A_nonzero_values_ptr, B_ptr, C_ptr, offset + num_rows, offset);
                    }
                    else
                    {
                        cudaFuncSetCacheConfig(blockDiaCsrMultiplyKernel<IndexType, ValueTypeA, ValueTypeB, eightwarps_per_block, 4, 2, 2, false>, cudaFuncCachePreferL1);
                        blockDiaCsrMultiplyKernel<IndexType, ValueTypeA, ValueTypeB, eightwarps_per_block, 4, 2, 2, false> <<< num_blocks, threads_per_block>>>(A_row_offsets_ptr, A_column_indices_ptr, A_nonzero_values_ptr, B_ptr, C_ptr, offset + num_rows, offset);
                    }
                }
                else
                {
                    // Separately stored diagonal: DIAG-aware kernels.
#ifdef USE_EXPERIMENTAL_4x4
                    if ( A.getBlockFormat() == ROW_MAJOR )
                    {
                        blockDiaCsrMultiplyKernelDiaProps_4x4<IndexType, ValueTypeA, ValueTypeB, threads_per_block, true > <<< num_blocks, threads_per_block>>>(A_row_offsets_ptr, A_column_indices_ptr, A_dia_ind_ptr, A_nonzero_values_ptr, B_ptr, C_ptr, offset + num_rows, offset);
                    }
                    else
                    {
                        blockDiaCsrMultiplyKernelDiaProps_4x4<IndexType, ValueTypeA, ValueTypeB, threads_per_block, false > <<< num_blocks, threads_per_block>>>(A_row_offsets_ptr, A_column_indices_ptr, A_dia_ind_ptr, A_nonzero_values_ptr, B_ptr, C_ptr, offset + num_rows, offset);
                    }
#else
                    if (A.getBlockFormat() == ROW_MAJOR)
                    {
                        cudaFuncSetCacheConfig(blockDiaCsrMultiplyKernelDiaProps_4x4<IndexType, ValueTypeA, ValueTypeB, eightwarps_per_block, 4, 2, 2, 16, true>, cudaFuncCachePreferL1);
                        blockDiaCsrMultiplyKernelDiaProps_4x4<IndexType, ValueTypeA, ValueTypeB, eightwarps_per_block, 4, 2, 2, 16, true> <<< num_blocks, threads_per_block>>>(A_row_offsets_ptr, A_column_indices_ptr, A_dia_ind_ptr, A_nonzero_values_ptr, B_ptr, C_ptr, offset + num_rows, offset);
                    }
                    else
                    {
                        cudaFuncSetCacheConfig(blockDiaCsrMultiplyKernelDiaProps_4x4<IndexType, ValueTypeA, ValueTypeB, eightwarps_per_block, 4, 2, 2, 16, false>, cudaFuncCachePreferL1);
                        blockDiaCsrMultiplyKernelDiaProps_4x4<IndexType, ValueTypeA, ValueTypeB, eightwarps_per_block, 4, 2, 2, 16, false> <<< num_blocks, threads_per_block>>>(A_row_offsets_ptr, A_column_indices_ptr, A_dia_ind_ptr, A_nonzero_values_ptr, B_ptr, C_ptr, offset + num_rows, offset);
                    }
#endif
                }
                cudaCheckError();
            }
        }
};
// Method to perform BSPmV on device using block_dia_csr_matrix format
// Generic bxb-block SpMV dispatcher: host reference loops on the host,
// cuSPARSE BSR SpMV (C = 1*A*B + 0*C) on the device.
template <class Matrix, class Vector>
class Multiply_bxb
{
    public:
        typedef typename Matrix::TConfig TConfig;
        static void multiply_bxb(Matrix &A, Vector &B, Vector &C, ViewType view)
        {
            if (TConfig::memSpace == AMGX_host)
            {
                // Host reference path: pick the variant matching A's diagonal storage.
                if (A.hasProps(DIAG))
                {
                    multiply_common_sqblock_host_diag(A, B, C);
                }
                else
                {
                    multiply_common_sqblock_host_nodiag(A, B, C);
                }
            }
            else
            {
                typedef typename TConfig::VecPrec ValueTypeB;
                const ValueTypeB alpha = types::util<ValueTypeB>::get_one();
                const ValueTypeB beta = types::util<ValueTypeB>::get_zero();
                Cusparse::bsrmv(alpha, A, B, beta, C, view);
                cudaCheckError();
            }
        }
};
// Method to perform BSPmV on device using block_dia_csr_matrix format
template <class Matrix, class Vector>
class Multiply_3x3
{
    public:
        typedef typename Matrix::TConfig TConfig;
        // 3x3-block SpMV: host reference loops on the host; on the device the
        // generic arbitrary-block-size kernel is instantiated with bsize = 3
        // and the compile-time 'diag' flag matching A's diagonal storage.
        static void multiply_3x3(const Matrix &A, const Vector &B, Vector &C, ViewType view)
        {
            if (TConfig::memSpace == AMGX_host)
            {
                if (A.hasProps(DIAG))
                {
                    multiply_common_sqblock_host_diag(A, B, C);
                }
                else
                {
                    multiply_common_sqblock_host_nodiag(A, B, C);
                }
            }
            else
            {
                //TODO: compare with cublas
                typedef typename TConfig::IndPrec IndexType;
                typedef typename TConfig::MatPrec ValueTypeA;
                typedef typename TConfig::VecPrec ValueTypeB;
                int num_rows, offset;
                // Resolve the row range [offset, offset + num_rows) for 'view'.
                A.getOffsetAndSizeForView(view, &offset, &num_rows);
                const IndexType *A_row_offsets_ptr = A.row_offsets.raw();
                const IndexType *A_column_indices_ptr = A.col_indices.raw();
                const IndexType *A_dia_ind_ptr = A.diag.raw();
                const ValueTypeA *A_nonzero_values_ptr = A.values.raw();
                const ValueTypeB *B_ptr = B.raw();
                ValueTypeB *C_ptr = C.raw();
                cudaCheckError();
                const int threads_per_block = 64 * 3;
                // 10 block rows per warp (32 / 3, integer division); the two
                // leftover lanes per warp are padding and exit in the kernel.
                const int blockrows_per_warp = 32 / 3;
                const int blockrows_per_cta = (threads_per_block / 32) * blockrows_per_warp;
                // Grid size is capped; the kernel loops grid-stride over any remainder.
                const int num_blocks = std::min(AMGX_GRID_MAX_SIZE, (int) (num_rows - 1) / blockrows_per_cta + 1); // (int) (A.get_num_rows()-1)/eightwarps_per_block + 1;
                if (!A.hasProps(DIAG))
                {
                    if (A.getBlockFormat() == ROW_MAJOR)
                    {
                        cudaFuncSetCacheConfig(blockDiaCsrMultiplyKernelDiaProps<IndexType, ValueTypeA, ValueTypeB, blockrows_per_cta, blockrows_per_warp, 3, 0, true>, cudaFuncCachePreferL1);
                        blockDiaCsrMultiplyKernelDiaProps<IndexType, ValueTypeA, ValueTypeB, blockrows_per_cta, blockrows_per_warp, 3, 0, true> <<< num_blocks, threads_per_block>>>(A_row_offsets_ptr, A_column_indices_ptr, A_dia_ind_ptr, A_nonzero_values_ptr, B_ptr, C_ptr, offset + num_rows, offset);
                    }
                    else
                    {
                        cudaFuncSetCacheConfig(blockDiaCsrMultiplyKernelDiaProps<IndexType, ValueTypeA, ValueTypeB, blockrows_per_cta, blockrows_per_warp, 3, 0, false>, cudaFuncCachePreferL1);
                        blockDiaCsrMultiplyKernelDiaProps<IndexType, ValueTypeA, ValueTypeB, blockrows_per_cta, blockrows_per_warp, 3, 0, false> <<< num_blocks, threads_per_block>>>(A_row_offsets_ptr, A_column_indices_ptr, A_dia_ind_ptr, A_nonzero_values_ptr, B_ptr, C_ptr, offset + num_rows, offset);
                    }
                }
                else
                {
                    if (A.getBlockFormat() == ROW_MAJOR)
                    {
                        cudaFuncSetCacheConfig(blockDiaCsrMultiplyKernelDiaProps<IndexType, ValueTypeA, ValueTypeB, blockrows_per_cta, blockrows_per_warp, 3, 1, true>, cudaFuncCachePreferL1);
                        blockDiaCsrMultiplyKernelDiaProps<IndexType, ValueTypeA, ValueTypeB, blockrows_per_cta, blockrows_per_warp, 3, 1, true> <<< num_blocks, threads_per_block>>>(A_row_offsets_ptr, A_column_indices_ptr, A_dia_ind_ptr, A_nonzero_values_ptr, B_ptr, C_ptr, offset + num_rows, offset);
                    }
                    else
                    {
                        cudaFuncSetCacheConfig(blockDiaCsrMultiplyKernelDiaProps<IndexType, ValueTypeA, ValueTypeB, blockrows_per_cta, blockrows_per_warp, 3, 1, false>, cudaFuncCachePreferL1);
                        blockDiaCsrMultiplyKernelDiaProps<IndexType, ValueTypeA, ValueTypeB, blockrows_per_cta, blockrows_per_warp, 3, 1, false> <<< num_blocks, threads_per_block>>>(A_row_offsets_ptr, A_column_indices_ptr, A_dia_ind_ptr, A_nonzero_values_ptr, B_ptr, C_ptr, offset + num_rows, offset);
                    }
                }
                cudaCheckError();
            }
        }
};
// -------------------------------
// Explict instantiations
// -------------------------------
#define AMGX_CASE_LINE(CASE) template void multiplyMM(const Matrix<TemplateMode<CASE>::Type>&, const Matrix<TemplateMode<CASE>::Type>&, Matrix<TemplateMode<CASE>::Type>&);
AMGX_FORALL_BUILDS(AMGX_CASE_LINE)
AMGX_FORCOMPLEX_BUILDS(AMGX_CASE_LINE)
#undef AMGX_CASE_LINE
#define AMGX_CASE_LINE(CASE) template void multiply(Matrix<TemplateMode<CASE>::Type> &, Vector<TemplateMode<CASE>::Type>&, Vector<TemplateMode<CASE>::Type> &, ViewType);
AMGX_FORALL_BUILDS(AMGX_CASE_LINE)
AMGX_FORCOMPLEX_BUILDS(AMGX_CASE_LINE)
#undef AMGX_CASE_LINE
#define AMGX_CASE_LINE(CASE) template void multiply_masked(Matrix<TemplateMode<CASE>::Type> &, Vector<TemplateMode<CASE>::Type> &, Vector<TemplateMode<CASE>::Type> &, typename Matrix<TemplateMode<CASE>::Type>::IVector &, ViewType);
AMGX_FORALL_BUILDS(AMGX_CASE_LINE)
AMGX_FORCOMPLEX_BUILDS(AMGX_CASE_LINE)
#undef AMGX_CASE_LINE
#define AMGX_CASE_LINE(CASE) template void multiply_with_mask(Matrix<TemplateMode<CASE>::Type> &, Vector<TemplateMode<CASE>::Type>&, Vector<TemplateMode<CASE>::Type> &);
AMGX_FORALL_BUILDS(AMGX_CASE_LINE)
AMGX_FORCOMPLEX_BUILDS(AMGX_CASE_LINE)
#undef AMGX_CASE_LINE
#define AMGX_CASE_LINE(CASE) template void multiply_with_mask_restriction(Matrix<TemplateMode<CASE>::Type> &, Vector<TemplateMode<CASE>::Type>&, Vector<TemplateMode<CASE>::Type> &, Matrix<TemplateMode<CASE>::Type> & );
AMGX_FORALL_BUILDS(AMGX_CASE_LINE)
AMGX_FORCOMPLEX_BUILDS(AMGX_CASE_LINE)
#undef AMGX_CASE_LINE
} // namespace amgx
|
39038dbafb1bd22ec6b1d245f321871de5a2814b.hip
|
// !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
/*
* Copyright (c) 2022-2023, NVIDIA CORPORATION.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
#include <algorithm>
#include <array>
#include <cstddef>
#include <iostream>
#include <memory>
#include <numeric>
#include <vector>

#include <gtest/gtest.h>

#include <raft/util/cudart_utils.hpp>
#include <raft/util/device_atomics.cuh>

#include <rmm/cuda_stream_pool.hpp>
#include <rmm/device_scalar.hpp>
#include <rmm/device_uvector.hpp>
namespace raft {
// Each thread claims a unique slot in `out_array` via a warp-aggregated
// atomic increment of `counter`, then records its global thread id there.
__global__ void test_atomic_inc_warp_kernel(int* counter, int* out_array)
{
  const int tid  = threadIdx.x + blockIdx.x * blockDim.x;
  const int slot = atomicIncWarp(counter);
  out_array[slot] = tid;
}
// Verifies atomicIncWarp: 1M threads each reserve a unique slot; after the
// run the counter equals the element count and the slots form a permutation
// of [0, num_elts).
TEST(Raft, AtomicIncWarp)
{
  const int num_blocks        = 1024;
  const int threads_per_block = 1024;
  const int num_elts          = num_blocks * threads_per_block;

  rmm::cuda_stream_pool pool{1};
  auto s = pool.get_stream();

  rmm::device_scalar<int> counter{0, s};
  rmm::device_uvector<int> out_device{num_elts, s};
  // Heap-allocate the 4 MB host buffer: the previous std::array<int, num_elts>
  // was a stack object and could overflow the default thread stack.
  // `new int[n]()` value-initializes (zero-fills), matching the old `{0}`.
  std::unique_ptr<int[]> out_host(new int[num_elts]());

  // Write all 1M thread indices to a unique location in `out_device`
  hipLaunchKernelGGL(( test_atomic_inc_warp_kernel), dim3(num_blocks), dim3(threads_per_block), 0, s, counter.data(),
                     out_device.data());
  // Copy data to host
  RAFT_CUDA_TRY(hipMemcpyAsync(out_host.get(),
                               (const void*)out_device.data(),
                               num_elts * sizeof(int),
                               hipMemcpyDeviceToHost,
                               s));
  // Check that count is correct and that each thread index is contained in the
  // array exactly once.
  ASSERT_EQ(num_elts, counter.value(s));  // NB: accessing the counter synchronizes `s`
  std::sort(out_host.get(), out_host.get() + num_elts);
  for (int i = 0; i < num_elts; ++i) {
    ASSERT_EQ(i, out_host[i]);
  }
}
} // namespace raft
|
39038dbafb1bd22ec6b1d245f321871de5a2814b.cu
|
/*
* Copyright (c) 2022-2023, NVIDIA CORPORATION.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
#include <algorithm>
#include <array>
#include <cstddef>
#include <gtest/gtest.h>
#include <iostream>
#include <memory>
#include <numeric>
#include <raft/util/cudart_utils.hpp>
#include <raft/util/device_atomics.cuh>
#include <rmm/cuda_stream_pool.hpp>
#include <rmm/device_scalar.hpp>
#include <rmm/device_uvector.hpp>
namespace raft {
// Each thread claims a unique slot in `out_array` via a warp-aggregated
// atomic increment of `counter`, then records its global thread id there.
__global__ void test_atomic_inc_warp_kernel(int* counter, int* out_array)
{
  const int tid  = threadIdx.x + blockIdx.x * blockDim.x;
  const int slot = atomicIncWarp(counter);
  out_array[slot] = tid;
}
// Verifies atomicIncWarp: 1M threads each reserve a unique slot; after the
// run the counter equals the element count and the slots form a permutation
// of [0, num_elts).
TEST(Raft, AtomicIncWarp)
{
  const int num_blocks        = 1024;
  const int threads_per_block = 1024;
  const int num_elts          = num_blocks * threads_per_block;

  rmm::cuda_stream_pool pool{1};
  auto s = pool.get_stream();

  rmm::device_scalar<int> counter{0, s};
  rmm::device_uvector<int> out_device{num_elts, s};
  // Heap-allocate the 4 MB host buffer: the previous std::array<int, num_elts>
  // was a stack object and could overflow the default thread stack.
  // `new int[n]()` value-initializes (zero-fills), matching the old `{0}`.
  std::unique_ptr<int[]> out_host(new int[num_elts]());

  // Write all 1M thread indices to a unique location in `out_device`
  test_atomic_inc_warp_kernel<<<num_blocks, threads_per_block, 0, s>>>(counter.data(),
                                                                      out_device.data());
  // Copy data to host
  RAFT_CUDA_TRY(cudaMemcpyAsync(out_host.get(),
                                (const void*)out_device.data(),
                                num_elts * sizeof(int),
                                cudaMemcpyDeviceToHost,
                                s));
  // Check that count is correct and that each thread index is contained in the
  // array exactly once.
  ASSERT_EQ(num_elts, counter.value(s));  // NB: accessing the counter synchronizes `s`
  std::sort(out_host.get(), out_host.get() + num_elts);
  for (int i = 0; i < num_elts; ++i) {
    ASSERT_EQ(i, out_host[i]);
  }
}
} // namespace raft
|
16b6a7dc778defff8fb5df5c1e724f65c8b6178a.hip
|
// !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
/*
* MIT License
*
* Copyright (c) 2021 CSCS, ETH Zurich
* 2021 University of Basel
*
* Permission is hereby granted, free of charge, to any person obtaining a copy
* of this software and associated documentation files (the "Software"), to deal
* in the Software without restriction, including without limitation the rights
* to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
* copies of the Software, and to permit persons to whom the Software is
* furnished to do so, subject to the following conditions:
*
* The above copyright notice and this permission notice shall be included in all
* copies or substantial portions of the Software.
*
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
* IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
* FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
* AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
* LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
* OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
* SOFTWARE.
*/
/*! @file
* @brief Density i-loop GPU driver
*
* @author Sebastian Keller <sebastian.f.keller@gmail.com>
*/
#include "cstone/cuda/cuda_utils.cuh"
#include "cstone/util/tuple.hpp"
#include "cstone/util/util.hpp"
#include "sph/sph_gpu.hpp"
#include "sph/eos.hpp"
namespace sph
{
namespace cuda
{
//! @brief EOS kernel: one thread per particle in [firstParticle, lastParticle).
//! Computes rho_i = kx*m/xm, evaluates the ideal-gas EOS for pressure and
//! sound speed c, and stores p/(kx*m^2*gradh) in prho. The rho and p outputs
//! are optional and only written when the pointers are non-null.
template<class Tt, class Tm, class Thydro>
__global__ void cudaEOS(size_t firstParticle, size_t lastParticle, Tt mui, Tt gamma, const Tt* temp, const Tm* m,
                        const Thydro* kx, const Thydro* xm, const Thydro* gradh, Thydro* prho, Thydro* c, Thydro* rho,
                        Thydro* p)
{
    unsigned i = firstParticle + blockDim.x * blockIdx.x + threadIdx.x;
    if (i >= lastParticle) return; // bounds guard for the grid tail
    Thydro p_i;
    Thydro rho_i = kx[i] * m[i] / xm[i];
    util::tie(p_i, c[i]) = idealGasEOS(temp[i], rho_i, mui, gamma);
    prho[i] = p_i / (kx[i] * m[i] * m[i] * gradh[i]);
    if (rho) { rho[i] = rho_i; }
    if (p) { p[i] = p_i; }
}
//! @brief Host driver: launches cudaEOS with 256-thread blocks covering
//! [firstParticle, lastParticle) and blocks until the kernel completes.
template<class Tt, class Tm, class Thydro>
void computeEOS(size_t firstParticle, size_t lastParticle, Tt mui, Tt gamma, const Tt* temp, const Tm* m,
                const Thydro* kx, const Thydro* xm, const Thydro* gradh, Thydro* prho, Thydro* c, Thydro* rho,
                Thydro* p)
{
    unsigned numThreads = 256;
    unsigned numBlocks = iceil(lastParticle - firstParticle, numThreads); // ceil-div over the particle range
    hipLaunchKernelGGL(( cudaEOS), dim3(numBlocks), dim3(numThreads), 0, 0, firstParticle, lastParticle, mui, gamma, temp, m, kx, xm, gradh, prho, c, rho,
                                                     p);
    checkGpuErrors(hipDeviceSynchronize()); // surfaces launch/runtime errors
}
// Explicit instantiations for the supported (temperature, mass, hydro) type combinations.
#define COMPUTE_EOS(Ttemp, Tm, Thydro)                                                                                 \
    template void computeEOS(size_t firstParticle, size_t lastParticle, Ttemp mui, Ttemp gamma, const Ttemp* temp,     \
                             const Tm* m, const Thydro* kx, const Thydro* xm, const Thydro* gradh, Thydro* prho,       \
                             Thydro* c, Thydro* rho, Thydro* p)
COMPUTE_EOS(double, double, double);
COMPUTE_EOS(double, float, double);
COMPUTE_EOS(double, float, float);
COMPUTE_EOS(float, float, float);
} // namespace cuda
} // namespace sph
|
16b6a7dc778defff8fb5df5c1e724f65c8b6178a.cu
|
/*
* MIT License
*
* Copyright (c) 2021 CSCS, ETH Zurich
* 2021 University of Basel
*
* Permission is hereby granted, free of charge, to any person obtaining a copy
* of this software and associated documentation files (the "Software"), to deal
* in the Software without restriction, including without limitation the rights
* to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
* copies of the Software, and to permit persons to whom the Software is
* furnished to do so, subject to the following conditions:
*
* The above copyright notice and this permission notice shall be included in all
* copies or substantial portions of the Software.
*
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
* IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
* FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
* AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
* LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
* OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
* SOFTWARE.
*/
/*! @file
* @brief Density i-loop GPU driver
*
* @author Sebastian Keller <sebastian.f.keller@gmail.com>
*/
#include "cstone/cuda/cuda_utils.cuh"
#include "cstone/util/tuple.hpp"
#include "cstone/util/util.hpp"
#include "sph/sph_gpu.hpp"
#include "sph/eos.hpp"
namespace sph
{
namespace cuda
{
//! @brief EOS kernel: one thread per particle in [firstParticle, lastParticle).
//! Computes rho_i = kx*m/xm, evaluates the ideal-gas EOS for pressure and
//! sound speed c, and stores p/(kx*m^2*gradh) in prho. The rho and p outputs
//! are optional and only written when the pointers are non-null.
template<class Tt, class Tm, class Thydro>
__global__ void cudaEOS(size_t firstParticle, size_t lastParticle, Tt mui, Tt gamma, const Tt* temp, const Tm* m,
                        const Thydro* kx, const Thydro* xm, const Thydro* gradh, Thydro* prho, Thydro* c, Thydro* rho,
                        Thydro* p)
{
    unsigned i = firstParticle + blockDim.x * blockIdx.x + threadIdx.x;
    if (i >= lastParticle) return; // bounds guard for the grid tail
    Thydro p_i;
    Thydro rho_i = kx[i] * m[i] / xm[i];
    util::tie(p_i, c[i]) = idealGasEOS(temp[i], rho_i, mui, gamma);
    prho[i] = p_i / (kx[i] * m[i] * m[i] * gradh[i]);
    if (rho) { rho[i] = rho_i; }
    if (p) { p[i] = p_i; }
}
//! @brief Host driver: launches cudaEOS with 256-thread blocks covering
//! [firstParticle, lastParticle) and blocks until the kernel completes.
template<class Tt, class Tm, class Thydro>
void computeEOS(size_t firstParticle, size_t lastParticle, Tt mui, Tt gamma, const Tt* temp, const Tm* m,
                const Thydro* kx, const Thydro* xm, const Thydro* gradh, Thydro* prho, Thydro* c, Thydro* rho,
                Thydro* p)
{
    unsigned numThreads = 256;
    unsigned numBlocks = iceil(lastParticle - firstParticle, numThreads); // ceil-div over the particle range
    cudaEOS<<<numBlocks, numThreads>>>(firstParticle, lastParticle, mui, gamma, temp, m, kx, xm, gradh, prho, c, rho,
                                       p);
    checkGpuErrors(cudaDeviceSynchronize()); // surfaces launch/runtime errors
}
// Explicit instantiations for the supported (temperature, mass, hydro) type combinations.
#define COMPUTE_EOS(Ttemp, Tm, Thydro)                                                                                 \
    template void computeEOS(size_t firstParticle, size_t lastParticle, Ttemp mui, Ttemp gamma, const Ttemp* temp,     \
                             const Tm* m, const Thydro* kx, const Thydro* xm, const Thydro* gradh, Thydro* prho,       \
                             Thydro* c, Thydro* rho, Thydro* p)
COMPUTE_EOS(double, double, double);
COMPUTE_EOS(double, float, double);
COMPUTE_EOS(double, float, float);
COMPUTE_EOS(float, float, float);
} // namespace cuda
} // namespace sph
|
239b2458ccde2e50a3f6635bd87a0df3e25d02fd.hip
|
// !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
/*
-- MAGMA (version 2.2.0) --
Univ. of Tennessee, Knoxville
Univ. of California, Berkeley
Univ. of Colorado, Denver
@date November 2016
@generated from sparse/blas/zjacobisetup.cu, normal z -> s, Sun Nov 20 20:20:40 2016
@author Hartwig Anzt
*/
#include "magmasparse_internal.h"
#define BLOCK_SIZE 512
#define PRECISION_s
// One thread per row. For each of the num_vecs stacked vectors (column-major,
// stride num_rows) compute c = D^{-1} b and seed the iteration vector x = c.
__global__ void
svjacobisetup_gpu( int num_rows,
                   int num_vecs,
                   float *b,
                   float *d,
                   float *c,
                   float *x)
{
    int row = blockDim.x * blockIdx.x + threadIdx.x;
    if ( row >= num_rows )
        return;
    for ( int i = 0; i < num_vecs; i++ ) {
        float scaled = b[row + i*num_rows] / d[row];
        c[row + i*num_rows] = scaled;
        x[row + i*num_rows] = scaled;
    }
}
/**
Purpose
-------
Prepares the Jacobi Iteration according to
x^(k+1) = D^(-1) * b - D^(-1) * (L+U) * x^k
x^(k+1) = c - M * x^k.
Returns the vector c. It calls a GPU kernel
Arguments
---------
@param[in]
num_rows magma_int_t
number of rows
@param[in]
b magma_s_matrix
RHS b
@param[in]
d magma_s_matrix
vector with diagonal entries
@param[out]
c magma_s_matrix*
c = D^(-1) * b
@param[out]
x magma_s_matrix*
iteration vector
@param[in]
queue magma_queue_t
Queue to execute in.
@ingroup magmasparse_sgegpuk
********************************************************************/
extern "C" magma_int_t
magma_sjacobisetup_vector_gpu(
    magma_int_t num_rows,
    magma_s_matrix b,
    magma_s_matrix d,
    magma_s_matrix c,
    magma_s_matrix *x,
    magma_queue_t queue )
{
    // One thread per row; the kernel loops over all num_vecs stacked vectors.
    dim3 grid( magma_ceildiv( num_rows, BLOCK_SIZE ) );
    int num_vecs = b.num_rows / num_rows;
    magma_int_t threads = BLOCK_SIZE;
    // NOTE(review): x->val is passed here while the other wrappers in this
    // file pass ->dval device pointers -- confirm x->val is the device array.
    hipLaunchKernelGGL(( svjacobisetup_gpu), dim3(grid), dim3(threads), 0, queue->cuda_stream(),
        num_rows, num_vecs, b.dval, d.dval, c.dval, x->val );
    return MAGMA_SUCCESS;
}
// One thread per row: scale that row's entry of every stacked vector by the
// diagonal value d[row], writing the result into c.
__global__ void
sjacobidiagscal_kernel( int num_rows,
                        int num_vecs,
                        float *b,
                        float *d,
                        float *c)
{
    int row = blockDim.x * blockIdx.x + threadIdx.x;
    if ( row >= num_rows )
        return;
    for ( int i = 0; i < num_vecs; i++ ) {
        c[row + i*num_rows] = d[row] * b[row + i*num_rows];
    }
}
/**
Purpose
-------
Prepares the Jacobi Iteration according to
x^(k+1) = D^(-1) * b - D^(-1) * (L+U) * x^k
x^(k+1) = c - M * x^k.
Returns the vector c. It calls a GPU kernel
Arguments
---------
@param[in]
num_rows magma_int_t
number of rows
@param[in]
b magma_s_matrix
RHS b
@param[in]
d magma_s_matrix
vector with diagonal entries
@param[out]
c magma_s_matrix*
c = D^(-1) * b
@param[in]
queue magma_queue_t
Queue to execute in.
@ingroup magmasparse_s
********************************************************************/
extern "C" magma_int_t
magma_sjacobi_diagscal(
    magma_int_t num_rows,
    magma_s_matrix d,
    magma_s_matrix b,
    magma_s_matrix *c,
    magma_queue_t queue )
{
    // Block size is hard-coded to 512 here instead of using BLOCK_SIZE.
    dim3 grid( magma_ceildiv( num_rows, 512 ));
    int num_vecs = b.num_rows*b.num_cols/num_rows;
    magma_int_t threads = 512;
    // NOTE(review): c->val is passed where other wrappers use ->dval --
    // confirm c->val is the device array here.
    hipLaunchKernelGGL(( sjacobidiagscal_kernel), dim3(grid), dim3(threads), 0, queue->cuda_stream(), num_rows, num_vecs, b.dval, d.dval, c->val );
    return MAGMA_SUCCESS;
}
// Jacobi update x += d .* (b - t), one thread per row, looping over all
// num_cols stacked vectors (column-major, stride num_rows).
__global__ void
sjacobiupdate_kernel( int num_rows,
                      int num_cols,
                      float *t,
                      float *b,
                      float *d,
                      float *x)
{
    int row = blockDim.x * blockIdx.x + threadIdx.x;
    if ( row >= num_rows )
        return;
    for ( int i = 0; i < num_cols; i++ ) {
        int idx = row + i*num_rows;
        x[idx] += (b[idx] - t[idx]) * d[row];
    }
}
/**
Purpose
-------
Updates the iteration vector x for the Jacobi iteration
according to
x=x+d.*(b-t)
where d is the diagonal of the system matrix A and t=Ax.
Arguments
---------
@param[in]
t magma_s_matrix
t = A*x
@param[in]
b magma_s_matrix
RHS b
@param[in]
d magma_s_matrix
vector with diagonal entries
@param[out]
x magma_s_matrix*
iteration vector
@param[in]
queue magma_queue_t
Queue to execute in.
@ingroup magmasparse_s
********************************************************************/
extern "C" magma_int_t
magma_sjacobiupdate(
    magma_s_matrix t,
    magma_s_matrix b,
    magma_s_matrix d,
    magma_s_matrix *x,
    magma_queue_t queue )
{
    // One thread per row of t; the kernel loops over all columns internally.
    dim3 grid( magma_ceildiv( t.num_rows, BLOCK_SIZE ));
    magma_int_t threads = BLOCK_SIZE;
    hipLaunchKernelGGL(( sjacobiupdate_kernel), dim3(grid), dim3(threads), 0, queue->cuda_stream(), t.num_rows, t.num_cols, t.dval, b.dval, d.dval, x->dval );
    return MAGMA_SUCCESS;
}
// Fused CSR SpMV + Jacobi update: per row, dot = A(row,:) * x(:,i), then
// x[row,i] += (b[row,i] - dot) * d[row]. x is read and written concurrently
// by different threads (asynchronous update, see the calling wrapper).
// NOTE(review): `dot` is initialized once and never reset inside the column
// loop, so for num_cols > 1 the dot-product accumulates across columns --
// confirm this path is only used with single-column vectors.
__global__ void
sjacobispmvupdate_kernel(
    int num_rows,
    int num_cols,
    float * dval,
    magma_index_t * drowptr,
    magma_index_t * dcolind,
    float *t,
    float *b,
    float *d,
    float *x )
{
    int row = blockDim.x * blockIdx.x + threadIdx.x;
    int j;
    if(row<num_rows){
        float dot = MAGMA_S_ZERO;
        int start = drowptr[ row ];
        int end = drowptr[ row+1 ];
        for( int i=0; i<num_cols; i++){
            for( j=start; j<end; j++){
                dot += dval[ j ] * x[ dcolind[j]+i*num_rows ];
            }
            x[row+i*num_rows] += (b[row+i*num_rows]-dot) * d[row];
        }
    }
}
/**
Purpose
-------
Updates the iteration vector x for the Jacobi iteration
according to
x=x+d.*(b-Ax)
Arguments
---------
@param[in]
maxiter magma_int_t
number of Jacobi iterations
@param[in]
A magma_s_matrix
system matrix
@param[in]
t magma_s_matrix
workspace
@param[in]
b magma_s_matrix
RHS b
@param[in]
d magma_s_matrix
vector with diagonal entries
@param[out]
x magma_s_matrix*
iteration vector
@param[in]
queue magma_queue_t
Queue to execute in.
@ingroup magmasparse_s
********************************************************************/
extern "C" magma_int_t
magma_sjacobispmvupdate(
    magma_int_t maxiter,
    magma_s_matrix A,
    magma_s_matrix t,
    magma_s_matrix b,
    magma_s_matrix d,
    magma_s_matrix *x,
    magma_queue_t queue )
{
    // local variables
    //float c_zero = MAGMA_S_ZERO;
    //float c_one = MAGMA_S_ONE;
    dim3 grid( magma_ceildiv( t.num_rows, BLOCK_SIZE ));
    magma_int_t threads = BLOCK_SIZE;
    // maxiter back-to-back launches on the queue's stream; each sweep fuses
    // SpMV and update, so x entries may be read while others are updated.
    for( magma_int_t i=0; i<maxiter; i++ ) {
        // distinct routines imply synchronization
        // magma_s_spmv( c_one, A, *x, c_zero, t, queue ); // t = A * x
        //hipLaunchKernelGGL(( sjacobiupdate_kernel), dim3(grid), dim3(threads), 0, queue->cuda_stream(), t.num_rows, t.num_cols, t.dval, b.dval, d.dval, x->dval );
        // merged in one implies asynchronous update
        hipLaunchKernelGGL(( sjacobispmvupdate_kernel), dim3(grid), dim3(threads), 0, queue->cuda_stream(),
            t.num_rows, t.num_cols, A.dval, A.drow, A.dcol, t.dval, b.dval, d.dval, x->dval );
    }
    return MAGMA_SUCCESS;
}
// Same fused CSR SpMV + Jacobi update as sjacobispmvupdate_kernel, but rows
// are mapped in reverse (row = num_rows-1 - tid) so thread blocks process the
// matrix bottom-up.
// NOTE(review): `dot` is not reset per column; for num_cols > 1 the
// dot-product accumulates across columns -- confirm single-column use.
__global__ void
sjacobispmvupdate_bw_kernel(
    int num_rows,
    int num_cols,
    float * dval,
    magma_index_t * drowptr,
    magma_index_t * dcolind,
    float *t,
    float *b,
    float *d,
    float *x )
{
    int row_tmp = blockDim.x * blockIdx.x + threadIdx.x;
    int row = num_rows-1 - row_tmp;
    int j;
    if( row>-1 ){
        float dot = MAGMA_S_ZERO;
        int start = drowptr[ row ];
        int end = drowptr[ row+1 ];
        for( int i=0; i<num_cols; i++){
            for( j=start; j<end; j++){
                dot += dval[ j ] * x[ dcolind[j]+i*num_rows ];
            }
            x[row+i*num_rows] += (b[row+i*num_rows]-dot) * d[row];
        }
    }
}
/**
Purpose
-------
Updates the iteration vector x for the Jacobi iteration
according to
x=x+d.*(b-Ax)
This kernel processes the thread blocks in reversed order.
Arguments
---------
@param[in]
maxiter magma_int_t
number of Jacobi iterations
@param[in]
A magma_s_matrix
system matrix
@param[in]
t magma_s_matrix
workspace
@param[in]
b magma_s_matrix
RHS b
@param[in]
d magma_s_matrix
vector with diagonal entries
@param[out]
x magma_s_matrix*
iteration vector
@param[in]
queue magma_queue_t
Queue to execute in.
@ingroup magmasparse_s
********************************************************************/
extern "C" magma_int_t
magma_sjacobispmvupdate_bw(
    magma_int_t maxiter,
    magma_s_matrix A,
    magma_s_matrix t,
    magma_s_matrix b,
    magma_s_matrix d,
    magma_s_matrix *x,
    magma_queue_t queue )
{
    // local variables
    //float c_zero = MAGMA_S_ZERO;
    //float c_one = MAGMA_S_ONE;
    dim3 grid( magma_ceildiv( t.num_rows, BLOCK_SIZE ));
    magma_int_t threads = BLOCK_SIZE;
    // Same as magma_sjacobispmvupdate but with the reversed-row kernel.
    for( magma_int_t i=0; i<maxiter; i++ ) {
        // distinct routines imply synchronization
        // magma_s_spmv( c_one, A, *x, c_zero, t, queue ); // t = A * x
        //hipLaunchKernelGGL(( sjacobiupdate_kernel), dim3(grid), dim3(threads), 0, queue->cuda_stream(), t.num_rows, t.num_cols, t.dval, b.dval, d.dval, x->dval );
        // merged in one implies asynchronous update
        hipLaunchKernelGGL(( sjacobispmvupdate_bw_kernel), dim3(grid), dim3(threads), 0, queue->cuda_stream(),
            t.num_rows, t.num_cols, A.dval, A.drow, A.dcol, t.dval, b.dval, d.dval, x->dval );
    }
    return MAGMA_SUCCESS;
}
// Fused SpMV + Jacobi update restricted to the rows listed in `indices`
// (length num_updates); repeated indices emulate overlapping domains.
// NOTE(review): the bare printf(" ") looks like leftover debug output and
// serializes device-side printing; `y` is unused; as in the other spmvupdate
// kernels, `dot` is not reset per column for num_cols > 1.
__global__ void
sjacobispmvupdateselect_kernel(
    int num_rows,
    int num_cols,
    int num_updates,
    magma_index_t * indices,
    float * dval,
    magma_index_t * drowptr,
    magma_index_t * dcolind,
    float *t,
    float *b,
    float *d,
    float *x,
    float *y )
{
    int idx = blockDim.x * blockIdx.x + threadIdx.x;
    int j;
    if( idx<num_updates){
        int row = indices[ idx ];
        printf(" ");
        //if( row < num_rows ){
        float dot = MAGMA_S_ZERO;
        int start = drowptr[ row ];
        int end = drowptr[ row+1 ];
        for( int i=0; i<num_cols; i++){
            for( j=start; j<end; j++){
                dot += dval[ j ] * x[ dcolind[j]+i*num_rows ];
            }
            x[row+i*num_rows] = x[row+i*num_rows] + (b[row+i*num_rows]-dot) * d[row];
            //float add = (b[row+i*num_rows]-dot) * d[row];
            //#if defined(PRECISION_s) //|| defined(PRECISION_d)
            //    atomicAdd( x + row + i*num_rows, add );
            //#endif
            // ( unsigned int* address, unsigned int val);
            //}
        }
    }
}
/**
Purpose
-------
Updates the iteration vector x for the Jacobi iteration
according to
x=x+d.*(b-Ax)
This kernel allows for overlapping domains: the indices-array contains
the locations that are updated. Locations may be repeated to simulate
overlapping domains.
Arguments
---------
@param[in]
maxiter magma_int_t
number of Jacobi iterations
@param[in]
num_updates magma_int_t
number of updates - length of the indices array
@param[in]
indices magma_index_t*
indices, which entries of x to update
@param[in]
A magma_s_matrix
system matrix
@param[in]
t magma_s_matrix
workspace
@param[in]
b magma_s_matrix
RHS b
@param[in]
d magma_s_matrix
vector with diagonal entries
@param[in]
tmp magma_s_matrix
workspace
@param[out]
x magma_s_matrix*
iteration vector
@param[in]
queue magma_queue_t
Queue to execute in.
@ingroup magmasparse_s
********************************************************************/
extern "C" magma_int_t
magma_sjacobispmvupdateselect(
    magma_int_t maxiter,
    magma_int_t num_updates,
    magma_index_t *indices,
    magma_s_matrix A,
    magma_s_matrix t,
    magma_s_matrix b,
    magma_s_matrix d,
    magma_s_matrix tmp,
    magma_s_matrix *x,
    magma_queue_t queue )
{
    // local variables
    //float c_zero = MAGMA_S_ZERO
    //float c_one = MAGMA_S_ONE;
    //magma_s_matrix swp;
    // Grid is sized over num_updates (selected rows), not over all rows.
    dim3 grid( magma_ceildiv( num_updates, BLOCK_SIZE ));
    magma_int_t threads = BLOCK_SIZE;
    // NOTE(review): leftover debug printf; the tmp/x pointer-swap below is
    // commented out, so tmp is currently passed through unused by the kernel.
    printf("num updates:%d %d %d\n", int(num_updates), int(threads), int(grid.x) );
    for( magma_int_t i=0; i<maxiter; i++ ) {
        hipLaunchKernelGGL(( sjacobispmvupdateselect_kernel), dim3(grid), dim3(threads), 0, queue->cuda_stream(),
            t.num_rows, t.num_cols, num_updates, indices, A.dval, A.drow, A.dcol, t.dval, b.dval, d.dval, x->dval, tmp.dval );
        //swp.dval = x->dval;
        //x->dval = tmp.dval;
        //tmp.dval = swp.dval;
    }
    return MAGMA_SUCCESS;
}
// Per-entry contraction data for fault-tolerant Jacobi:
//   z[i] = |x^{k-1}_i - x^k_i|
//   c[i] = |x^{k-2}_i - x^{k-1}_i| / |x^{k-1}_i - x^k_i|
// One thread per row.
__global__ void
sftjacobicontractions_kernel(
    int num_rows,
    float * xkm2val,
    float * xkm1val,
    float * xkval,
    float * zval,
    float * cval )
{
    int idx = blockDim.x * blockIdx.x + threadIdx.x;
    if( idx<num_rows ){
        zval[idx] = MAGMA_S_MAKE( MAGMA_S_ABS( xkm1val[idx] - xkval[idx] ), 0.0);
        cval[ idx ] = MAGMA_S_MAKE(
            MAGMA_S_ABS( xkm2val[idx] - xkm1val[idx] )
                / MAGMA_S_ABS( xkm1val[idx] - xkval[idx] )
            ,0.0 );
    }
}
/**
Purpose
-------
Computes the contraction coefficients c_i:
c_i = z_i^{k-1} / z_i^{k}
= | x_i^{k-1} - x_i^{k-2} | / | x_i^{k} - x_i^{k-1} |
Arguments
---------
@param[in]
xkm2 magma_s_matrix
vector x^{k-2}
@param[in]
xkm1 magma_s_matrix
vector x^{k-2}
@param[in]
xk magma_s_matrix
vector x^{k-2}
@param[out]
z magma_s_matrix*
ratio
@param[out]
c magma_s_matrix*
contraction coefficients
@param[in]
queue magma_queue_t
Queue to execute in.
@ingroup magmasparse_s
********************************************************************/
extern "C" magma_int_t
magma_sftjacobicontractions(
    magma_s_matrix xkm2,
    magma_s_matrix xkm1,
    magma_s_matrix xk,
    magma_s_matrix *z,
    magma_s_matrix *c,
    magma_queue_t queue )
{
    // One thread per row of xk.
    dim3 grid( magma_ceildiv( xk.num_rows, BLOCK_SIZE ));
    magma_int_t threads = BLOCK_SIZE;
    hipLaunchKernelGGL(( sftjacobicontractions_kernel), dim3(grid), dim3(threads), 0, queue->cuda_stream(),
        xkm2.num_rows, xkm2.dval, xkm1.dval, xk.dval, z->dval, c->dval );
    return MAGMA_SUCCESS;
}
// Per-entry acceptance test for fault-tolerant Jacobi updates (ScaLA'15):
// accept the new value when the change ratio stays within delta*|c| of the
// expected contraction, or when a previously flagged entry triggers the
// false-positive condition; otherwise roll the entry back to xold.
// One thread per row; flag_t/flag_fp are per-entry state arrays.
__global__ void
sftjacobiupdatecheck_kernel(
    int num_rows,
    float delta,
    float * xold,
    float * xnew,
    float * zprev,
    float * cval,
    magma_int_t *flag_t,
    magma_int_t *flag_fp )
{
    int idx = blockDim.x * blockIdx.x + threadIdx.x;
    if( idx<num_rows ){
        float t1 = delta * MAGMA_S_ABS(cval[idx]);
        // vkv = 2^min(flag_fp[idx], 100), built by repeated doubling
        float vkv = 1.0;
        for( magma_int_t i=0; i<min( flag_fp[idx], 100 ); i++){
            vkv = vkv*2;
        }
        float xold_l = xold[idx];
        float xnew_l = xnew[idx];
        // znew clamped below at 1e-15 to avoid division by zero in znr
        float znew = MAGMA_S_MAKE(
            max( MAGMA_S_ABS( xold_l - xnew_l), 1e-15), 0.0 );
        float znr = zprev[idx] / znew;
        float t2 = MAGMA_S_ABS( znr - cval[idx] );
        //% evaluate fp-cond
        magma_int_t fpcond = 0;
        if( MAGMA_S_ABS(znr)>vkv ){
            fpcond = 1;
        }
        // % combine t-cond and fp-cond + flag_t == 1
        magma_int_t cond = 0;
        if( t2<t1 || (flag_t[idx]>0 && fpcond > 0 ) ){
            cond = 1;
        }
        flag_fp[idx] = flag_fp[idx]+1;
        if( fpcond>0 ){
            flag_fp[idx] = 0;
        }
        if( cond > 0 ){
            // accept: commit xnew and the new change magnitude
            flag_t[idx] = 0;
            zprev[idx] = znew;
            xold[idx] = xnew_l;
        } else {
            // reject: roll back and mark the entry for the next sweep
            flag_t[idx] = 1;
            xnew[idx] = xold_l;
        }
    }
}
/**
Purpose
-------
Checks the Jacobi updates accorting to the condition in the ScaLA'15 paper.
Arguments
---------
@param[in]
delta float
threshold
@param[in,out]
xold magma_s_matrix*
vector xold
@param[in,out]
xnew magma_s_matrix*
vector xnew
@param[in,out]
zprev magma_s_matrix*
vector z = | x_k-1 - x_k |
@param[in]
c magma_s_matrix
contraction coefficients
@param[in,out]
flag_t magma_int_t
threshold condition
@param[in,out]
flag_fp magma_int_t
false positive condition
@param[in]
queue magma_queue_t
Queue to execute in.
@ingroup magmasparse_s
********************************************************************/
extern "C" magma_int_t
magma_sftjacobiupdatecheck(
    float delta,
    magma_s_matrix *xold,
    magma_s_matrix *xnew,
    magma_s_matrix *zprev,
    magma_s_matrix c,
    magma_int_t *flag_t,
    magma_int_t *flag_fp,
    magma_queue_t queue )
{
    // One thread per row of xnew; xold/xnew/zprev and the flags are updated in place.
    dim3 grid( magma_ceildiv( xnew->num_rows, BLOCK_SIZE ));
    magma_int_t threads = BLOCK_SIZE;
    hipLaunchKernelGGL(( sftjacobiupdatecheck_kernel), dim3(grid), dim3(threads), 0, queue->cuda_stream(),
        xold->num_rows, delta, xold->dval, xnew->dval, zprev->dval, c.dval,
        flag_t, flag_fp );
    return MAGMA_SUCCESS;
}
|
239b2458ccde2e50a3f6635bd87a0df3e25d02fd.cu
|
/*
-- MAGMA (version 2.2.0) --
Univ. of Tennessee, Knoxville
Univ. of California, Berkeley
Univ. of Colorado, Denver
@date November 2016
@generated from sparse/blas/zjacobisetup.cu, normal z -> s, Sun Nov 20 20:20:40 2016
@author Hartwig Anzt
*/
#include "magmasparse_internal.h"
#define BLOCK_SIZE 512
#define PRECISION_s
__global__ void
svjacobisetup_gpu( int num_rows,
int num_vecs,
float *b,
float *d,
float *c,
float *x)
{
int row = blockDim.x * blockIdx.x + threadIdx.x;
if(row < num_rows ){
for( int i=0; i<num_vecs; i++ ){
c[row+i*num_rows] = b[row+i*num_rows] / d[row];
x[row+i*num_rows] = c[row+i*num_rows];
}
}
}
/**
Purpose
-------
Prepares the Jacobi Iteration according to
x^(k+1) = D^(-1) * b - D^(-1) * (L+U) * x^k
x^(k+1) = c - M * x^k.
Returns the vector c. It calls a GPU kernel
Arguments
---------
@param[in]
num_rows magma_int_t
number of rows
@param[in]
b magma_s_matrix
RHS b
@param[in]
d magma_s_matrix
vector with diagonal entries
@param[out]
c magma_s_matrix*
c = D^(-1) * b
@param[out]
x magma_s_matrix*
iteration vector
@param[in]
queue magma_queue_t
Queue to execute in.
@ingroup magmasparse_sgegpuk
********************************************************************/
extern "C" magma_int_t
magma_sjacobisetup_vector_gpu(
    magma_int_t num_rows,
    magma_s_matrix b,
    magma_s_matrix d,
    magma_s_matrix c,
    magma_s_matrix *x,
    magma_queue_t queue )
{
    // One thread per row; the kernel loops over all num_vecs stacked vectors.
    dim3 grid( magma_ceildiv( num_rows, BLOCK_SIZE ) );
    int num_vecs = b.num_rows / num_rows;
    magma_int_t threads = BLOCK_SIZE;
    // NOTE(review): x->val is passed here while the other wrappers in this
    // file pass ->dval device pointers -- confirm x->val is the device array.
    svjacobisetup_gpu<<< grid, threads, 0, queue->cuda_stream()>>>
        ( num_rows, num_vecs, b.dval, d.dval, c.dval, x->val );
    return MAGMA_SUCCESS;
}
// One thread per row: scale that row's entry of every stacked vector by the
// diagonal value d[row], writing the result into c.
__global__ void
sjacobidiagscal_kernel( int num_rows,
                        int num_vecs,
                        float *b,
                        float *d,
                        float *c)
{
    int row = blockDim.x * blockIdx.x + threadIdx.x;
    if ( row >= num_rows )
        return;
    for ( int i = 0; i < num_vecs; i++ ) {
        c[row + i*num_rows] = d[row] * b[row + i*num_rows];
    }
}
/**
Purpose
-------
Prepares the Jacobi Iteration according to
x^(k+1) = D^(-1) * b - D^(-1) * (L+U) * x^k
x^(k+1) = c - M * x^k.
Returns the vector c. It calls a GPU kernel
Arguments
---------
@param[in]
num_rows magma_int_t
number of rows
@param[in]
b magma_s_matrix
RHS b
@param[in]
d magma_s_matrix
vector with diagonal entries
@param[out]
c magma_s_matrix*
c = D^(-1) * b
@param[in]
queue magma_queue_t
Queue to execute in.
@ingroup magmasparse_s
********************************************************************/
extern "C" magma_int_t
magma_sjacobi_diagscal(
    magma_int_t num_rows,
    magma_s_matrix d,
    magma_s_matrix b,
    magma_s_matrix *c,
    magma_queue_t queue )
{
    // Block size is hard-coded to 512 here instead of using BLOCK_SIZE.
    dim3 grid( magma_ceildiv( num_rows, 512 ));
    int num_vecs = b.num_rows*b.num_cols/num_rows;
    magma_int_t threads = 512;
    // NOTE(review): c->val is passed where other wrappers use ->dval --
    // confirm c->val is the device array here.
    sjacobidiagscal_kernel<<< grid, threads, 0, queue->cuda_stream()>>>( num_rows, num_vecs, b.dval, d.dval, c->val );
    return MAGMA_SUCCESS;
}
// Jacobi update x += d .* (b - t), one thread per row, looping over all
// num_cols stacked vectors (column-major, stride num_rows).
__global__ void
sjacobiupdate_kernel( int num_rows,
                      int num_cols,
                      float *t,
                      float *b,
                      float *d,
                      float *x)
{
    int row = blockDim.x * blockIdx.x + threadIdx.x;
    if ( row >= num_rows )
        return;
    for ( int i = 0; i < num_cols; i++ ) {
        int idx = row + i*num_rows;
        x[idx] += (b[idx] - t[idx]) * d[row];
    }
}
/**
Purpose
-------
Updates the iteration vector x for the Jacobi iteration
according to
x=x+d.*(b-t)
where d is the diagonal of the system matrix A and t=Ax.
Arguments
---------
@param[in]
t magma_s_matrix
t = A*x
@param[in]
b magma_s_matrix
RHS b
@param[in]
d magma_s_matrix
vector with diagonal entries
@param[out]
x magma_s_matrix*
iteration vector
@param[in]
queue magma_queue_t
Queue to execute in.
@ingroup magmasparse_s
********************************************************************/
extern "C" magma_int_t
magma_sjacobiupdate(
    magma_s_matrix t,
    magma_s_matrix b,
    magma_s_matrix d,
    magma_s_matrix *x,
    magma_queue_t queue )
{
    // One thread per row of t; the kernel loops over all columns internally.
    dim3 grid( magma_ceildiv( t.num_rows, BLOCK_SIZE ));
    magma_int_t threads = BLOCK_SIZE;
    sjacobiupdate_kernel<<< grid, threads, 0, queue->cuda_stream()>>>( t.num_rows, t.num_cols, t.dval, b.dval, d.dval, x->dval );
    return MAGMA_SUCCESS;
}
// Fused CSR SpMV + Jacobi update: per row, dot = A(row,:) * x(:,i), then
// x[row,i] += (b[row,i] - dot) * d[row]. x is read and written concurrently
// by different threads (asynchronous update, see the calling wrapper).
// NOTE(review): `dot` is initialized once and never reset inside the column
// loop, so for num_cols > 1 the dot-product accumulates across columns --
// confirm this path is only used with single-column vectors.
__global__ void
sjacobispmvupdate_kernel(
    int num_rows,
    int num_cols,
    float * dval,
    magma_index_t * drowptr,
    magma_index_t * dcolind,
    float *t,
    float *b,
    float *d,
    float *x )
{
    int row = blockDim.x * blockIdx.x + threadIdx.x;
    int j;
    if(row<num_rows){
        float dot = MAGMA_S_ZERO;
        int start = drowptr[ row ];
        int end = drowptr[ row+1 ];
        for( int i=0; i<num_cols; i++){
            for( j=start; j<end; j++){
                dot += dval[ j ] * x[ dcolind[j]+i*num_rows ];
            }
            x[row+i*num_rows] += (b[row+i*num_rows]-dot) * d[row];
        }
    }
}
/**
Purpose
-------
Updates the iteration vector x for the Jacobi iteration
according to
x=x+d.*(b-Ax)
Arguments
---------
@param[in]
maxiter magma_int_t
number of Jacobi iterations
@param[in]
A magma_s_matrix
system matrix
@param[in]
t magma_s_matrix
workspace
@param[in]
b magma_s_matrix
RHS b
@param[in]
d magma_s_matrix
vector with diagonal entries
@param[out]
x magma_s_matrix*
iteration vector
@param[in]
queue magma_queue_t
Queue to execute in.
@ingroup magmasparse_s
********************************************************************/
extern "C" magma_int_t
magma_sjacobispmvupdate(
    magma_int_t maxiter,
    magma_s_matrix A,
    magma_s_matrix t,
    magma_s_matrix b,
    magma_s_matrix d,
    magma_s_matrix *x,
    magma_queue_t queue )
{
    // local variables
    //float c_zero = MAGMA_S_ZERO;
    //float c_one = MAGMA_S_ONE;
    dim3 grid( magma_ceildiv( t.num_rows, BLOCK_SIZE ));
    magma_int_t threads = BLOCK_SIZE;
    // maxiter back-to-back launches on the queue's stream; each sweep fuses
    // SpMV and update, so x entries may be read while others are updated.
    for( magma_int_t i=0; i<maxiter; i++ ) {
        // distinct routines imply synchronization
        // magma_s_spmv( c_one, A, *x, c_zero, t, queue ); // t = A * x
        // sjacobiupdate_kernel<<< grid, threads, 0, queue->cuda_stream()>>>( t.num_rows, t.num_cols, t.dval, b.dval, d.dval, x->dval );
        // merged in one implies asynchronous update
        sjacobispmvupdate_kernel<<< grid, threads, 0, queue->cuda_stream()>>>
            ( t.num_rows, t.num_cols, A.dval, A.drow, A.dcol, t.dval, b.dval, d.dval, x->dval );
    }
    return MAGMA_SUCCESS;
}
// Same fused CSR SpMV + Jacobi update as sjacobispmvupdate_kernel, but rows
// are mapped in reverse (row = num_rows-1 - tid) so thread blocks process the
// matrix bottom-up.
// NOTE(review): `dot` is not reset per column; for num_cols > 1 the
// dot-product accumulates across columns -- confirm single-column use.
__global__ void
sjacobispmvupdate_bw_kernel(
    int num_rows,
    int num_cols,
    float * dval,
    magma_index_t * drowptr,
    magma_index_t * dcolind,
    float *t,
    float *b,
    float *d,
    float *x )
{
    int row_tmp = blockDim.x * blockIdx.x + threadIdx.x;
    int row = num_rows-1 - row_tmp;
    int j;
    if( row>-1 ){
        float dot = MAGMA_S_ZERO;
        int start = drowptr[ row ];
        int end = drowptr[ row+1 ];
        for( int i=0; i<num_cols; i++){
            for( j=start; j<end; j++){
                dot += dval[ j ] * x[ dcolind[j]+i*num_rows ];
            }
            x[row+i*num_rows] += (b[row+i*num_rows]-dot) * d[row];
        }
    }
}
/**
Purpose
-------
Updates the iteration vector x for the Jacobi iteration
according to
x=x+d.*(b-Ax)
This kernel processes the thread blocks in reversed order.
Arguments
---------
@param[in]
maxiter magma_int_t
number of Jacobi iterations
@param[in]
A magma_s_matrix
system matrix
@param[in]
t magma_s_matrix
workspace
@param[in]
b magma_s_matrix
RHS b
@param[in]
d magma_s_matrix
vector with diagonal entries
@param[out]
x magma_s_matrix*
iteration vector
@param[in]
queue magma_queue_t
Queue to execute in.
@ingroup magmasparse_s
********************************************************************/
extern "C" magma_int_t
magma_sjacobispmvupdate_bw(
    magma_int_t maxiter,
    magma_s_matrix A,
    magma_s_matrix t,
    magma_s_matrix b,
    magma_s_matrix d,
    magma_s_matrix *x,
    magma_queue_t queue )
{
    // local variables
    //float c_zero = MAGMA_S_ZERO;
    //float c_one = MAGMA_S_ONE;
    dim3 grid( magma_ceildiv( t.num_rows, BLOCK_SIZE ));
    magma_int_t threads = BLOCK_SIZE;
    // Same as magma_sjacobispmvupdate but with the reversed-row kernel.
    for( magma_int_t i=0; i<maxiter; i++ ) {
        // distinct routines imply synchronization
        // magma_s_spmv( c_one, A, *x, c_zero, t, queue ); // t = A * x
        // sjacobiupdate_kernel<<< grid, threads, 0, queue->cuda_stream()>>>( t.num_rows, t.num_cols, t.dval, b.dval, d.dval, x->dval );
        // merged in one implies asynchronous update
        sjacobispmvupdate_bw_kernel<<< grid, threads, 0, queue->cuda_stream()>>>
            ( t.num_rows, t.num_cols, A.dval, A.drow, A.dcol, t.dval, b.dval, d.dval, x->dval );
    }
    return MAGMA_SUCCESS;
}
// Jacobi update x = x + d .* (b - A*x) restricted to the rows listed in
// indices[0..num_updates).  Repeated indices emulate overlapping domains.
// Updates happen in place, so the sweep is asynchronous.
// t and y are unused workspace kept for signature compatibility.
__global__ void
sjacobispmvupdateselect_kernel(
    int num_rows,
    int num_cols,
    int num_updates,
    magma_index_t * indices,
    float * dval,
    magma_index_t * drowptr,
    magma_index_t * dcolind,
    float *t,
    float *b,
    float *d,
    float *x,
    float *y )
{
    int idx = blockDim.x * blockIdx.x + threadIdx.x;
    int j;

    if( idx<num_updates){
        int row = indices[ idx ];
        // Removed: a per-thread printf(" ") debug leftover that serialized
        // execution and emitted spaces to stdout on every launch.
        int start = drowptr[ row ];
        int end = drowptr[ row+1 ];
        for( int i=0; i<num_cols; i++){
            // Reset the dot product for every right-hand side; previously
            // it accumulated across columns when num_cols > 1.
            float dot = MAGMA_S_ZERO;
            for( j=start; j<end; j++){
                dot += dval[ j ] * x[ dcolind[j]+i*num_rows ];
            }
            x[row+i*num_rows] = x[row+i*num_rows] + (b[row+i*num_rows]-dot) * d[row];
        }
    }
}
/**
Purpose
-------
Updates the iteration vector x for the Jacobi iteration
according to
x=x+d.*(b-Ax)
This kernel allows for overlapping domains: the indices-array contains
the locations that are updated. Locations may be repeated to simulate
overlapping domains.
Arguments
---------
@param[in]
maxiter magma_int_t
number of Jacobi iterations
@param[in]
num_updates magma_int_t
number of updates - length of the indices array
@param[in]
indices magma_index_t*
indices, which entries of x to update
@param[in]
A magma_s_matrix
system matrix
@param[in]
t magma_s_matrix
workspace
@param[in]
b magma_s_matrix
RHS b
@param[in]
d magma_s_matrix
vector with diagonal entries
@param[in]
tmp magma_s_matrix
workspace
@param[out]
x magma_s_matrix*
iteration vector
@param[in]
queue magma_queue_t
Queue to execute in.
@ingroup magmasparse_s
********************************************************************/
extern "C" magma_int_t
magma_sjacobispmvupdateselect(
    magma_int_t maxiter,
    magma_int_t num_updates,
    magma_index_t *indices,
    magma_s_matrix A,
    magma_s_matrix t,
    magma_s_matrix b,
    magma_s_matrix d,
    magma_s_matrix tmp,
    magma_s_matrix *x,
    magma_queue_t queue )
{
    // One thread per entry of the update list.
    dim3 grid( magma_ceildiv( num_updates, BLOCK_SIZE ));
    magma_int_t threads = BLOCK_SIZE;
    // Removed: a debug printf of the launch configuration that polluted
    // stdout on every call.

    // maxiter fused SpMV+update sweeps over the selected rows.
    for( magma_int_t i=0; i<maxiter; i++ ) {
        sjacobispmvupdateselect_kernel<<< grid, threads, 0, queue->cuda_stream()>>>
            ( t.num_rows, t.num_cols, num_updates, indices, A.dval, A.drow, A.dcol, t.dval, b.dval, d.dval, x->dval, tmp.dval );
    }

    return MAGMA_SUCCESS;
}
// One thread per row: stores z_i = |x_i^{k-1} - x_i^k| and the contraction
// coefficient c_i = |x_i^{k-2} - x_i^{k-1}| / |x_i^{k-1} - x_i^k|.
__global__ void
sftjacobicontractions_kernel(
    int num_rows,
    float * xkm2val,
    float * xkm1val,
    float * xkval,
    float * zval,
    float * cval )
{
    const int tid = blockDim.x * blockIdx.x + threadIdx.x;
    if ( tid < num_rows ) {
        const float diff_new = MAGMA_S_ABS( xkm1val[tid] - xkval[tid] );
        const float diff_old = MAGMA_S_ABS( xkm2val[tid] - xkm1val[tid] );
        zval[tid] = MAGMA_S_MAKE( diff_new, 0.0 );
        // NOTE(review): diff_new may be zero, making c_i inf/NaN — the
        // original code behaved identically; confirm callers tolerate it.
        cval[tid] = MAGMA_S_MAKE( diff_old / diff_new, 0.0 );
    }
}
/**
Purpose
-------
Computes the contraction coefficients c_i:
c_i = z_i^{k-1} / z_i^{k}
= | x_i^{k-1} - x_i^{k-2} | / | x_i^{k} - x_i^{k-1} |
Arguments
---------
@param[in]
xkm2 magma_s_matrix
vector x^{k-2}
@param[in]
xkm1 magma_s_matrix
vector x^{k-1}
@param[in]
xk magma_s_matrix
vector x^{k}
@param[out]
z magma_s_matrix*
ratio
@param[out]
c magma_s_matrix*
contraction coefficients
@param[in]
queue magma_queue_t
Queue to execute in.
@ingroup magmasparse_s
********************************************************************/
extern "C" magma_int_t
magma_sftjacobicontractions(
    magma_s_matrix xkm2,
    magma_s_matrix xkm1,
    magma_s_matrix xk,
    magma_s_matrix *z,
    magma_s_matrix *c,
    magma_queue_t queue )
{
    // One thread per vector entry.
    magma_int_t threads = BLOCK_SIZE;
    dim3 grid( magma_ceildiv( xk.num_rows, BLOCK_SIZE ));

    // Compute per-entry differences z and contraction coefficients c.
    sftjacobicontractions_kernel<<< grid, threads, 0, queue->cuda_stream()>>>
        ( xkm2.num_rows, xkm2.dval, xkm1.dval, xk.dval, z->dval, c->dval );

    return MAGMA_SUCCESS;
}
// Elementwise check of a fault-tolerant Jacobi update (ScaLA'15 criterion,
// see host-side documentation below).  Per entry: accept the new iterate
// when the observed contraction ratio matches the predicted coefficient
// cval (within delta), or when a previously flagged entry triggers the
// false-positive condition; otherwise roll the entry back to xold.
__global__ void
sftjacobiupdatecheck_kernel(
    int num_rows,
    float delta,
    float * xold,
    float * xnew,
    float * zprev,
    float * cval,
    magma_int_t *flag_t,
    magma_int_t *flag_fp )
{
    int idx = blockDim.x * blockIdx.x + threadIdx.x;
    if( idx<num_rows ){
        // tolerance scaled by the contraction coefficient
        float t1 = delta * MAGMA_S_ABS(cval[idx]);
        // vkv = 2^min(flag_fp, 100): grows with steps since the last
        // false positive (exponent capped at 100)
        float vkv = 1.0;
        for( magma_int_t i=0; i<min( flag_fp[idx], 100 ); i++){
            vkv = vkv*2;
        }
        float xold_l = xold[idx];
        float xnew_l = xnew[idx];
        // |xold - xnew|, clamped away from zero so znr stays finite
        float znew = MAGMA_S_MAKE(
            max( MAGMA_S_ABS( xold_l - xnew_l), 1e-15), 0.0 );
        float znr = zprev[idx] / znew;
        float t2 = MAGMA_S_ABS( znr - cval[idx] );
        //% evaluate fp-cond
        magma_int_t fpcond = 0;
        if( MAGMA_S_ABS(znr)>vkv ){
            fpcond = 1;
        }
        // % combine t-cond and fp-cond + flag_t == 1
        magma_int_t cond = 0;
        if( t2<t1 || (flag_t[idx]>0 && fpcond > 0 ) ){
            cond = 1;
        }
        // step counter since the last false positive; reset when one fires
        flag_fp[idx] = flag_fp[idx]+1;
        if( fpcond>0 ){
            flag_fp[idx] = 0;
        }
        if( cond > 0 ){
            // accept: commit xnew and remember the new difference
            flag_t[idx] = 0;
            zprev[idx] = znew;
            xold[idx] = xnew_l;
        } else {
            // reject: roll this entry back and flag it for the next check
            flag_t[idx] = 1;
            xnew[idx] = xold_l;
        }
    }
}
/**
Purpose
-------
Checks the Jacobi updates according to the condition in the ScaLA'15 paper.
Arguments
---------
@param[in]
delta float
threshold
@param[in,out]
xold magma_s_matrix*
vector xold
@param[in,out]
xnew magma_s_matrix*
vector xnew
@param[in,out]
zprev magma_s_matrix*
vector z = | x_k-1 - x_k |
@param[in]
c magma_s_matrix
contraction coefficients
@param[in,out]
flag_t magma_int_t
threshold condition
@param[in,out]
flag_fp magma_int_t
false positive condition
@param[in]
queue magma_queue_t
Queue to execute in.
@ingroup magmasparse_s
********************************************************************/
extern "C" magma_int_t
magma_sftjacobiupdatecheck(
    float delta,
    magma_s_matrix *xold,
    magma_s_matrix *xnew,
    magma_s_matrix *zprev,
    magma_s_matrix c,
    magma_int_t *flag_t,
    magma_int_t *flag_fp,
    magma_s_matrix *x_unused_placeholder_removed,
    magma_queue_t queue );
|
4623a71a3f5cc310bea83b5242073ce978a86fef.hip
|
// !!! This is a file automatically generated by hipify!!!
#include "graph_cudnn.h"
#include "cudnn_activation.h"
#include "errors.h"
#include <hip/hip_runtime.h>
#include <vector>
#include <iostream>
#define VERBOSE true
// Elementwise c[i] = a[i] + b[i] for i in [0, size).
__global__ void addKernel(float* c, const float* a, const float* b, unsigned int size)
{
    // Standard flat global index.  The original computed
    // (1 + blockIdx.x) * threadIdx.x, which both repeats and skips
    // elements and never used `size`.
    unsigned int i = blockIdx.x * blockDim.x + threadIdx.x;
    // Guard the grid tail: size need not be a multiple of the block size.
    if (i < size)
        c[i] = a[i] + b[i];
}
// Builds and runs a HIP/CUDA graph that: (1) copies `input` and `bias` to
// the device, (2) runs a cuDNN activation on the input via a host-callback
// node, (3) adds the bias with addKernel, and (4) copies the result into
// `output`.  All device buffers hold `size` floats.  Returns the status of
// the last hipMalloc/hipSetDevice call (graph errors abort via the check
// macros).
hipError_t buildAndRunCudaGraph(float* output, const float* input, const float* bias, unsigned int size)
{
    hipStream_t streamForGraph;
    cudnnHandle_t cudnn;
    checkCudaErrors(hipStreamCreateWithFlags(&streamForGraph, hipStreamNonBlocking));
    checkCUDNN(cudnnCreate(&cudnn));
    checkCUDNN(cudnnSetStream(cudnn, streamForGraph));
    // Original
    float* dev_input = nullptr;
    float* dev_activation = nullptr;
    float* dev_bias = nullptr;
    float* dev_output = nullptr;
    hipError_t cudaStatus;
    // NOTE(review): threads becomes 0 when size == 0, which would make the
    // launch configuration invalid — confirm callers pass size > 0.
    int threads = ::min(256u, size);
    int blocks = (size + threads - 1) / threads;
    // For Graph
    hipGraph_t graph;
    std::vector<hipGraphNode_t> nodeDependencies;
    hipGraphNode_t memcpyInputNode, memcpyBiasNode, memcpyOutputNode, kernelNode, cudnnNode;
    // NOTE(review): hipify leftovers — cuda* param structs mixed with hip*
    // types; works only where the CUDA names are aliased.  Verify build.
    cudaKernelNodeParams kernelNodeParams = { 0 };
    cudaHostNodeParams cudnnNodeParams = { 0 };
    hipMemcpy3DParms memcpyParams = { 0 };
    // Choose which GPU to run on, change this on a multi-GPU system. Then allocate GPU memory.
    // NOTE(review): allocation failures are only logged; execution continues
    // with null device pointers — confirm this best-effort style is intended.
    {
        cudaStatus = hipSetDevice(0);
        if (cudaStatus != hipSuccess) {
            std::cerr << "hipSetDevice failed!  Do you have a CUDA-capable GPU installed?\n";
        }
        cudaStatus = hipMalloc((void**)&dev_output, size * sizeof(float));
        if (cudaStatus != hipSuccess) {
            std::cerr << "hipMalloc failed!\n";
        }
        cudaStatus = hipMalloc((void**)&dev_input, size * sizeof(float));
        if (cudaStatus != hipSuccess) {
            std::cerr << "hipMalloc failed!\n";
        }
        cudaStatus = hipMalloc((void**)&dev_bias, size * sizeof(float));
        if (cudaStatus != hipSuccess) {
            std::cerr << "hipMalloc failed!\n";
        }
        cudaStatus = hipMalloc((void**)&dev_activation, size * sizeof(float));
        if (cudaStatus != hipSuccess) {
            std::cerr << "hipMalloc failed!\n";
        }
    }
    // Start of Graph Creation
    checkCudaErrors(cudaGraphCreate(&graph, 0));
    // Add memcpy nodes for copying input vectors from host memory to GPU buffers
    // 1-D copies expressed as 3-D params: row length size*sizeof(float), 1 row, 1 slice.
    memset(&memcpyParams, 0, sizeof(memcpyParams));
    memcpyParams.srcArray = NULL;
    memcpyParams.srcPos = make_hipPos(0, 0, 0);
    memcpyParams.srcPtr = make_hipPitchedPtr((void*)input, size * sizeof(float), size, 1);
    memcpyParams.dstArray = NULL;
    memcpyParams.dstPos = make_hipPos(0, 0, 0);
    memcpyParams.dstPtr = make_hipPitchedPtr(dev_input, size * sizeof(float), size, 1);
    memcpyParams.extent = make_hipExtent(size * sizeof(float), 1, 1);
    memcpyParams.kind = hipMemcpyHostToDevice;
    checkCudaErrors(cudaGraphAddMemcpyNode(&memcpyInputNode, graph, NULL, 0, &memcpyParams));
    // hipMemcpy(dev_input, input, size * sizeof(float), hipMemcpyHostToDevice);
    memset(&memcpyParams, 0, sizeof(memcpyParams));
    memcpyParams.srcArray = NULL;
    memcpyParams.srcPos = make_hipPos(0, 0, 0);
    memcpyParams.srcPtr = make_hipPitchedPtr((void*)bias, size * sizeof(float), size, 1);
    memcpyParams.dstArray = NULL;
    memcpyParams.dstPos = make_hipPos(0, 0, 0);
    memcpyParams.dstPtr = make_hipPitchedPtr(dev_bias, size * sizeof(float), size, 1);
    memcpyParams.extent = make_hipExtent(size * sizeof(float), 1, 1);
    memcpyParams.kind = hipMemcpyHostToDevice;
    checkCudaErrors(cudaGraphAddMemcpyNode(&memcpyBiasNode, graph, NULL, 0, &memcpyParams));
    nodeDependencies.push_back(memcpyBiasNode);
    // Add a cudnn node for launching a kernel on the GPU
    // act_params is a stack local; the graph is launched and synchronized
    // before this function returns, so the pointer stays valid.
    ActivationParams act_params { dev_input, dev_activation, size, cudnn };
    // activation(&act_params);
    memset(&cudnnNodeParams, 0, sizeof(cudnnNodeParams));
    cudnnNodeParams.fn = reinterpret_cast<hipHostFn_t>(activation);
    cudnnNodeParams.userData = &act_params;
    checkCudaErrors(cudaGraphAddHostNode(&cudnnNode, graph, &memcpyInputNode, 1, &cudnnNodeParams));
    nodeDependencies.push_back(cudnnNode);
    // Add a kernel node for launching a kernel on the GPU
    // Depends on both the bias copy and the activation host node.
    memset(&kernelNodeParams, 0, sizeof(kernelNodeParams));
    kernelNodeParams.func = (void*)addKernel;
    kernelNodeParams.gridDim = dim3(blocks, 1, 1);
    kernelNodeParams.blockDim = dim3(threads, 1, 1);
    kernelNodeParams.sharedMemBytes = 0;
    void* kernelArgs[4] = { (void*)&dev_output, (void*)&dev_activation, (void*)&dev_bias, &size };
    kernelNodeParams.kernelParams = kernelArgs;
    kernelNodeParams.extra = NULL;
    checkCudaErrors(cudaGraphAddKernelNode(&kernelNode, graph, nodeDependencies.data(), nodeDependencies.size(), &kernelNodeParams));
    nodeDependencies.clear();
    nodeDependencies.push_back(kernelNode);
    // Add memcpy node for copying output vector from GPU buffers to host memory
    memset(&memcpyParams, 0, sizeof(memcpyParams));
    memcpyParams.srcArray = NULL;
    memcpyParams.srcPos = make_hipPos(0, 0, 0);
    memcpyParams.srcPtr = make_hipPitchedPtr(dev_output, size * sizeof(float), size, 1);
    memcpyParams.dstArray = NULL;
    memcpyParams.dstPos = make_hipPos(0, 0, 0);
    memcpyParams.dstPtr = make_hipPitchedPtr(output, size * sizeof(float), size, 1);
    memcpyParams.extent = make_hipExtent(size * sizeof(float), 1, 1);
    memcpyParams.kind = hipMemcpyDeviceToHost;
    checkCudaErrors(cudaGraphAddMemcpyNode(&memcpyOutputNode, graph, nodeDependencies.data(), nodeDependencies.size(), &memcpyParams));
    if (VERBOSE) {
        // nodes == NULL: hipGraphGetNodes only reports the count here.
        hipGraphNode_t* nodes = NULL;
        size_t numNodes = 0;
        checkCudaErrors(hipGraphGetNodes(graph, nodes, &numNodes));
        std::cout << "Num of nodes in the graph created manually " << numNodes << '\n';
    }
    // Create an executable graph from a graph
    hipGraphExec_t graphExec;
    checkCudaErrors(hipGraphInstantiate(&graphExec, graph, NULL, NULL, 0));
    // Run the graph
    checkCudaErrors(hipGraphLaunch(graphExec, streamForGraph));
    checkCudaErrors(hipStreamSynchronize(streamForGraph));
    // Clean up
    checkCudaErrors(hipGraphExecDestroy(graphExec));
    checkCudaErrors(hipGraphDestroy(graph));
    checkCudaErrors(hipStreamDestroy(streamForGraph));
    cudnnDestroy(cudnn);
    hipFree(dev_output);
    hipFree(dev_input);
    hipFree(dev_bias);
    hipFree(dev_activation);
    return cudaStatus;
}
|
4623a71a3f5cc310bea83b5242073ce978a86fef.cu
|
#include "graph_cudnn.h"
#include "cudnn_activation.h"
#include "errors.h"
#include <cuda_runtime.h>
#include <vector>
#include <iostream>
#define VERBOSE true
// Elementwise c[i] = a[i] + b[i] for i in [0, size).
__global__ void addKernel(float* c, const float* a, const float* b, unsigned int size)
{
    // Standard flat global index.  The original computed
    // (1 + blockIdx.x) * threadIdx.x, which both repeats and skips
    // elements and never used `size`.
    unsigned int i = blockIdx.x * blockDim.x + threadIdx.x;
    // Guard the grid tail: size need not be a multiple of the block size.
    if (i < size)
        c[i] = a[i] + b[i];
}
// Builds and runs a CUDA graph that: (1) copies `input` and `bias` to the
// device, (2) runs a cuDNN activation on the input via a host-callback
// node, (3) adds the bias with addKernel, and (4) copies the result into
// `output`.  All device buffers hold `size` floats.  Returns the status of
// the last cudaMalloc/cudaSetDevice call (graph errors abort via the check
// macros).
cudaError_t buildAndRunCudaGraph(float* output, const float* input, const float* bias, unsigned int size)
{
    cudaStream_t streamForGraph;
    cudnnHandle_t cudnn;
    checkCudaErrors(cudaStreamCreateWithFlags(&streamForGraph, cudaStreamNonBlocking));
    checkCUDNN(cudnnCreate(&cudnn));
    checkCUDNN(cudnnSetStream(cudnn, streamForGraph));
    // Original
    float* dev_input = nullptr;
    float* dev_activation = nullptr;
    float* dev_bias = nullptr;
    float* dev_output = nullptr;
    cudaError_t cudaStatus;
    // NOTE(review): threads becomes 0 when size == 0, which would make the
    // launch configuration invalid — confirm callers pass size > 0.
    int threads = std::min(256u, size);
    int blocks = (size + threads - 1) / threads;
    // For Graph
    cudaGraph_t graph;
    std::vector<cudaGraphNode_t> nodeDependencies;
    cudaGraphNode_t memcpyInputNode, memcpyBiasNode, memcpyOutputNode, kernelNode, cudnnNode;
    cudaKernelNodeParams kernelNodeParams = { 0 };
    cudaHostNodeParams cudnnNodeParams = { 0 };
    cudaMemcpy3DParms memcpyParams = { 0 };
    // Choose which GPU to run on, change this on a multi-GPU system. Then allocate GPU memory.
    // NOTE(review): allocation failures are only logged; execution continues
    // with null device pointers — confirm this best-effort style is intended.
    {
        cudaStatus = cudaSetDevice(0);
        if (cudaStatus != cudaSuccess) {
            std::cerr << "cudaSetDevice failed!  Do you have a CUDA-capable GPU installed?\n";
        }
        cudaStatus = cudaMalloc((void**)&dev_output, size * sizeof(float));
        if (cudaStatus != cudaSuccess) {
            std::cerr << "cudaMalloc failed!\n";
        }
        cudaStatus = cudaMalloc((void**)&dev_input, size * sizeof(float));
        if (cudaStatus != cudaSuccess) {
            std::cerr << "cudaMalloc failed!\n";
        }
        cudaStatus = cudaMalloc((void**)&dev_bias, size * sizeof(float));
        if (cudaStatus != cudaSuccess) {
            std::cerr << "cudaMalloc failed!\n";
        }
        cudaStatus = cudaMalloc((void**)&dev_activation, size * sizeof(float));
        if (cudaStatus != cudaSuccess) {
            std::cerr << "cudaMalloc failed!\n";
        }
    }
    // Start of Graph Creation
    checkCudaErrors(cudaGraphCreate(&graph, 0));
    // Add memcpy nodes for copying input vectors from host memory to GPU buffers
    // 1-D copies expressed as 3-D params: row length size*sizeof(float), 1 row, 1 slice.
    memset(&memcpyParams, 0, sizeof(memcpyParams));
    memcpyParams.srcArray = NULL;
    memcpyParams.srcPos = make_cudaPos(0, 0, 0);
    memcpyParams.srcPtr = make_cudaPitchedPtr((void*)input, size * sizeof(float), size, 1);
    memcpyParams.dstArray = NULL;
    memcpyParams.dstPos = make_cudaPos(0, 0, 0);
    memcpyParams.dstPtr = make_cudaPitchedPtr(dev_input, size * sizeof(float), size, 1);
    memcpyParams.extent = make_cudaExtent(size * sizeof(float), 1, 1);
    memcpyParams.kind = cudaMemcpyHostToDevice;
    checkCudaErrors(cudaGraphAddMemcpyNode(&memcpyInputNode, graph, NULL, 0, &memcpyParams));
    // cudaMemcpy(dev_input, input, size * sizeof(float), cudaMemcpyHostToDevice);
    memset(&memcpyParams, 0, sizeof(memcpyParams));
    memcpyParams.srcArray = NULL;
    memcpyParams.srcPos = make_cudaPos(0, 0, 0);
    memcpyParams.srcPtr = make_cudaPitchedPtr((void*)bias, size * sizeof(float), size, 1);
    memcpyParams.dstArray = NULL;
    memcpyParams.dstPos = make_cudaPos(0, 0, 0);
    memcpyParams.dstPtr = make_cudaPitchedPtr(dev_bias, size * sizeof(float), size, 1);
    memcpyParams.extent = make_cudaExtent(size * sizeof(float), 1, 1);
    memcpyParams.kind = cudaMemcpyHostToDevice;
    checkCudaErrors(cudaGraphAddMemcpyNode(&memcpyBiasNode, graph, NULL, 0, &memcpyParams));
    nodeDependencies.push_back(memcpyBiasNode);
    // Add a cudnn node for launching a kernel on the GPU
    // act_params is a stack local; the graph is launched and synchronized
    // before this function returns, so the pointer stays valid.
    ActivationParams act_params { dev_input, dev_activation, size, cudnn };
    // activation(&act_params);
    memset(&cudnnNodeParams, 0, sizeof(cudnnNodeParams));
    cudnnNodeParams.fn = reinterpret_cast<cudaHostFn_t>(activation);
    cudnnNodeParams.userData = &act_params;
    checkCudaErrors(cudaGraphAddHostNode(&cudnnNode, graph, &memcpyInputNode, 1, &cudnnNodeParams));
    nodeDependencies.push_back(cudnnNode);
    // Add a kernel node for launching a kernel on the GPU
    // Depends on both the bias copy and the activation host node.
    memset(&kernelNodeParams, 0, sizeof(kernelNodeParams));
    kernelNodeParams.func = (void*)addKernel;
    kernelNodeParams.gridDim = dim3(blocks, 1, 1);
    kernelNodeParams.blockDim = dim3(threads, 1, 1);
    kernelNodeParams.sharedMemBytes = 0;
    void* kernelArgs[4] = { (void*)&dev_output, (void*)&dev_activation, (void*)&dev_bias, &size };
    kernelNodeParams.kernelParams = kernelArgs;
    kernelNodeParams.extra = NULL;
    checkCudaErrors(cudaGraphAddKernelNode(&kernelNode, graph, nodeDependencies.data(), nodeDependencies.size(), &kernelNodeParams));
    nodeDependencies.clear();
    nodeDependencies.push_back(kernelNode);
    // Add memcpy node for copying output vector from GPU buffers to host memory
    memset(&memcpyParams, 0, sizeof(memcpyParams));
    memcpyParams.srcArray = NULL;
    memcpyParams.srcPos = make_cudaPos(0, 0, 0);
    memcpyParams.srcPtr = make_cudaPitchedPtr(dev_output, size * sizeof(float), size, 1);
    memcpyParams.dstArray = NULL;
    memcpyParams.dstPos = make_cudaPos(0, 0, 0);
    memcpyParams.dstPtr = make_cudaPitchedPtr(output, size * sizeof(float), size, 1);
    memcpyParams.extent = make_cudaExtent(size * sizeof(float), 1, 1);
    memcpyParams.kind = cudaMemcpyDeviceToHost;
    checkCudaErrors(cudaGraphAddMemcpyNode(&memcpyOutputNode, graph, nodeDependencies.data(), nodeDependencies.size(), &memcpyParams));
    if (VERBOSE) {
        // nodes == NULL: cudaGraphGetNodes only reports the count here.
        cudaGraphNode_t* nodes = NULL;
        size_t numNodes = 0;
        checkCudaErrors(cudaGraphGetNodes(graph, nodes, &numNodes));
        std::cout << "Num of nodes in the graph created manually " << numNodes << '\n';
    }
    // Create an executable graph from a graph
    cudaGraphExec_t graphExec;
    checkCudaErrors(cudaGraphInstantiate(&graphExec, graph, NULL, NULL, 0));
    // Run the graph
    checkCudaErrors(cudaGraphLaunch(graphExec, streamForGraph));
    checkCudaErrors(cudaStreamSynchronize(streamForGraph));
    // Clean up
    checkCudaErrors(cudaGraphExecDestroy(graphExec));
    checkCudaErrors(cudaGraphDestroy(graph));
    checkCudaErrors(cudaStreamDestroy(streamForGraph));
    cudnnDestroy(cudnn);
    cudaFree(dev_output);
    cudaFree(dev_input);
    cudaFree(dev_bias);
    cudaFree(dev_activation);
    return cudaStatus;
}
|
2cd6be744e7f3d3c1104e6d91c6e03b17d4d88bc.hip
|
// !!! This is a file automatically generated by hipify!!!
#include <torch/extension.h>
#include <hip/hip_runtime.h>

#include <algorithm>
#define CUDA_KERNEL_LOOP(i, n) \
for (int i = blockIdx.x * blockDim.x + threadIdx.x; i < (n); \
i += blockDim.x * gridDim.x)
const int CUDA_NUM_THREADS = 1024;
const int kMaxGridNum = 65535;

// Number of thread blocks needed to cover N elements with
// CUDA_NUM_THREADS threads each, capped at kMaxGridNum; CUDA_KERNEL_LOOP's
// grid-stride loop covers any remainder.  Uses std::min explicitly: the
// hipify-generated `::min` relied on a global-namespace min being pulled
// in transitively.
inline int GET_BLOCKS(const int N)
{
    return std::min(kMaxGridNum, (N + CUDA_NUM_THREADS - 1) / CUDA_NUM_THREADS);
}
// ASK_7: [[1,2,4,5,6,8,9],[2,3,4,5,6,7,8]]
// ASK_7 forward kernel: each thread produces one output pixel for one
// (batch, out-channel, in-channel) triple.  data1..data9 are the 3x3 input
// neighborhood around (opy, opx) in row-major order (data5 is the center);
// out-of-bounds taps are zero-padded.  Two sparse 7-tap kernels share the
// loaded neighborhood: taps {1,2,4,5,6,8,9} write output channel n, taps
// {2,3,4,5,6,7,8} write channel n+output_channels.  weights is laid out as
// [output_channels][tap * input_channels + c].
template <typename scalar_t>
__global__ void ask_d2_1245689_2345678_cuda_forward_kernel(const int num_kernels,
    const torch::PackedTensorAccessor32<scalar_t,4,torch::RestrictPtrTraits> input,
    const torch::PackedTensorAccessor32<scalar_t,2,torch::RestrictPtrTraits> weights,
    torch::PackedTensorAccessor32<scalar_t,5,torch::RestrictPtrTraits> output,
    const int output_channels, const int input_channels, const int input_height, const int input_width, const int output_height, const int output_width, const int stride)
{
    CUDA_KERNEL_LOOP(index, num_kernels)
    {
        // Decompose the flat index: x fastest, then y, in-channel,
        // out-channel, batch.
        const int px = index % output_width;
        const int py = (index / output_width) % output_height;
        const int c = (index / output_width / output_height) % input_channels;
        const int n = (index / output_width / output_height / input_channels) % output_channels;
        const int b = index / output_width / output_height / input_channels / output_channels;
        // Sampling position in the input (strided).
        const int opx = stride * px;
        const int opy = stride * py;
        const scalar_t data1 = (opx>0&&opy>0) ? input[b][c][opy-1][opx-1] : 0;
        const scalar_t data2 = opy>0 ? input[b][c][opy-1][opx] : 0;
        const scalar_t data3 = (opx<(input_width-1)&&opy>0) ? input[b][c][opy-1][opx+1] : 0;
        const scalar_t data4 = opx>0 ? input[b][c][opy][opx-1] : 0;
        const scalar_t data5 = input[b][c][opy][opx];
        const scalar_t data6 = opx<(input_width-1) ? input[b][c][opy][opx+1] : 0;
        const scalar_t data7 = (opx>0&&opy<(input_height-1)) ? input[b][c][opy+1][opx-1] : 0;
        const scalar_t data8 = opy<(input_height-1) ? input[b][c][opy+1][opx] : 0;
        const scalar_t data9 = (opx<(input_width-1)&&opy<(input_height-1)) ? input[b][c][opy+1][opx+1] : 0;
        output[b][n][c][py][px] = data1 * weights[n][c] + \
            data2 * weights[n][c+input_channels] + \
            data4 * weights[n][c+2*input_channels] + \
            data5 * weights[n][c+3*input_channels] + \
            data6 * weights[n][c+4*input_channels] + \
            data8 * weights[n][c+5*input_channels] + \
            data9 * weights[n][c+6*input_channels];
        output[b][n+output_channels][c][py][px] = data2 * weights[n][c+7*input_channels] + \
            data3 * weights[n][c+8*input_channels] + \
            data4 * weights[n][c+9*input_channels] + \
            data5 * weights[n][c+10*input_channels] + \
            data6 * weights[n][c+11*input_channels] + \
            data7 * weights[n][c+12*input_channels] + \
            data8 * weights[n][c+13*input_channels];
    }
}
// Launcher for the ASK_7 kernel above: fills a 5-D buffer with
// 2*output_channels sparse-kernel responses per input channel, then
// reduces over the input-channel dimension (dim 2).
torch::Tensor ask_d2_1245689_2345678_cuda_forward(torch::Tensor input, torch::Tensor weights, const int stride)
{
    const auto batch_size = input.size(0);
    const auto input_channels = input.size(1);
    const auto input_height = input.size(2);
    const auto input_width = input.size(3);
    const auto output_channels = weights.size(0);
    // ceil(extent / stride)
    const auto output_height = (input_height - 1) / stride + 1;
    const auto output_width = (input_width - 1) / stride + 1;

    auto output = at::zeros( {batch_size, output_channels*2, input_channels, output_height, output_width}, input.options() );
    const int num_kernels = batch_size * output_channels * input_channels * output_height * output_width;

    // scalar_type() replaces the deprecated Tensor::type() for dispatch.
    AT_DISPATCH_FLOATING_TYPES(input.scalar_type(), "ask_d2_1245689_2345678_cuda_forward", ([&] {
        hipLaunchKernelGGL(( ask_d2_1245689_2345678_cuda_forward_kernel<scalar_t>), dim3(GET_BLOCKS(num_kernels)), dim3(CUDA_NUM_THREADS), 0, 0,
            num_kernels,
            input.packed_accessor32<scalar_t,4,torch::RestrictPtrTraits>(),
            weights.packed_accessor32<scalar_t,2,torch::RestrictPtrTraits>(),
            output.packed_accessor32<scalar_t,5,torch::RestrictPtrTraits>(),
            output_channels, input_channels, input_height, input_width, output_height, output_width, stride);
    }));

    return at::sum(output, 2);
}
// ASK_6: [[1,2,4,5,6,8],[2,3,4,5,6,8],[2,4,5,6,7,8],[2,4,5,6,8,9]]
// ASK_6 forward kernel: each thread produces one output pixel for one
// (batch, out-channel, in-channel) triple.  data1..data9 are the 3x3 input
// neighborhood around (opy, opx) in row-major order (data5 is the center);
// out-of-bounds taps are zero-padded.  Four sparse 6-tap kernels share the
// loaded neighborhood; tap sets {1,2,4,5,6,8}, {2,3,4,5,6,8}, {2,4,5,6,7,8}
// and {2,4,5,6,8,9} write output channels n, n+OC, n+2*OC, n+3*OC.
template <typename scalar_t>
__global__ void ask_d4_124568_234568_245678_245689_cuda_forward_kernel(const int num_kernels,
    const torch::PackedTensorAccessor32<scalar_t,4,torch::RestrictPtrTraits> input,
    const torch::PackedTensorAccessor32<scalar_t,2,torch::RestrictPtrTraits> weights,
    torch::PackedTensorAccessor32<scalar_t,5,torch::RestrictPtrTraits> output,
    const int output_channels, const int input_channels, const int input_height, const int input_width, const int output_height, const int output_width, const int stride)
{
    CUDA_KERNEL_LOOP(index, num_kernels)
    {
        // Decompose the flat index: x fastest, then y, in-channel,
        // out-channel, batch.
        const int px = index % output_width;
        const int py = (index / output_width) % output_height;
        const int c = (index / output_width / output_height) % input_channels;
        const int n = (index / output_width / output_height / input_channels) % output_channels;
        const int b = index / output_width / output_height / input_channels / output_channels;
        const int opx = stride * px;
        const int opy = stride * py;
        const scalar_t data1 = (opx>0&&opy>0) ? input[b][c][opy-1][opx-1] : 0;
        const scalar_t data2 = opy>0 ? input[b][c][opy-1][opx] : 0;
        const scalar_t data3 = (opx<(input_width-1)&&opy>0) ? input[b][c][opy-1][opx+1] : 0;
        const scalar_t data4 = opx>0 ? input[b][c][opy][opx-1] : 0;
        const scalar_t data5 = input[b][c][opy][opx];
        const scalar_t data6 = opx<(input_width-1) ? input[b][c][opy][opx+1] : 0;
        const scalar_t data7 = (opx>0&&opy<(input_height-1)) ? input[b][c][opy+1][opx-1] : 0;
        const scalar_t data8 = opy<(input_height-1) ? input[b][c][opy+1][opx] : 0;
        const scalar_t data9 = (opx<(input_width-1)&&opy<(input_height-1)) ? input[b][c][opy+1][opx+1] : 0;
        output[b][n][c][py][px] = data1 * weights[n][c] + \
            data2 * weights[n][c+input_channels] + \
            data4 * weights[n][c+2*input_channels] + \
            data5 * weights[n][c+3*input_channels] + \
            data6 * weights[n][c+4*input_channels] + \
            data8 * weights[n][c+5*input_channels];
        output[b][n+output_channels][c][py][px] = data2 * weights[n][c+6*input_channels] + \
            data3 * weights[n][c+7*input_channels] + \
            data4 * weights[n][c+8*input_channels] + \
            data5 * weights[n][c+9*input_channels] + \
            data6 * weights[n][c+10*input_channels] + \
            data8 * weights[n][c+11*input_channels];
        output[b][n+2*output_channels][c][py][px] = data2 * weights[n][c+12*input_channels] + \
            data4 * weights[n][c+13*input_channels] + \
            data5 * weights[n][c+14*input_channels] + \
            data6 * weights[n][c+15*input_channels] + \
            data7 * weights[n][c+16*input_channels] + \
            data8 * weights[n][c+17*input_channels];
        output[b][n+3*output_channels][c][py][px] = data2 * weights[n][c+18*input_channels] + \
            data4 * weights[n][c+19*input_channels] + \
            data5 * weights[n][c+20*input_channels] + \
            data6 * weights[n][c+21*input_channels] + \
            data8 * weights[n][c+22*input_channels] + \
            data9 * weights[n][c+23*input_channels];
    }
}
// Launcher for the ASK_6 kernel above: fills a 5-D buffer with
// 4*output_channels sparse-kernel responses per input channel, then
// reduces over the input-channel dimension (dim 2).
torch::Tensor ask_d4_124568_234568_245678_245689_cuda_forward(torch::Tensor input, torch::Tensor weights, const int stride)
{
    const auto batch_size = input.size(0);
    const auto input_channels = input.size(1);
    const auto input_height = input.size(2);
    const auto input_width = input.size(3);
    const auto output_channels = weights.size(0);
    // ceil(extent / stride)
    const auto output_height = (input_height - 1) / stride + 1;
    const auto output_width = (input_width - 1) / stride + 1;

    auto output = at::zeros( {batch_size, output_channels*4, input_channels, output_height, output_width}, input.options() );
    const int num_kernels = batch_size * output_channels * input_channels * output_height * output_width;

    // scalar_type() replaces the deprecated Tensor::type() for dispatch.
    AT_DISPATCH_FLOATING_TYPES(input.scalar_type(), "ask_d4_124568_234568_245678_245689_cuda_forward", ([&] {
        hipLaunchKernelGGL(( ask_d4_124568_234568_245678_245689_cuda_forward_kernel<scalar_t>), dim3(GET_BLOCKS(num_kernels)), dim3(CUDA_NUM_THREADS), 0, 0,
            num_kernels,
            input.packed_accessor32<scalar_t,4,torch::RestrictPtrTraits>(),
            weights.packed_accessor32<scalar_t,2,torch::RestrictPtrTraits>(),
            output.packed_accessor32<scalar_t,5,torch::RestrictPtrTraits>(),
            output_channels, input_channels, input_height, input_width, output_height, output_width, stride);
    }));

    return at::sum(output, 2);
}
// ASK_5a: [[2,4,5,6,8]]
// ASK_5a forward kernel: one thread per output pixel per (batch,
// out-channel, in-channel) triple.  Loads the cross-shaped 5-tap
// neighborhood {2,4,5,6,8} around (opy, opx) (data5 is the center);
// out-of-bounds taps are zero-padded.
template <typename scalar_t>
__global__ void ask_d1_24568_cuda_forward_kernel(const int num_kernels,
    const torch::PackedTensorAccessor32<scalar_t,4,torch::RestrictPtrTraits> input,
    const torch::PackedTensorAccessor32<scalar_t,2,torch::RestrictPtrTraits> weights,
    torch::PackedTensorAccessor32<scalar_t,5,torch::RestrictPtrTraits> output,
    const int output_channels, const int input_channels, const int input_height, const int input_width, const int output_height, const int output_width, const int stride)
{
    CUDA_KERNEL_LOOP(index, num_kernels)
    {
        // Decompose the flat index: x fastest, then y, in-channel,
        // out-channel, batch.
        const int px = index % output_width;
        const int py = (index / output_width) % output_height;
        const int c = (index / output_width / output_height) % input_channels;
        const int n = (index / output_width / output_height / input_channels) % output_channels;
        const int b = index / output_width / output_height / input_channels / output_channels;
        const int opx = stride * px;
        const int opy = stride * py;
        const scalar_t data2 = opy>0 ? input[b][c][opy-1][opx] : 0;
        const scalar_t data4 = opx>0 ? input[b][c][opy][opx-1] : 0;
        const scalar_t data5 = input[b][c][opy][opx];
        const scalar_t data6 = opx<(input_width-1) ? input[b][c][opy][opx+1] : 0;
        const scalar_t data8 = opy<(input_height-1) ? input[b][c][opy+1][opx] : 0;
        output[b][n][c][py][px] = data2 * weights[n][c] + \
            data4 * weights[n][c+input_channels] + \
            data5 * weights[n][c+2*input_channels] + \
            data6 * weights[n][c+3*input_channels] + \
            data8 * weights[n][c+4*input_channels];
    }
}
// Launcher for the ASK_5a kernel above: fills a 5-D buffer with
// output_channels sparse-kernel responses per input channel, then reduces
// over the input-channel dimension (dim 2).
torch::Tensor ask_d1_24568_cuda_forward(torch::Tensor input, torch::Tensor weights, const int stride)
{
    const auto batch_size = input.size(0);
    const auto input_channels = input.size(1);
    const auto input_height = input.size(2);
    const auto input_width = input.size(3);
    const auto output_channels = weights.size(0);
    // ceil(extent / stride)
    const auto output_height = (input_height - 1) / stride + 1;
    const auto output_width = (input_width - 1) / stride + 1;

    auto output = at::zeros( {batch_size, output_channels*1, input_channels, output_height, output_width}, input.options() );
    const int num_kernels = batch_size * output_channels * input_channels * output_height * output_width;

    // scalar_type() replaces the deprecated Tensor::type() for dispatch.
    AT_DISPATCH_FLOATING_TYPES(input.scalar_type(), "ask_d1_24568_cuda_forward", ([&] {
        hipLaunchKernelGGL(( ask_d1_24568_cuda_forward_kernel<scalar_t>), dim3(GET_BLOCKS(num_kernels)), dim3(CUDA_NUM_THREADS), 0, 0,
            num_kernels,
            input.packed_accessor32<scalar_t,4,torch::RestrictPtrTraits>(),
            weights.packed_accessor32<scalar_t,2,torch::RestrictPtrTraits>(),
            output.packed_accessor32<scalar_t,5,torch::RestrictPtrTraits>(),
            output_channels, input_channels, input_height, input_width, output_height, output_width, stride);
    }));

    return at::sum(output, 2);
}
// ASK_5b: [[2,4,5,6,8],[2,4,5,6,8],[2,4,5,6,8],[1,3,5,7,9]]
// ASK_5b forward kernel: one thread per output pixel per (batch,
// out-channel, in-channel) triple.  data1..data9 are the 3x3 input
// neighborhood around (opy, opx) in row-major order (data5 is the center);
// out-of-bounds taps are zero-padded.  Three cross-shaped 5-tap kernels
// ({2,4,5,6,8}) write channels n, n+OC, n+2*OC, and one diagonal 5-tap
// kernel ({1,3,5,7,9}) writes channel n+3*OC.
template <typename scalar_t>
__global__ void ask_d4_24568_24568_24568_13579_cuda_forward_kernel(const int num_kernels,
    const torch::PackedTensorAccessor32<scalar_t,4,torch::RestrictPtrTraits> input,
    const torch::PackedTensorAccessor32<scalar_t,2,torch::RestrictPtrTraits> weights,
    torch::PackedTensorAccessor32<scalar_t,5,torch::RestrictPtrTraits> output,
    const int output_channels, const int input_channels, const int input_height, const int input_width, const int output_height, const int output_width, const int stride)
{
    CUDA_KERNEL_LOOP(index, num_kernels)
    {
        // Decompose the flat index: x fastest, then y, in-channel,
        // out-channel, batch.
        const int px = index % output_width;
        const int py = (index / output_width) % output_height;
        const int c = (index / output_width / output_height) % input_channels;
        const int n = (index / output_width / output_height / input_channels) % output_channels;
        const int b = index / output_width / output_height / input_channels / output_channels;
        const int opx = stride * px;
        const int opy = stride * py;
        const scalar_t data1 = (opx>0&&opy>0) ? input[b][c][opy-1][opx-1] : 0;
        const scalar_t data2 = opy>0 ? input[b][c][opy-1][opx] : 0;
        const scalar_t data3 = (opx<(input_width-1)&&opy>0) ? input[b][c][opy-1][opx+1] : 0;
        const scalar_t data4 = opx>0 ? input[b][c][opy][opx-1] : 0;
        const scalar_t data5 = input[b][c][opy][opx];
        const scalar_t data6 = opx<(input_width-1) ? input[b][c][opy][opx+1] : 0;
        const scalar_t data7 = (opx>0&&opy<(input_height-1)) ? input[b][c][opy+1][opx-1] : 0;
        const scalar_t data8 = opy<(input_height-1) ? input[b][c][opy+1][opx] : 0;
        const scalar_t data9 = (opx<(input_width-1)&&opy<(input_height-1)) ? input[b][c][opy+1][opx+1] : 0;
        output[b][n][c][py][px] = data2 * weights[n][c] + \
            data4 * weights[n][c+input_channels] + \
            data5 * weights[n][c+2*input_channels] + \
            data6 * weights[n][c+3*input_channels] + \
            data8 * weights[n][c+4*input_channels];
        output[b][n+output_channels][c][py][px] = data2 * weights[n][c+5*input_channels] + \
            data4 * weights[n][c+6*input_channels] + \
            data5 * weights[n][c+7*input_channels] + \
            data6 * weights[n][c+8*input_channels] + \
            data8 * weights[n][c+9*input_channels];
        output[b][n+2*output_channels][c][py][px] = data2 * weights[n][c+10*input_channels] + \
            data4 * weights[n][c+11*input_channels] + \
            data5 * weights[n][c+12*input_channels] + \
            data6 * weights[n][c+13*input_channels] + \
            data8 * weights[n][c+14*input_channels];
        output[b][n+3*output_channels][c][py][px] = data1 * weights[n][c+15*input_channels] + \
            data3 * weights[n][c+16*input_channels] + \
            data5 * weights[n][c+17*input_channels] + \
            data7 * weights[n][c+18*input_channels] + \
            data9 * weights[n][c+19*input_channels];
    }
}
// Launcher for the ASK_5b kernel above: fills a 5-D buffer with
// 4*output_channels sparse-kernel responses per input channel, then
// reduces over the input-channel dimension (dim 2).
torch::Tensor ask_d4_24568_24568_24568_13579_cuda_forward(torch::Tensor input, torch::Tensor weights, const int stride)
{
    const auto batch_size = input.size(0);
    const auto input_channels = input.size(1);
    const auto input_height = input.size(2);
    const auto input_width = input.size(3);
    const auto output_channels = weights.size(0);
    // ceil(extent / stride)
    const auto output_height = (input_height - 1) / stride + 1;
    const auto output_width = (input_width - 1) / stride + 1;

    auto output = at::zeros( {batch_size, output_channels*4, input_channels, output_height, output_width}, input.options() );
    const int num_kernels = batch_size * output_channels * input_channels * output_height * output_width;

    // scalar_type() replaces the deprecated Tensor::type() for dispatch.
    AT_DISPATCH_FLOATING_TYPES(input.scalar_type(), "ask_d4_24568_24568_24568_13579_cuda_forward", ([&] {
        hipLaunchKernelGGL(( ask_d4_24568_24568_24568_13579_cuda_forward_kernel<scalar_t>), dim3(GET_BLOCKS(num_kernels)), dim3(CUDA_NUM_THREADS), 0, 0,
            num_kernels,
            input.packed_accessor32<scalar_t,4,torch::RestrictPtrTraits>(),
            weights.packed_accessor32<scalar_t,2,torch::RestrictPtrTraits>(),
            output.packed_accessor32<scalar_t,5,torch::RestrictPtrTraits>(),
            output_channels, input_channels, input_height, input_width, output_height, output_width, stride);
    }));

    return at::sum(output, 2);
}
// ASK_4a: [[1,2,4,5],[2,3,5,6],[4,5,7,8],[5,6,8,9]]
// ASK_4a forward kernel. Grid-stride loop (CUDA_KERNEL_LOOP): one iteration
// per output element, flat index decomposed as (b, n, c, py, px).
// data1..data9 are the 3x3 input window around the strided source pixel
// (row-major: 1 2 3 / 4 5 6 / 7 8 9); out-of-image taps read as 0.
// The four output groups at channel offsets k*output_channels use the four
// overlapping 2x2 corners of the window: [1,2,4,5], [2,3,5,6], [4,5,7,8],
// [5,6,8,9], with tap j of group k weighted by weights[n][c+(4k+j)*input_channels].
template <typename scalar_t>
__global__ void ask_d4_1245_2356_4578_5689_cuda_forward_kernel(const int num_kernels,
    const torch::PackedTensorAccessor32<scalar_t,4,torch::RestrictPtrTraits> input,
    const torch::PackedTensorAccessor32<scalar_t,2,torch::RestrictPtrTraits> weights,
    torch::PackedTensorAccessor32<scalar_t,5,torch::RestrictPtrTraits> output,
    const int output_channels, const int input_channels, const int input_height, const int input_width, const int output_height, const int output_width, const int stride)
{
    CUDA_KERNEL_LOOP(index, num_kernels)
    {
        // Decompose the flat element index into output coordinates.
        const int px = index % output_width;
        const int py = (index / output_width) % output_height;
        const int c = (index / output_width / output_height) % input_channels;
        const int n = (index / output_width / output_height / input_channels) % output_channels;
        const int b = index / output_width / output_height / input_channels / output_channels;
        // Source pixel in the (unstrided) input image.
        const int opx = stride * px;
        const int opy = stride * py;
        // 3x3 neighborhood taps, zero-padded at the image border.
        const scalar_t data1 = (opx>0&&opy>0) ? input[b][c][opy-1][opx-1] : 0;
        const scalar_t data2 = opy>0 ? input[b][c][opy-1][opx] : 0;
        const scalar_t data3 = (opx<(input_width-1)&&opy>0) ? input[b][c][opy-1][opx+1] : 0;
        const scalar_t data4 = opx>0 ? input[b][c][opy][opx-1] : 0;
        const scalar_t data5 = input[b][c][opy][opx];
        const scalar_t data6 = opx<(input_width-1) ? input[b][c][opy][opx+1] : 0;
        const scalar_t data7 = (opx>0&&opy<(input_height-1)) ? input[b][c][opy+1][opx-1] : 0;
        const scalar_t data8 = opy<(input_height-1) ? input[b][c][opy+1][opx] : 0;
        const scalar_t data9 = (opx<(input_width-1)&&opy<(input_height-1)) ? input[b][c][opy+1][opx+1] : 0;
        // Group 0: top-left 2x2 corner [1,2,4,5].
        output[b][n][c][py][px] = data1 * weights[n][c] + \
                                data2 * weights[n][c+input_channels] + \
                                data4 * weights[n][c+2*input_channels] + \
                                data5 * weights[n][c+3*input_channels];
        // Group 1: top-right 2x2 corner [2,3,5,6].
        output[b][n+output_channels][c][py][px] = data2 * weights[n][c+4*input_channels] + \
                                data3 * weights[n][c+5*input_channels] + \
                                data5 * weights[n][c+6*input_channels] + \
                                data6 * weights[n][c+7*input_channels];
        // Group 2: bottom-left 2x2 corner [4,5,7,8].
        output[b][n+2*output_channels][c][py][px] = data4 * weights[n][c+8*input_channels] + \
                                data5 * weights[n][c+9*input_channels] + \
                                data7 * weights[n][c+10*input_channels] + \
                                data8 * weights[n][c+11*input_channels];
        // Group 3: bottom-right 2x2 corner [5,6,8,9].
        output[b][n+3*output_channels][c][py][px] = data5 * weights[n][c+12*input_channels] + \
                                data6 * weights[n][c+13*input_channels] + \
                                data8 * weights[n][c+14*input_channels] + \
                                data9 * weights[n][c+15*input_channels];
    }
}
// Host wrapper for the ASK_4a forward pass (HIP launch).
// input:   (batch, in_channels, H, W)
// weights: (out_channels, >= 16*in_channels) — 4 groups x 4 taps.
// Returns (batch, 4*out_channels, ceil(H/stride), ceil(W/stride)) after
// summing the per-input-channel partials over dim 2.
torch::Tensor ask_d4_1245_2356_4578_5689_cuda_forward(torch::Tensor input, torch::Tensor weights, const int stride)
{
    const auto batch_size = input.size(0);
    const auto input_channels = input.size(1);
    const auto input_height = input.size(2);
    const auto input_width = input.size(3);
    const auto output_channels = weights.size(0);
    // Output spatial size is ceil(input / stride).
    const auto output_height = (input_height - 1) / stride + 1;
    const auto output_width = (input_width - 1) / stride + 1;
    auto output = at::zeros( {batch_size, output_channels*4, input_channels, output_height, output_width}, input.options() );
    // NOTE(review): 32-bit element count — assumes the total fits in int.
    const int num_kernels = batch_size * output_channels * input_channels * output_height * output_width;
    // scalar_type() is the non-deprecated replacement for type() here.
    AT_DISPATCH_FLOATING_TYPES(input.scalar_type(), "ask_d4_1245_2356_4578_5689_cuda_forward", ([&] {
        hipLaunchKernelGGL(( ask_d4_1245_2356_4578_5689_cuda_forward_kernel<scalar_t>), dim3(GET_BLOCKS(num_kernels)), dim3(CUDA_NUM_THREADS), 0, 0,
            num_kernels,
            input.packed_accessor32<scalar_t,4,torch::RestrictPtrTraits>(),
            weights.packed_accessor32<scalar_t,2,torch::RestrictPtrTraits>(),
            output.packed_accessor32<scalar_t,5,torch::RestrictPtrTraits>(),
            output_channels, input_channels, input_height, input_width, output_height, output_width, stride);
    }));
    // Surface launch-configuration errors instead of failing silently later.
    TORCH_CHECK(hipGetLastError() == hipSuccess, "ask_d4_1245_2356_4578_5689_cuda_forward kernel launch failed");
    return at::sum(output, 2);
}
// ASK_4b: [[2,4,5,6],[2,5,6,8],[4,5,6,8],[2,4,5,8]]
// ASK_4b forward kernel. Grid-stride loop: one iteration per output element,
// flat index decomposed as (b, n, c, py, px). Only the cross-shaped taps
// 2 (up), 4 (left), 5 (center), 6 (right), 8 (down) of the 3x3 window are
// read; out-of-image taps read as 0. The four output groups use the tap
// subsets [2,4,5,6], [2,5,6,8], [4,5,6,8], [2,4,5,8], with weights laid out
// consecutively as weights[n][c + j*input_channels], j = 0..15.
template <typename scalar_t>
__global__ void ask_d4_2456_2568_4568_2458_cuda_forward_kernel(const int num_kernels,
    const torch::PackedTensorAccessor32<scalar_t,4,torch::RestrictPtrTraits> input,
    const torch::PackedTensorAccessor32<scalar_t,2,torch::RestrictPtrTraits> weights,
    torch::PackedTensorAccessor32<scalar_t,5,torch::RestrictPtrTraits> output,
    const int output_channels, const int input_channels, const int input_height, const int input_width, const int output_height, const int output_width, const int stride)
{
    CUDA_KERNEL_LOOP(index, num_kernels)
    {
        // Decompose the flat element index into output coordinates.
        const int px = index % output_width;
        const int py = (index / output_width) % output_height;
        const int c = (index / output_width / output_height) % input_channels;
        const int n = (index / output_width / output_height / input_channels) % output_channels;
        const int b = index / output_width / output_height / input_channels / output_channels;
        // Source pixel in the (unstrided) input image.
        const int opx = stride * px;
        const int opy = stride * py;
        // Cross-shaped taps, zero-padded at the image border.
        const scalar_t data2 = opy>0 ? input[b][c][opy-1][opx] : 0;
        const scalar_t data4 = opx>0 ? input[b][c][opy][opx-1] : 0;
        const scalar_t data5 = input[b][c][opy][opx];
        const scalar_t data6 = opx<(input_width-1) ? input[b][c][opy][opx+1] : 0;
        const scalar_t data8 = opy<(input_height-1) ? input[b][c][opy+1][opx] : 0;
        // Group 0: taps [2,4,5,6].
        output[b][n][c][py][px] = data2 * weights[n][c] + \
                                data4 * weights[n][c+input_channels] + \
                                data5 * weights[n][c+2*input_channels] + \
                                data6 * weights[n][c+3*input_channels];
        // Group 1: taps [2,5,6,8].
        output[b][n+output_channels][c][py][px] = data2 * weights[n][c+4*input_channels] + \
                                data5 * weights[n][c+5*input_channels] + \
                                data6 * weights[n][c+6*input_channels] + \
                                data8 * weights[n][c+7*input_channels];
        // Group 2: taps [4,5,6,8].
        output[b][n+2*output_channels][c][py][px] = data4 * weights[n][c+8*input_channels] + \
                                data5 * weights[n][c+9*input_channels] + \
                                data6 * weights[n][c+10*input_channels] + \
                                data8 * weights[n][c+11*input_channels];
        // Group 3: taps [2,4,5,8].
        output[b][n+3*output_channels][c][py][px] = data2 * weights[n][c+12*input_channels] + \
                                data4 * weights[n][c+13*input_channels] + \
                                data5 * weights[n][c+14*input_channels] + \
                                data8 * weights[n][c+15*input_channels];
    }
}
// Host wrapper for the ASK_4b forward pass (HIP launch).
// input:   (batch, in_channels, H, W)
// weights: (out_channels, >= 16*in_channels) — 4 groups x 4 taps.
// Returns (batch, 4*out_channels, ceil(H/stride), ceil(W/stride)) after
// summing the per-input-channel partials over dim 2.
torch::Tensor ask_d4_2456_2568_4568_2458_cuda_forward(torch::Tensor input, torch::Tensor weights, const int stride)
{
    const auto batch_size = input.size(0);
    const auto input_channels = input.size(1);
    const auto input_height = input.size(2);
    const auto input_width = input.size(3);
    const auto output_channels = weights.size(0);
    // Output spatial size is ceil(input / stride).
    const auto output_height = (input_height - 1) / stride + 1;
    const auto output_width = (input_width - 1) / stride + 1;
    auto output = at::zeros( {batch_size, output_channels*4, input_channels, output_height, output_width}, input.options() );
    // NOTE(review): 32-bit element count — assumes the total fits in int.
    const int num_kernels = batch_size * output_channels * input_channels * output_height * output_width;
    // scalar_type() is the non-deprecated replacement for type() here.
    AT_DISPATCH_FLOATING_TYPES(input.scalar_type(), "ask_d4_2456_2568_4568_2458_cuda_forward", ([&] {
        hipLaunchKernelGGL(( ask_d4_2456_2568_4568_2458_cuda_forward_kernel<scalar_t>), dim3(GET_BLOCKS(num_kernels)), dim3(CUDA_NUM_THREADS), 0, 0,
            num_kernels,
            input.packed_accessor32<scalar_t,4,torch::RestrictPtrTraits>(),
            weights.packed_accessor32<scalar_t,2,torch::RestrictPtrTraits>(),
            output.packed_accessor32<scalar_t,5,torch::RestrictPtrTraits>(),
            output_channels, input_channels, input_height, input_width, output_height, output_width, stride);
    }));
    // Surface launch-configuration errors instead of failing silently later.
    TORCH_CHECK(hipGetLastError() == hipSuccess, "ask_d4_2456_2568_4568_2458_cuda_forward kernel launch failed");
    return at::sum(output, 2);
}
// ASK_3a: [[2,4,5],[2,5,6],[4,5,8],[5,6,8]]
// ASK_3a forward kernel. Grid-stride loop: one iteration per output element,
// flat index decomposed as (b, n, c, py, px). Reads the cross-shaped taps
// 2 (up), 4 (left), 5 (center), 6 (right), 8 (down); out-of-image taps read
// as 0. The four output groups use the 3-tap subsets [2,4,5], [2,5,6],
// [4,5,8], [5,6,8], with weights laid out consecutively as
// weights[n][c + j*input_channels], j = 0..11.
template <typename scalar_t>
__global__ void ask_d4_245_256_458_568_cuda_forward_kernel(const int num_kernels,
    const torch::PackedTensorAccessor32<scalar_t,4,torch::RestrictPtrTraits> input,
    const torch::PackedTensorAccessor32<scalar_t,2,torch::RestrictPtrTraits> weights,
    torch::PackedTensorAccessor32<scalar_t,5,torch::RestrictPtrTraits> output,
    const int output_channels, const int input_channels, const int input_height, const int input_width, const int output_height, const int output_width, const int stride)
{
    CUDA_KERNEL_LOOP(index, num_kernels)
    {
        // Decompose the flat element index into output coordinates.
        const int px = index % output_width;
        const int py = (index / output_width) % output_height;
        const int c = (index / output_width / output_height) % input_channels;
        const int n = (index / output_width / output_height / input_channels) % output_channels;
        const int b = index / output_width / output_height / input_channels / output_channels;
        // Source pixel in the (unstrided) input image.
        const int opx = stride * px;
        const int opy = stride * py;
        // Cross-shaped taps, zero-padded at the image border.
        const scalar_t data2 = opy>0 ? input[b][c][opy-1][opx] : 0;
        const scalar_t data4 = opx>0 ? input[b][c][opy][opx-1] : 0;
        const scalar_t data5 = input[b][c][opy][opx];
        const scalar_t data6 = opx<(input_width-1) ? input[b][c][opy][opx+1] : 0;
        const scalar_t data8 = opy<(input_height-1) ? input[b][c][opy+1][opx] : 0;
        // Group 0: taps [2,4,5].
        output[b][n][c][py][px] = data2 * weights[n][c] + \
                                data4 * weights[n][c+input_channels] + \
                                data5 * weights[n][c+2*input_channels];
        // Group 1: taps [2,5,6].
        output[b][n+output_channels][c][py][px] = data2 * weights[n][c+3*input_channels] + \
                                data5 * weights[n][c+4*input_channels] + \
                                data6 * weights[n][c+5*input_channels];
        // Group 2: taps [4,5,8].
        output[b][n+2*output_channels][c][py][px] = data4 * weights[n][c+6*input_channels] + \
                                data5 * weights[n][c+7*input_channels] + \
                                data8 * weights[n][c+8*input_channels];
        // Group 3: taps [5,6,8].
        output[b][n+3*output_channels][c][py][px] = data5 * weights[n][c+9*input_channels] + \
                                data6 * weights[n][c+10*input_channels] + \
                                data8 * weights[n][c+11*input_channels];
    }
}
// Host wrapper for the ASK_3a forward pass (HIP launch).
// input:   (batch, in_channels, H, W)
// weights: (out_channels, >= 12*in_channels) — 4 groups x 3 taps.
// Returns (batch, 4*out_channels, ceil(H/stride), ceil(W/stride)) after
// summing the per-input-channel partials over dim 2.
torch::Tensor ask_d4_245_256_458_568_cuda_forward(torch::Tensor input, torch::Tensor weights, const int stride)
{
    const auto batch_size = input.size(0);
    const auto input_channels = input.size(1);
    const auto input_height = input.size(2);
    const auto input_width = input.size(3);
    const auto output_channels = weights.size(0);
    // Output spatial size is ceil(input / stride).
    const auto output_height = (input_height - 1) / stride + 1;
    const auto output_width = (input_width - 1) / stride + 1;
    auto output = at::zeros( {batch_size, output_channels*4, input_channels, output_height, output_width}, input.options() );
    // NOTE(review): 32-bit element count — assumes the total fits in int.
    const int num_kernels = batch_size * output_channels * input_channels * output_height * output_width;
    // scalar_type() is the non-deprecated replacement for type() here.
    AT_DISPATCH_FLOATING_TYPES(input.scalar_type(), "ask_d4_245_256_458_568_cuda_forward", ([&] {
        hipLaunchKernelGGL(( ask_d4_245_256_458_568_cuda_forward_kernel<scalar_t>), dim3(GET_BLOCKS(num_kernels)), dim3(CUDA_NUM_THREADS), 0, 0,
            num_kernels,
            input.packed_accessor32<scalar_t,4,torch::RestrictPtrTraits>(),
            weights.packed_accessor32<scalar_t,2,torch::RestrictPtrTraits>(),
            output.packed_accessor32<scalar_t,5,torch::RestrictPtrTraits>(),
            output_channels, input_channels, input_height, input_width, output_height, output_width, stride);
    }));
    // Surface launch-configuration errors instead of failing silently later.
    TORCH_CHECK(hipGetLastError() == hipSuccess, "ask_d4_245_256_458_568_cuda_forward kernel launch failed");
    return at::sum(output, 2);
}
// ASK_3b: [[2,5,8],[4,5,6]]
// ASK_3b forward kernel: for each output element, two groups are produced —
// group 0 from the vertical taps [2,5,8] and group 1 from the horizontal
// taps [4,5,6] of the 3x3 window around the strided source pixel.
// Out-of-image taps read as 0; weights[n][c + j*input_channels], j = 0..5.
template <typename scalar_t>
__global__ void ask_d2_258_456_cuda_forward_kernel(const int num_kernels,
    const torch::PackedTensorAccessor32<scalar_t,4,torch::RestrictPtrTraits> input,
    const torch::PackedTensorAccessor32<scalar_t,2,torch::RestrictPtrTraits> weights,
    torch::PackedTensorAccessor32<scalar_t,5,torch::RestrictPtrTraits> output,
    const int output_channels, const int input_channels, const int input_height, const int input_width, const int output_height, const int output_width, const int stride)
{
    CUDA_KERNEL_LOOP(index, num_kernels)
    {
        // Unravel the flat index into (b, n, c, py, px).
        int rest = index;
        const int px = rest % output_width;   rest /= output_width;
        const int py = rest % output_height;  rest /= output_height;
        const int c  = rest % input_channels; rest /= input_channels;
        const int n  = rest % output_channels;
        const int b  = rest / output_channels;
        // Source pixel in the (unstrided) input image.
        const int sx = stride * px;
        const int sy = stride * py;
        // Cross-shaped taps; zero outside the image.
        const scalar_t up     = (sy > 0)                ? input[b][c][sy-1][sx] : 0;
        const scalar_t left   = (sx > 0)                ? input[b][c][sy][sx-1] : 0;
        const scalar_t center = input[b][c][sy][sx];
        const scalar_t right  = (sx < input_width - 1)  ? input[b][c][sy][sx+1] : 0;
        const scalar_t down   = (sy < input_height - 1) ? input[b][c][sy+1][sx] : 0;
        // Group 0: vertical stripe [2,5,8].
        output[b][n][c][py][px] =
            up * weights[n][c]
            + center * weights[n][c + input_channels]
            + down * weights[n][c + 2 * input_channels];
        // Group 1: horizontal stripe [4,5,6].
        output[b][n + output_channels][c][py][px] =
            left * weights[n][c + 3 * input_channels]
            + center * weights[n][c + 4 * input_channels]
            + right * weights[n][c + 5 * input_channels];
    }
}
// Host wrapper for the ASK_3b forward pass (HIP launch).
// input:   (batch, in_channels, H, W)
// weights: (out_channels, >= 6*in_channels) — 2 groups x 3 taps.
// Returns (batch, 2*out_channels, ceil(H/stride), ceil(W/stride)) after
// summing the per-input-channel partials over dim 2.
torch::Tensor ask_d2_258_456_cuda_forward(torch::Tensor input, torch::Tensor weights, const int stride)
{
    const auto batch_size = input.size(0);
    const auto input_channels = input.size(1);
    const auto input_height = input.size(2);
    const auto input_width = input.size(3);
    const auto output_channels = weights.size(0);
    // Output spatial size is ceil(input / stride).
    const auto output_height = (input_height - 1) / stride + 1;
    const auto output_width = (input_width - 1) / stride + 1;
    auto output = at::zeros( {batch_size, output_channels*2, input_channels, output_height, output_width}, input.options() );
    // NOTE(review): 32-bit element count — assumes the total fits in int.
    const int num_kernels = batch_size * output_channels * input_channels * output_height * output_width;
    // scalar_type() is the non-deprecated replacement for type() here.
    AT_DISPATCH_FLOATING_TYPES(input.scalar_type(), "ask_d2_258_456_cuda_forward", ([&] {
        hipLaunchKernelGGL(( ask_d2_258_456_cuda_forward_kernel<scalar_t>), dim3(GET_BLOCKS(num_kernels)), dim3(CUDA_NUM_THREADS), 0, 0,
            num_kernels,
            input.packed_accessor32<scalar_t,4,torch::RestrictPtrTraits>(),
            weights.packed_accessor32<scalar_t,2,torch::RestrictPtrTraits>(),
            output.packed_accessor32<scalar_t,5,torch::RestrictPtrTraits>(),
            output_channels, input_channels, input_height, input_width, output_height, output_width, stride);
    }));
    // Surface launch-configuration errors instead of failing silently later.
    TORCH_CHECK(hipGetLastError() == hipSuccess, "ask_d2_258_456_cuda_forward kernel launch failed");
    return at::sum(output, 2);
}
// ASK_2: [[2,5],[4,5],[5,6],[5,8]]
// ASK_2 forward kernel. Grid-stride loop: one iteration per output element,
// flat index decomposed as (b, n, c, py, px). Reads the cross-shaped taps
// 2 (up), 4 (left), 5 (center), 6 (right), 8 (down); out-of-image taps read
// as 0. The four output groups use the 2-tap pairs [2,5], [4,5], [5,6],
// [5,8], with weights laid out as weights[n][c + j*input_channels], j = 0..7.
template <typename scalar_t>
__global__ void ask_d4_25_45_56_58_cuda_forward_kernel(const int num_kernels,
    const torch::PackedTensorAccessor32<scalar_t,4,torch::RestrictPtrTraits> input,
    const torch::PackedTensorAccessor32<scalar_t,2,torch::RestrictPtrTraits> weights,
    torch::PackedTensorAccessor32<scalar_t,5,torch::RestrictPtrTraits> output,
    const int output_channels, const int input_channels, const int input_height, const int input_width, const int output_height, const int output_width, const int stride)
{
    CUDA_KERNEL_LOOP(index, num_kernels)
    {
        // Decompose the flat element index into output coordinates.
        const int px = index % output_width;
        const int py = (index / output_width) % output_height;
        const int c = (index / output_width / output_height) % input_channels;
        const int n = (index / output_width / output_height / input_channels) % output_channels;
        const int b = index / output_width / output_height / input_channels / output_channels;
        // Source pixel in the (unstrided) input image.
        const int opx = stride * px;
        const int opy = stride * py;
        // Cross-shaped taps, zero-padded at the image border.
        const scalar_t data2 = opy>0 ? input[b][c][opy-1][opx] : 0;
        const scalar_t data4 = opx>0 ? input[b][c][opy][opx-1] : 0;
        const scalar_t data5 = input[b][c][opy][opx];
        const scalar_t data6 = opx<(input_width-1) ? input[b][c][opy][opx+1] : 0;
        const scalar_t data8 = opy<(input_height-1) ? input[b][c][opy+1][opx] : 0;
        // Group 0: taps [2,5].
        output[b][n][c][py][px] = data2 * weights[n][c] + \
                                data5 * weights[n][c+input_channels];
        // Group 1: taps [4,5].
        output[b][n+output_channels][c][py][px] = data4 * weights[n][c+2*input_channels] + \
                                data5 * weights[n][c+3*input_channels];
        // Group 2: taps [5,6].
        output[b][n+2*output_channels][c][py][px] = data5 * weights[n][c+4*input_channels] + \
                                data6 * weights[n][c+5*input_channels];
        // Group 3: taps [5,8].
        output[b][n+3*output_channels][c][py][px] = data5 * weights[n][c+6*input_channels] + \
                                data8 * weights[n][c+7*input_channels];
    }
}
// Host wrapper for the ASK_2 forward pass (HIP launch).
// input:   (batch, in_channels, H, W)
// weights: (out_channels, >= 8*in_channels) — 4 groups x 2 taps.
// Returns (batch, 4*out_channels, ceil(H/stride), ceil(W/stride)) after
// summing the per-input-channel partials over dim 2.
torch::Tensor ask_d4_25_45_56_58_cuda_forward(torch::Tensor input, torch::Tensor weights, const int stride)
{
    const auto batch_size = input.size(0);
    const auto input_channels = input.size(1);
    const auto input_height = input.size(2);
    const auto input_width = input.size(3);
    const auto output_channels = weights.size(0);
    // Output spatial size is ceil(input / stride).
    const auto output_height = (input_height - 1) / stride + 1;
    const auto output_width = (input_width - 1) / stride + 1;
    auto output = at::zeros( {batch_size, output_channels*4, input_channels, output_height, output_width}, input.options() );
    // NOTE(review): 32-bit element count — assumes the total fits in int.
    const int num_kernels = batch_size * output_channels * input_channels * output_height * output_width;
    // scalar_type() is the non-deprecated replacement for type() here.
    AT_DISPATCH_FLOATING_TYPES(input.scalar_type(), "ask_d4_25_45_56_58_cuda_forward", ([&] {
        hipLaunchKernelGGL(( ask_d4_25_45_56_58_cuda_forward_kernel<scalar_t>), dim3(GET_BLOCKS(num_kernels)), dim3(CUDA_NUM_THREADS), 0, 0,
            num_kernels,
            input.packed_accessor32<scalar_t,4,torch::RestrictPtrTraits>(),
            weights.packed_accessor32<scalar_t,2,torch::RestrictPtrTraits>(),
            output.packed_accessor32<scalar_t,5,torch::RestrictPtrTraits>(),
            output_channels, input_channels, input_height, input_width, output_height, output_width, stride);
    }));
    // Surface launch-configuration errors instead of failing silently later.
    TORCH_CHECK(hipGetLastError() == hipSuccess, "ask_d4_25_45_56_58_cuda_forward kernel launch failed");
    return at::sum(output, 2);
}
|
2cd6be744e7f3d3c1104e6d91c6e03b17d4d88bc.cu
|
#include <torch/extension.h>
#include <cuda.h>
#define CUDA_KERNEL_LOOP(i, n) \
for (int i = blockIdx.x * blockDim.x + threadIdx.x; i < (n); \
i += blockDim.x * gridDim.x)
const int CUDA_NUM_THREADS = 1024;
const int kMaxGridNum = 65535;
// Number of thread blocks needed to cover N elements with CUDA_NUM_THREADS
// threads each, capped at kMaxGridNum (the kernels iterate with a
// grid-stride loop, so capping the grid size is safe).
inline int GET_BLOCKS(const int N)
{
    // (N - 1) / T + 1 is ceil-division without the signed-overflow risk of
    // N + CUDA_NUM_THREADS - 1 when N is near INT_MAX. N <= 0 yields 0
    // blocks, matching the original behavior for N == 0.
    return std::min(kMaxGridNum, N > 0 ? (N - 1) / CUDA_NUM_THREADS + 1 : 0);
}
// ASK_7: [[1,2,4,5,6,8,9],[2,3,4,5,6,7,8]]
// ASK_7 forward kernel. Grid-stride loop: one iteration per output element,
// flat index decomposed as (b, n, c, py, px). data1..data9 are the 3x3 input
// window around the strided source pixel (row-major: 1 2 3 / 4 5 6 / 7 8 9);
// out-of-image taps read as 0. Two output groups use the 7-tap subsets
// [1,2,4,5,6,8,9] and [2,3,4,5,6,7,8], with weights laid out consecutively
// as weights[n][c + j*input_channels], j = 0..13.
template <typename scalar_t>
__global__ void ask_d2_1245689_2345678_cuda_forward_kernel(const int num_kernels,
    const torch::PackedTensorAccessor32<scalar_t,4,torch::RestrictPtrTraits> input,
    const torch::PackedTensorAccessor32<scalar_t,2,torch::RestrictPtrTraits> weights,
    torch::PackedTensorAccessor32<scalar_t,5,torch::RestrictPtrTraits> output,
    const int output_channels, const int input_channels, const int input_height, const int input_width, const int output_height, const int output_width, const int stride)
{
    CUDA_KERNEL_LOOP(index, num_kernels)
    {
        // Decompose the flat element index into output coordinates.
        const int px = index % output_width;
        const int py = (index / output_width) % output_height;
        const int c = (index / output_width / output_height) % input_channels;
        const int n = (index / output_width / output_height / input_channels) % output_channels;
        const int b = index / output_width / output_height / input_channels / output_channels;
        // Source pixel in the (unstrided) input image.
        const int opx = stride * px;
        const int opy = stride * py;
        // 3x3 neighborhood taps, zero-padded at the image border.
        const scalar_t data1 = (opx>0&&opy>0) ? input[b][c][opy-1][opx-1] : 0;
        const scalar_t data2 = opy>0 ? input[b][c][opy-1][opx] : 0;
        const scalar_t data3 = (opx<(input_width-1)&&opy>0) ? input[b][c][opy-1][opx+1] : 0;
        const scalar_t data4 = opx>0 ? input[b][c][opy][opx-1] : 0;
        const scalar_t data5 = input[b][c][opy][opx];
        const scalar_t data6 = opx<(input_width-1) ? input[b][c][opy][opx+1] : 0;
        const scalar_t data7 = (opx>0&&opy<(input_height-1)) ? input[b][c][opy+1][opx-1] : 0;
        const scalar_t data8 = opy<(input_height-1) ? input[b][c][opy+1][opx] : 0;
        const scalar_t data9 = (opx<(input_width-1)&&opy<(input_height-1)) ? input[b][c][opy+1][opx+1] : 0;
        // Group 0: taps [1,2,4,5,6,8,9].
        output[b][n][c][py][px] = data1 * weights[n][c] + \
                                data2 * weights[n][c+input_channels] + \
                                data4 * weights[n][c+2*input_channels] + \
                                data5 * weights[n][c+3*input_channels] + \
                                data6 * weights[n][c+4*input_channels] + \
                                data8 * weights[n][c+5*input_channels] + \
                                data9 * weights[n][c+6*input_channels];
        // Group 1: taps [2,3,4,5,6,7,8].
        output[b][n+output_channels][c][py][px] = data2 * weights[n][c+7*input_channels] + \
                                data3 * weights[n][c+8*input_channels] + \
                                data4 * weights[n][c+9*input_channels] + \
                                data5 * weights[n][c+10*input_channels] + \
                                data6 * weights[n][c+11*input_channels] + \
                                data7 * weights[n][c+12*input_channels] + \
                                data8 * weights[n][c+13*input_channels];
    }
}
// Host wrapper for the ASK_7 forward pass.
// input:   (batch, in_channels, H, W)
// weights: (out_channels, >= 14*in_channels) — 2 groups x 7 taps.
// Returns (batch, 2*out_channels, ceil(H/stride), ceil(W/stride)) after
// summing the per-input-channel partials over dim 2.
torch::Tensor ask_d2_1245689_2345678_cuda_forward(torch::Tensor input, torch::Tensor weights, const int stride)
{
    const auto batch_size = input.size(0);
    const auto input_channels = input.size(1);
    const auto input_height = input.size(2);
    const auto input_width = input.size(3);
    const auto output_channels = weights.size(0);
    // Output spatial size is ceil(input / stride).
    const auto output_height = (input_height - 1) / stride + 1;
    const auto output_width = (input_width - 1) / stride + 1;
    auto output = at::zeros( {batch_size, output_channels*2, input_channels, output_height, output_width}, input.options() );
    // NOTE(review): 32-bit element count — assumes the total fits in int.
    const int num_kernels = batch_size * output_channels * input_channels * output_height * output_width;
    // scalar_type() is the non-deprecated replacement for type() here.
    AT_DISPATCH_FLOATING_TYPES(input.scalar_type(), "ask_d2_1245689_2345678_cuda_forward", ([&] {
        ask_d2_1245689_2345678_cuda_forward_kernel<scalar_t><<<GET_BLOCKS(num_kernels), CUDA_NUM_THREADS>>>(
            num_kernels,
            input.packed_accessor32<scalar_t,4,torch::RestrictPtrTraits>(),
            weights.packed_accessor32<scalar_t,2,torch::RestrictPtrTraits>(),
            output.packed_accessor32<scalar_t,5,torch::RestrictPtrTraits>(),
            output_channels, input_channels, input_height, input_width, output_height, output_width, stride);
    }));
    // Surface launch-configuration errors instead of failing silently later.
    TORCH_CHECK(cudaGetLastError() == cudaSuccess, "ask_d2_1245689_2345678_cuda_forward kernel launch failed");
    return at::sum(output, 2);
}
// ASK_6: [[1,2,4,5,6,8],[2,3,4,5,6,8],[2,4,5,6,7,8],[2,4,5,6,8,9]]
// ASK_6 forward kernel. Grid-stride loop: one iteration per output element,
// flat index decomposed as (b, n, c, py, px). data1..data9 are the 3x3 input
// window around the strided source pixel (row-major: 1 2 3 / 4 5 6 / 7 8 9);
// out-of-image taps read as 0. Four output groups use the 6-tap subsets
// [1,2,4,5,6,8], [2,3,4,5,6,8], [2,4,5,6,7,8], [2,4,5,6,8,9] — the cross
// plus one corner each — with weights laid out consecutively as
// weights[n][c + j*input_channels], j = 0..23.
template <typename scalar_t>
__global__ void ask_d4_124568_234568_245678_245689_cuda_forward_kernel(const int num_kernels,
    const torch::PackedTensorAccessor32<scalar_t,4,torch::RestrictPtrTraits> input,
    const torch::PackedTensorAccessor32<scalar_t,2,torch::RestrictPtrTraits> weights,
    torch::PackedTensorAccessor32<scalar_t,5,torch::RestrictPtrTraits> output,
    const int output_channels, const int input_channels, const int input_height, const int input_width, const int output_height, const int output_width, const int stride)
{
    CUDA_KERNEL_LOOP(index, num_kernels)
    {
        // Decompose the flat element index into output coordinates.
        const int px = index % output_width;
        const int py = (index / output_width) % output_height;
        const int c = (index / output_width / output_height) % input_channels;
        const int n = (index / output_width / output_height / input_channels) % output_channels;
        const int b = index / output_width / output_height / input_channels / output_channels;
        // Source pixel in the (unstrided) input image.
        const int opx = stride * px;
        const int opy = stride * py;
        // 3x3 neighborhood taps, zero-padded at the image border.
        const scalar_t data1 = (opx>0&&opy>0) ? input[b][c][opy-1][opx-1] : 0;
        const scalar_t data2 = opy>0 ? input[b][c][opy-1][opx] : 0;
        const scalar_t data3 = (opx<(input_width-1)&&opy>0) ? input[b][c][opy-1][opx+1] : 0;
        const scalar_t data4 = opx>0 ? input[b][c][opy][opx-1] : 0;
        const scalar_t data5 = input[b][c][opy][opx];
        const scalar_t data6 = opx<(input_width-1) ? input[b][c][opy][opx+1] : 0;
        const scalar_t data7 = (opx>0&&opy<(input_height-1)) ? input[b][c][opy+1][opx-1] : 0;
        const scalar_t data8 = opy<(input_height-1) ? input[b][c][opy+1][opx] : 0;
        const scalar_t data9 = (opx<(input_width-1)&&opy<(input_height-1)) ? input[b][c][opy+1][opx+1] : 0;
        // Group 0: taps [1,2,4,5,6,8] (cross + top-left corner).
        output[b][n][c][py][px] = data1 * weights[n][c] + \
                                data2 * weights[n][c+input_channels] + \
                                data4 * weights[n][c+2*input_channels] + \
                                data5 * weights[n][c+3*input_channels] + \
                                data6 * weights[n][c+4*input_channels] + \
                                data8 * weights[n][c+5*input_channels];
        // Group 1: taps [2,3,4,5,6,8] (cross + top-right corner).
        output[b][n+output_channels][c][py][px] = data2 * weights[n][c+6*input_channels] + \
                                data3 * weights[n][c+7*input_channels] + \
                                data4 * weights[n][c+8*input_channels] + \
                                data5 * weights[n][c+9*input_channels] + \
                                data6 * weights[n][c+10*input_channels] + \
                                data8 * weights[n][c+11*input_channels];
        // Group 2: taps [2,4,5,6,7,8] (cross + bottom-left corner).
        output[b][n+2*output_channels][c][py][px] = data2 * weights[n][c+12*input_channels] + \
                                data4 * weights[n][c+13*input_channels] + \
                                data5 * weights[n][c+14*input_channels] + \
                                data6 * weights[n][c+15*input_channels] + \
                                data7 * weights[n][c+16*input_channels] + \
                                data8 * weights[n][c+17*input_channels];
        // Group 3: taps [2,4,5,6,8,9] (cross + bottom-right corner).
        output[b][n+3*output_channels][c][py][px] = data2 * weights[n][c+18*input_channels] + \
                                data4 * weights[n][c+19*input_channels] + \
                                data5 * weights[n][c+20*input_channels] + \
                                data6 * weights[n][c+21*input_channels] + \
                                data8 * weights[n][c+22*input_channels] + \
                                data9 * weights[n][c+23*input_channels];
    }
}
// Host wrapper for the ASK_6 forward pass.
// input:   (batch, in_channels, H, W)
// weights: (out_channels, >= 24*in_channels) — 4 groups x 6 taps.
// Returns (batch, 4*out_channels, ceil(H/stride), ceil(W/stride)) after
// summing the per-input-channel partials over dim 2.
torch::Tensor ask_d4_124568_234568_245678_245689_cuda_forward(torch::Tensor input, torch::Tensor weights, const int stride)
{
    const auto batch_size = input.size(0);
    const auto input_channels = input.size(1);
    const auto input_height = input.size(2);
    const auto input_width = input.size(3);
    const auto output_channels = weights.size(0);
    // Output spatial size is ceil(input / stride).
    const auto output_height = (input_height - 1) / stride + 1;
    const auto output_width = (input_width - 1) / stride + 1;
    auto output = at::zeros( {batch_size, output_channels*4, input_channels, output_height, output_width}, input.options() );
    // NOTE(review): 32-bit element count — assumes the total fits in int.
    const int num_kernels = batch_size * output_channels * input_channels * output_height * output_width;
    // scalar_type() is the non-deprecated replacement for type() here.
    AT_DISPATCH_FLOATING_TYPES(input.scalar_type(), "ask_d4_124568_234568_245678_245689_cuda_forward", ([&] {
        ask_d4_124568_234568_245678_245689_cuda_forward_kernel<scalar_t><<<GET_BLOCKS(num_kernels), CUDA_NUM_THREADS>>>(
            num_kernels,
            input.packed_accessor32<scalar_t,4,torch::RestrictPtrTraits>(),
            weights.packed_accessor32<scalar_t,2,torch::RestrictPtrTraits>(),
            output.packed_accessor32<scalar_t,5,torch::RestrictPtrTraits>(),
            output_channels, input_channels, input_height, input_width, output_height, output_width, stride);
    }));
    // Surface launch-configuration errors instead of failing silently later.
    TORCH_CHECK(cudaGetLastError() == cudaSuccess, "ask_d4_124568_234568_245678_245689_cuda_forward kernel launch failed");
    return at::sum(output, 2);
}
// ASK_5a: [[2,4,5,6,8]]
// ASK_5a forward kernel: a single output group per output channel, formed
// from the cross-shaped taps [2,4,5,6,8] of the 3x3 window around each
// strided source pixel. Out-of-image taps read as 0; the weight for tap j
// of input channel c is weights[n][c + j*input_channels], j = 0..4.
template <typename scalar_t>
__global__ void ask_d1_24568_cuda_forward_kernel(const int num_kernels,
    const torch::PackedTensorAccessor32<scalar_t,4,torch::RestrictPtrTraits> input,
    const torch::PackedTensorAccessor32<scalar_t,2,torch::RestrictPtrTraits> weights,
    torch::PackedTensorAccessor32<scalar_t,5,torch::RestrictPtrTraits> output,
    const int output_channels, const int input_channels, const int input_height, const int input_width, const int output_height, const int output_width, const int stride)
{
    CUDA_KERNEL_LOOP(index, num_kernels)
    {
        // Unravel the flat index into (b, n, c, py, px).
        int rest = index;
        const int px = rest % output_width;   rest /= output_width;
        const int py = rest % output_height;  rest /= output_height;
        const int c  = rest % input_channels; rest /= input_channels;
        const int n  = rest % output_channels;
        const int b  = rest / output_channels;
        // Source pixel in the (unstrided) input image.
        const int sx = stride * px;
        const int sy = stride * py;
        // Cross-shaped taps; zero outside the image.
        const scalar_t up     = (sy > 0)                ? input[b][c][sy-1][sx] : 0;
        const scalar_t left   = (sx > 0)                ? input[b][c][sy][sx-1] : 0;
        const scalar_t center = input[b][c][sy][sx];
        const scalar_t right  = (sx < input_width - 1)  ? input[b][c][sy][sx+1] : 0;
        const scalar_t down   = (sy < input_height - 1) ? input[b][c][sy+1][sx] : 0;
        // Accumulate taps in the same left-to-right order as the original
        // expression so the floating-point result is bit-identical.
        scalar_t acc = up * weights[n][c];
        acc += left * weights[n][c + input_channels];
        acc += center * weights[n][c + 2 * input_channels];
        acc += right * weights[n][c + 3 * input_channels];
        acc += down * weights[n][c + 4 * input_channels];
        output[b][n][c][py][px] = acc;
    }
}
// Host wrapper for the ASK_5a forward pass.
// input:   (batch, in_channels, H, W)
// weights: (out_channels, >= 5*in_channels) — 1 group x 5 taps.
// Returns (batch, out_channels, ceil(H/stride), ceil(W/stride)) after
// summing the per-input-channel partials over dim 2.
torch::Tensor ask_d1_24568_cuda_forward(torch::Tensor input, torch::Tensor weights, const int stride)
{
    const auto batch_size = input.size(0);
    const auto input_channels = input.size(1);
    const auto input_height = input.size(2);
    const auto input_width = input.size(3);
    const auto output_channels = weights.size(0);
    // Output spatial size is ceil(input / stride).
    const auto output_height = (input_height - 1) / stride + 1;
    const auto output_width = (input_width - 1) / stride + 1;
    auto output = at::zeros( {batch_size, output_channels*1, input_channels, output_height, output_width}, input.options() );
    // NOTE(review): 32-bit element count — assumes the total fits in int.
    const int num_kernels = batch_size * output_channels * input_channels * output_height * output_width;
    // scalar_type() is the non-deprecated replacement for type() here.
    AT_DISPATCH_FLOATING_TYPES(input.scalar_type(), "ask_d1_24568_cuda_forward", ([&] {
        ask_d1_24568_cuda_forward_kernel<scalar_t><<<GET_BLOCKS(num_kernels), CUDA_NUM_THREADS>>>(
            num_kernels,
            input.packed_accessor32<scalar_t,4,torch::RestrictPtrTraits>(),
            weights.packed_accessor32<scalar_t,2,torch::RestrictPtrTraits>(),
            output.packed_accessor32<scalar_t,5,torch::RestrictPtrTraits>(),
            output_channels, input_channels, input_height, input_width, output_height, output_width, stride);
    }));
    // Surface launch-configuration errors instead of failing silently later.
    TORCH_CHECK(cudaGetLastError() == cudaSuccess, "ask_d1_24568_cuda_forward kernel launch failed");
    return at::sum(output, 2);
}
// ASK_5b: [[2,4,5,6,8],[2,4,5,6,8],[2,4,5,6,8],[1,3,5,7,9]]
// ASK_5b forward kernel. Grid-stride loop: one iteration per output element,
// flat index decomposed as (b, n, c, py, px). data1..data9 are the 3x3 input
// window around the strided source pixel (row-major: 1 2 3 / 4 5 6 / 7 8 9);
// out-of-image taps read as 0. Four output groups: the first three each use
// the cross taps [2,4,5,6,8] (with independent weights), the fourth uses the
// diagonal taps [1,3,5,7,9]. Weights laid out consecutively as
// weights[n][c + j*input_channels], j = 0..19.
template <typename scalar_t>
__global__ void ask_d4_24568_24568_24568_13579_cuda_forward_kernel(const int num_kernels,
    const torch::PackedTensorAccessor32<scalar_t,4,torch::RestrictPtrTraits> input,
    const torch::PackedTensorAccessor32<scalar_t,2,torch::RestrictPtrTraits> weights,
    torch::PackedTensorAccessor32<scalar_t,5,torch::RestrictPtrTraits> output,
    const int output_channels, const int input_channels, const int input_height, const int input_width, const int output_height, const int output_width, const int stride)
{
    CUDA_KERNEL_LOOP(index, num_kernels)
    {
        // Decompose the flat element index into output coordinates.
        const int px = index % output_width;
        const int py = (index / output_width) % output_height;
        const int c = (index / output_width / output_height) % input_channels;
        const int n = (index / output_width / output_height / input_channels) % output_channels;
        const int b = index / output_width / output_height / input_channels / output_channels;
        // Source pixel in the (unstrided) input image.
        const int opx = stride * px;
        const int opy = stride * py;
        // 3x3 neighborhood taps, zero-padded at the image border.
        const scalar_t data1 = (opx>0&&opy>0) ? input[b][c][opy-1][opx-1] : 0;
        const scalar_t data2 = opy>0 ? input[b][c][opy-1][opx] : 0;
        const scalar_t data3 = (opx<(input_width-1)&&opy>0) ? input[b][c][opy-1][opx+1] : 0;
        const scalar_t data4 = opx>0 ? input[b][c][opy][opx-1] : 0;
        const scalar_t data5 = input[b][c][opy][opx];
        const scalar_t data6 = opx<(input_width-1) ? input[b][c][opy][opx+1] : 0;
        const scalar_t data7 = (opx>0&&opy<(input_height-1)) ? input[b][c][opy+1][opx-1] : 0;
        const scalar_t data8 = opy<(input_height-1) ? input[b][c][opy+1][opx] : 0;
        const scalar_t data9 = (opx<(input_width-1)&&opy<(input_height-1)) ? input[b][c][opy+1][opx+1] : 0;
        // Group 0: cross taps [2,4,5,6,8], weights j = 0..4.
        output[b][n][c][py][px] = data2 * weights[n][c] + \
                                data4 * weights[n][c+input_channels] + \
                                data5 * weights[n][c+2*input_channels] + \
                                data6 * weights[n][c+3*input_channels] + \
                                data8 * weights[n][c+4*input_channels];
        // Group 1: cross taps again, weights j = 5..9.
        output[b][n+output_channels][c][py][px] = data2 * weights[n][c+5*input_channels] + \
                                data4 * weights[n][c+6*input_channels] + \
                                data5 * weights[n][c+7*input_channels] + \
                                data6 * weights[n][c+8*input_channels] + \
                                data8 * weights[n][c+9*input_channels];
        // Group 2: cross taps again, weights j = 10..14.
        output[b][n+2*output_channels][c][py][px] = data2 * weights[n][c+10*input_channels] + \
                                data4 * weights[n][c+11*input_channels] + \
                                data5 * weights[n][c+12*input_channels] + \
                                data6 * weights[n][c+13*input_channels] + \
                                data8 * weights[n][c+14*input_channels];
        // Group 3: diagonal taps [1,3,5,7,9], weights j = 15..19.
        output[b][n+3*output_channels][c][py][px] = data1 * weights[n][c+15*input_channels] + \
                                data3 * weights[n][c+16*input_channels] + \
                                data5 * weights[n][c+17*input_channels] + \
                                data7 * weights[n][c+18*input_channels] + \
                                data9 * weights[n][c+19*input_channels];
    }
}
// Host wrapper for the ASK_5b forward pass.
// input:   (batch, in_channels, H, W)
// weights: (out_channels, >= 20*in_channels) — 4 groups x 5 taps.
// Returns (batch, 4*out_channels, ceil(H/stride), ceil(W/stride)) after
// summing the per-input-channel partials over dim 2.
torch::Tensor ask_d4_24568_24568_24568_13579_cuda_forward(torch::Tensor input, torch::Tensor weights, const int stride)
{
    const auto batch_size = input.size(0);
    const auto input_channels = input.size(1);
    const auto input_height = input.size(2);
    const auto input_width = input.size(3);
    const auto output_channels = weights.size(0);
    // Output spatial size is ceil(input / stride).
    const auto output_height = (input_height - 1) / stride + 1;
    const auto output_width = (input_width - 1) / stride + 1;
    auto output = at::zeros( {batch_size, output_channels*4, input_channels, output_height, output_width}, input.options() );
    // NOTE(review): 32-bit element count — assumes the total fits in int.
    const int num_kernels = batch_size * output_channels * input_channels * output_height * output_width;
    // scalar_type() is the non-deprecated replacement for type() here.
    AT_DISPATCH_FLOATING_TYPES(input.scalar_type(), "ask_d4_24568_24568_24568_13579_cuda_forward", ([&] {
        ask_d4_24568_24568_24568_13579_cuda_forward_kernel<scalar_t><<<GET_BLOCKS(num_kernels), CUDA_NUM_THREADS>>>(
            num_kernels,
            input.packed_accessor32<scalar_t,4,torch::RestrictPtrTraits>(),
            weights.packed_accessor32<scalar_t,2,torch::RestrictPtrTraits>(),
            output.packed_accessor32<scalar_t,5,torch::RestrictPtrTraits>(),
            output_channels, input_channels, input_height, input_width, output_height, output_width, stride);
    }));
    // Surface launch-configuration errors instead of failing silently later.
    TORCH_CHECK(cudaGetLastError() == cudaSuccess, "ask_d4_24568_24568_24568_13579_cuda_forward kernel launch failed");
    return at::sum(output, 2);
}
// ASK_4a: [[1,2,4,5],[2,3,5,6],[4,5,7,8],[5,6,8,9]]
// ASK 4-direction forward kernel; tap groups [[1,2,4,5],[2,3,5,6],[4,5,7,8],[5,6,8,9]]
// over a 3x3 neighbourhood numbered 1..9 row-major (5 = centre).
// One thread per (b, n, c, py, px) partial-output element; taps outside the
// image read as 0 (implicit zero padding).  weights is addressed as
// weights[n][k*C_in + c] with k = 0..15 (4 groups x 4 taps); group g writes
// output channel n + g*output_channels.  The host wrapper sums over C_in.
template <typename scalar_t>
__global__ void ask_d4_1245_2356_4578_5689_cuda_forward_kernel(const int num_kernels,
const torch::PackedTensorAccessor32<scalar_t,4,torch::RestrictPtrTraits> input,
const torch::PackedTensorAccessor32<scalar_t,2,torch::RestrictPtrTraits> weights,
torch::PackedTensorAccessor32<scalar_t,5,torch::RestrictPtrTraits> output,
const int output_channels, const int input_channels, const int input_height, const int input_width, const int output_height, const int output_width, const int stride)
{
CUDA_KERNEL_LOOP(index, num_kernels)
{
// Decode the flat index into (b, n, c, py, px).
const int px = index % output_width;
const int py = (index / output_width) % output_height;
const int c = (index / output_width / output_height) % input_channels;
const int n = (index / output_width / output_height / input_channels) % output_channels;
const int b = index / output_width / output_height / input_channels / output_channels;
// (opy, opx): centre tap in input coordinates.
const int opx = stride * px;
const int opy = stride * py;
// data1..data9: the 3x3 neighbourhood, zero outside the image bounds.
const scalar_t data1 = (opx>0&&opy>0) ? input[b][c][opy-1][opx-1] : 0;
const scalar_t data2 = opy>0 ? input[b][c][opy-1][opx] : 0;
const scalar_t data3 = (opx<(input_width-1)&&opy>0) ? input[b][c][opy-1][opx+1] : 0;
const scalar_t data4 = opx>0 ? input[b][c][opy][opx-1] : 0;
const scalar_t data5 = input[b][c][opy][opx];
const scalar_t data6 = opx<(input_width-1) ? input[b][c][opy][opx+1] : 0;
const scalar_t data7 = (opx>0&&opy<(input_height-1)) ? input[b][c][opy+1][opx-1] : 0;
const scalar_t data8 = opy<(input_height-1) ? input[b][c][opy+1][opx] : 0;
const scalar_t data9 = (opx<(input_width-1)&&opy<(input_height-1)) ? input[b][c][opy+1][opx+1] : 0;
// Group 0 (top-left taps 1,2,4,5) -> channel slice n.
output[b][n][c][py][px] = data1 * weights[n][c] + \
data2 * weights[n][c+input_channels] + \
data4 * weights[n][c+2*input_channels] + \
data5 * weights[n][c+3*input_channels];
// Group 1 (top-right taps 2,3,5,6) -> channel slice n + C_out.
output[b][n+output_channels][c][py][px] = data2 * weights[n][c+4*input_channels] + \
data3 * weights[n][c+5*input_channels] + \
data5 * weights[n][c+6*input_channels] + \
data6 * weights[n][c+7*input_channels];
// Group 2 (bottom-left taps 4,5,7,8) -> channel slice n + 2*C_out.
output[b][n+2*output_channels][c][py][px] = data4 * weights[n][c+8*input_channels] + \
data5 * weights[n][c+9*input_channels] + \
data7 * weights[n][c+10*input_channels] + \
data8 * weights[n][c+11*input_channels];
// Group 3 (bottom-right taps 5,6,8,9) -> channel slice n + 3*C_out.
output[b][n+3*output_channels][c][py][px] = data5 * weights[n][c+12*input_channels] + \
data6 * weights[n][c+13*input_channels] + \
data8 * weights[n][c+14*input_channels] + \
data9 * weights[n][c+15*input_channels];
}
}
// Host entry for the ASK d4 1245/2356/4578/5689 forward pass.
// input: [B, C_in, H, W]; weights: [C_out, 16 * C_in]; returns
// [B, 4*C_out, H_out, W_out] after reducing the per-C_in partials.
torch::Tensor ask_d4_1245_2356_4578_5689_cuda_forward(torch::Tensor input, torch::Tensor weights, const int stride)
{
    const auto batch_size = input.size(0);
    const auto input_channels = input.size(1);
    const auto input_height = input.size(2);
    const auto input_width = input.size(3);
    const auto output_channels = weights.size(0);
    // Ceil-division output size.
    const auto output_height = (input_height - 1) / stride + 1;
    const auto output_width = (input_width - 1) / stride + 1;
    auto output = at::zeros( {batch_size, output_channels*4, input_channels, output_height, output_width}, input.options() );
    const int num_kernels = batch_size * output_channels * input_channels * output_height * output_width;
    // scalar_type() replaces the deprecated Tensor::type() overload of AT_DISPATCH.
    AT_DISPATCH_FLOATING_TYPES(input.scalar_type(), "ask_d4_1245_2356_4578_5689_cuda_forward", ([&] {
        ask_d4_1245_2356_4578_5689_cuda_forward_kernel<scalar_t><<<GET_BLOCKS(num_kernels), CUDA_NUM_THREADS>>>(
            num_kernels,
            input.packed_accessor32<scalar_t,4,torch::RestrictPtrTraits>(),
            weights.packed_accessor32<scalar_t,2,torch::RestrictPtrTraits>(),
            output.packed_accessor32<scalar_t,5,torch::RestrictPtrTraits>(),
            output_channels, input_channels, input_height, input_width, output_height, output_width, stride);
    }));
    return at::sum(output, 2);
}
// ASK_4b: [[2,4,5,6],[2,5,6,8],[4,5,6,8],[2,4,5,8]]
// ASK 4-direction forward kernel; tap groups [[2,4,5,6],[2,5,6,8],[4,5,6,8],[2,4,5,8]]
// (3x3 neighbourhood numbered 1..9 row-major; only the cross taps 2,4,5,6,8
// are ever read).  One thread per (b, n, c, py, px) partial element; taps
// outside the image read as 0.  weights[n][k*C_in + c], k = 0..15; group g
// writes output channel n + g*output_channels.  Host wrapper sums over C_in.
template <typename scalar_t>
__global__ void ask_d4_2456_2568_4568_2458_cuda_forward_kernel(const int num_kernels,
const torch::PackedTensorAccessor32<scalar_t,4,torch::RestrictPtrTraits> input,
const torch::PackedTensorAccessor32<scalar_t,2,torch::RestrictPtrTraits> weights,
torch::PackedTensorAccessor32<scalar_t,5,torch::RestrictPtrTraits> output,
const int output_channels, const int input_channels, const int input_height, const int input_width, const int output_height, const int output_width, const int stride)
{
CUDA_KERNEL_LOOP(index, num_kernels)
{
// Decode the flat index into (b, n, c, py, px).
const int px = index % output_width;
const int py = (index / output_width) % output_height;
const int c = (index / output_width / output_height) % input_channels;
const int n = (index / output_width / output_height / input_channels) % output_channels;
const int b = index / output_width / output_height / input_channels / output_channels;
// (opy, opx): centre tap in input coordinates.
const int opx = stride * px;
const int opy = stride * py;
// Cross-shaped neighbourhood (up, left, centre, right, down); zero padding.
const scalar_t data2 = opy>0 ? input[b][c][opy-1][opx] : 0;
const scalar_t data4 = opx>0 ? input[b][c][opy][opx-1] : 0;
const scalar_t data5 = input[b][c][opy][opx];
const scalar_t data6 = opx<(input_width-1) ? input[b][c][opy][opx+1] : 0;
const scalar_t data8 = opy<(input_height-1) ? input[b][c][opy+1][opx] : 0;
output[b][n][c][py][px] = data2 * weights[n][c] + \
data4 * weights[n][c+input_channels] + \
data5 * weights[n][c+2*input_channels] + \
data6 * weights[n][c+3*input_channels];
output[b][n+output_channels][c][py][px] = data2 * weights[n][c+4*input_channels] + \
data5 * weights[n][c+5*input_channels] + \
data6 * weights[n][c+6*input_channels] + \
data8 * weights[n][c+7*input_channels];
output[b][n+2*output_channels][c][py][px] = data4 * weights[n][c+8*input_channels] + \
data5 * weights[n][c+9*input_channels] + \
data6 * weights[n][c+10*input_channels] + \
data8 * weights[n][c+11*input_channels];
output[b][n+3*output_channels][c][py][px] = data2 * weights[n][c+12*input_channels] + \
data4 * weights[n][c+13*input_channels] + \
data5 * weights[n][c+14*input_channels] + \
data8 * weights[n][c+15*input_channels];
}
}
// Host entry for the ASK d4 2456/2568/4568/2458 forward pass.
// input: [B, C_in, H, W]; weights: [C_out, 16 * C_in]; returns
// [B, 4*C_out, H_out, W_out] after reducing the per-C_in partials.
torch::Tensor ask_d4_2456_2568_4568_2458_cuda_forward(torch::Tensor input, torch::Tensor weights, const int stride)
{
    const auto batch_size = input.size(0);
    const auto input_channels = input.size(1);
    const auto input_height = input.size(2);
    const auto input_width = input.size(3);
    const auto output_channels = weights.size(0);
    // Ceil-division output size.
    const auto output_height = (input_height - 1) / stride + 1;
    const auto output_width = (input_width - 1) / stride + 1;
    auto output = at::zeros( {batch_size, output_channels*4, input_channels, output_height, output_width}, input.options() );
    const int num_kernels = batch_size * output_channels * input_channels * output_height * output_width;
    // scalar_type() replaces the deprecated Tensor::type() overload of AT_DISPATCH.
    AT_DISPATCH_FLOATING_TYPES(input.scalar_type(), "ask_d4_2456_2568_4568_2458_cuda_forward", ([&] {
        ask_d4_2456_2568_4568_2458_cuda_forward_kernel<scalar_t><<<GET_BLOCKS(num_kernels), CUDA_NUM_THREADS>>>(
            num_kernels,
            input.packed_accessor32<scalar_t,4,torch::RestrictPtrTraits>(),
            weights.packed_accessor32<scalar_t,2,torch::RestrictPtrTraits>(),
            output.packed_accessor32<scalar_t,5,torch::RestrictPtrTraits>(),
            output_channels, input_channels, input_height, input_width, output_height, output_width, stride);
    }));
    return at::sum(output, 2);
}
// ASK_3a: [[2,4,5],[2,5,6],[4,5,8],[5,6,8]]
// ASK 4-direction forward kernel; 3-tap groups [[2,4,5],[2,5,6],[4,5,8],[5,6,8]]
// (cross taps of the 3x3 neighbourhood, numbered 1..9 row-major; 5 = centre).
// One thread per (b, n, c, py, px) partial element; out-of-image taps read 0.
// weights[n][k*C_in + c], k = 0..11; group g writes channel n + g*output_channels.
// Host wrapper sums over the input-channel axis.
template <typename scalar_t>
__global__ void ask_d4_245_256_458_568_cuda_forward_kernel(const int num_kernels,
const torch::PackedTensorAccessor32<scalar_t,4,torch::RestrictPtrTraits> input,
const torch::PackedTensorAccessor32<scalar_t,2,torch::RestrictPtrTraits> weights,
torch::PackedTensorAccessor32<scalar_t,5,torch::RestrictPtrTraits> output,
const int output_channels, const int input_channels, const int input_height, const int input_width, const int output_height, const int output_width, const int stride)
{
CUDA_KERNEL_LOOP(index, num_kernels)
{
// Decode the flat index into (b, n, c, py, px).
const int px = index % output_width;
const int py = (index / output_width) % output_height;
const int c = (index / output_width / output_height) % input_channels;
const int n = (index / output_width / output_height / input_channels) % output_channels;
const int b = index / output_width / output_height / input_channels / output_channels;
// (opy, opx): centre tap in input coordinates.
const int opx = stride * px;
const int opy = stride * py;
// Cross-shaped neighbourhood with zero padding at the borders.
const scalar_t data2 = opy>0 ? input[b][c][opy-1][opx] : 0;
const scalar_t data4 = opx>0 ? input[b][c][opy][opx-1] : 0;
const scalar_t data5 = input[b][c][opy][opx];
const scalar_t data6 = opx<(input_width-1) ? input[b][c][opy][opx+1] : 0;
const scalar_t data8 = opy<(input_height-1) ? input[b][c][opy+1][opx] : 0;
output[b][n][c][py][px] = data2 * weights[n][c] + \
data4 * weights[n][c+input_channels] + \
data5 * weights[n][c+2*input_channels];
output[b][n+output_channels][c][py][px] = data2 * weights[n][c+3*input_channels] + \
data5 * weights[n][c+4*input_channels] + \
data6 * weights[n][c+5*input_channels];
output[b][n+2*output_channels][c][py][px] = data4 * weights[n][c+6*input_channels] + \
data5 * weights[n][c+7*input_channels] + \
data8 * weights[n][c+8*input_channels];
output[b][n+3*output_channels][c][py][px] = data5 * weights[n][c+9*input_channels] + \
data6 * weights[n][c+10*input_channels] + \
data8 * weights[n][c+11*input_channels];
}
}
// Host entry for the ASK d4 245/256/458/568 forward pass.
// input: [B, C_in, H, W]; weights: [C_out, 12 * C_in]; returns
// [B, 4*C_out, H_out, W_out] after reducing the per-C_in partials.
torch::Tensor ask_d4_245_256_458_568_cuda_forward(torch::Tensor input, torch::Tensor weights, const int stride)
{
    const auto batch_size = input.size(0);
    const auto input_channels = input.size(1);
    const auto input_height = input.size(2);
    const auto input_width = input.size(3);
    const auto output_channels = weights.size(0);
    // Ceil-division output size.
    const auto output_height = (input_height - 1) / stride + 1;
    const auto output_width = (input_width - 1) / stride + 1;
    auto output = at::zeros( {batch_size, output_channels*4, input_channels, output_height, output_width}, input.options() );
    const int num_kernels = batch_size * output_channels * input_channels * output_height * output_width;
    // scalar_type() replaces the deprecated Tensor::type() overload of AT_DISPATCH.
    AT_DISPATCH_FLOATING_TYPES(input.scalar_type(), "ask_d4_245_256_458_568_cuda_forward", ([&] {
        ask_d4_245_256_458_568_cuda_forward_kernel<scalar_t><<<GET_BLOCKS(num_kernels), CUDA_NUM_THREADS>>>(
            num_kernels,
            input.packed_accessor32<scalar_t,4,torch::RestrictPtrTraits>(),
            weights.packed_accessor32<scalar_t,2,torch::RestrictPtrTraits>(),
            output.packed_accessor32<scalar_t,5,torch::RestrictPtrTraits>(),
            output_channels, input_channels, input_height, input_width, output_height, output_width, stride);
    }));
    return at::sum(output, 2);
}
// ASK_3b: [[2,5,8],[4,5,6]]
// ASK 2-direction forward kernel; tap groups [[2,5,8],[4,5,6]] — a vertical
// and a horizontal 3-tap line through the centre of the 3x3 neighbourhood
// (taps numbered 1..9 row-major).  One thread per (b, n, c, py, px) partial
// element; out-of-image taps read 0.  weights[n][k*C_in + c], k = 0..5;
// group g writes channel n + g*output_channels.  Host wrapper sums over C_in.
template <typename scalar_t>
__global__ void ask_d2_258_456_cuda_forward_kernel(const int num_kernels,
const torch::PackedTensorAccessor32<scalar_t,4,torch::RestrictPtrTraits> input,
const torch::PackedTensorAccessor32<scalar_t,2,torch::RestrictPtrTraits> weights,
torch::PackedTensorAccessor32<scalar_t,5,torch::RestrictPtrTraits> output,
const int output_channels, const int input_channels, const int input_height, const int input_width, const int output_height, const int output_width, const int stride)
{
CUDA_KERNEL_LOOP(index, num_kernels)
{
// Decode the flat index into (b, n, c, py, px).
const int px = index % output_width;
const int py = (index / output_width) % output_height;
const int c = (index / output_width / output_height) % input_channels;
const int n = (index / output_width / output_height / input_channels) % output_channels;
const int b = index / output_width / output_height / input_channels / output_channels;
// (opy, opx): centre tap in input coordinates.
const int opx = stride * px;
const int opy = stride * py;
// Cross-shaped neighbourhood with zero padding at the borders.
const scalar_t data2 = opy>0 ? input[b][c][opy-1][opx] : 0;
const scalar_t data4 = opx>0 ? input[b][c][opy][opx-1] : 0;
const scalar_t data5 = input[b][c][opy][opx];
const scalar_t data6 = opx<(input_width-1) ? input[b][c][opy][opx+1] : 0;
const scalar_t data8 = opy<(input_height-1) ? input[b][c][opy+1][opx] : 0;
// Vertical line (2,5,8) -> channel slice n.
output[b][n][c][py][px] = data2 * weights[n][c] + \
data5 * weights[n][c+input_channels] + \
data8 * weights[n][c+2*input_channels];
// Horizontal line (4,5,6) -> channel slice n + C_out.
output[b][n+output_channels][c][py][px] = data4 * weights[n][c+3*input_channels] + \
data5 * weights[n][c+4*input_channels] + \
data6 * weights[n][c+5*input_channels];
}
}
// Host entry for the ASK d2 258/456 forward pass.
// input: [B, C_in, H, W]; weights: [C_out, 6 * C_in]; returns
// [B, 2*C_out, H_out, W_out] after reducing the per-C_in partials.
torch::Tensor ask_d2_258_456_cuda_forward(torch::Tensor input, torch::Tensor weights, const int stride)
{
    const auto batch_size = input.size(0);
    const auto input_channels = input.size(1);
    const auto input_height = input.size(2);
    const auto input_width = input.size(3);
    const auto output_channels = weights.size(0);
    // Ceil-division output size.
    const auto output_height = (input_height - 1) / stride + 1;
    const auto output_width = (input_width - 1) / stride + 1;
    // Only two direction groups here, hence output_channels*2.
    auto output = at::zeros( {batch_size, output_channels*2, input_channels, output_height, output_width}, input.options() );
    const int num_kernels = batch_size * output_channels * input_channels * output_height * output_width;
    // scalar_type() replaces the deprecated Tensor::type() overload of AT_DISPATCH.
    AT_DISPATCH_FLOATING_TYPES(input.scalar_type(), "ask_d2_258_456_cuda_forward", ([&] {
        ask_d2_258_456_cuda_forward_kernel<scalar_t><<<GET_BLOCKS(num_kernels), CUDA_NUM_THREADS>>>(
            num_kernels,
            input.packed_accessor32<scalar_t,4,torch::RestrictPtrTraits>(),
            weights.packed_accessor32<scalar_t,2,torch::RestrictPtrTraits>(),
            output.packed_accessor32<scalar_t,5,torch::RestrictPtrTraits>(),
            output_channels, input_channels, input_height, input_width, output_height, output_width, stride);
    }));
    return at::sum(output, 2);
}
// ASK_2: [[2,5],[4,5],[5,6],[5,8]]
// ASK 4-direction forward kernel; 2-tap groups [[2,5],[4,5],[5,6],[5,8]] —
// centre paired with each of its four axis-aligned neighbours (taps numbered
// 1..9 row-major).  One thread per (b, n, c, py, px) partial element;
// out-of-image taps read 0.  weights[n][k*C_in + c], k = 0..7; group g writes
// channel n + g*output_channels.  Host wrapper sums over C_in.
template <typename scalar_t>
__global__ void ask_d4_25_45_56_58_cuda_forward_kernel(const int num_kernels,
const torch::PackedTensorAccessor32<scalar_t,4,torch::RestrictPtrTraits> input,
const torch::PackedTensorAccessor32<scalar_t,2,torch::RestrictPtrTraits> weights,
torch::PackedTensorAccessor32<scalar_t,5,torch::RestrictPtrTraits> output,
const int output_channels, const int input_channels, const int input_height, const int input_width, const int output_height, const int output_width, const int stride)
{
CUDA_KERNEL_LOOP(index, num_kernels)
{
// Decode the flat index into (b, n, c, py, px).
const int px = index % output_width;
const int py = (index / output_width) % output_height;
const int c = (index / output_width / output_height) % input_channels;
const int n = (index / output_width / output_height / input_channels) % output_channels;
const int b = index / output_width / output_height / input_channels / output_channels;
// (opy, opx): centre tap in input coordinates.
const int opx = stride * px;
const int opy = stride * py;
// Cross-shaped neighbourhood with zero padding at the borders.
const scalar_t data2 = opy>0 ? input[b][c][opy-1][opx] : 0;
const scalar_t data4 = opx>0 ? input[b][c][opy][opx-1] : 0;
const scalar_t data5 = input[b][c][opy][opx];
const scalar_t data6 = opx<(input_width-1) ? input[b][c][opy][opx+1] : 0;
const scalar_t data8 = opy<(input_height-1) ? input[b][c][opy+1][opx] : 0;
output[b][n][c][py][px] = data2 * weights[n][c] + \
data5 * weights[n][c+input_channels];
output[b][n+output_channels][c][py][px] = data4 * weights[n][c+2*input_channels] + \
data5 * weights[n][c+3*input_channels];
output[b][n+2*output_channels][c][py][px] = data5 * weights[n][c+4*input_channels] + \
data6 * weights[n][c+5*input_channels];
output[b][n+3*output_channels][c][py][px] = data5 * weights[n][c+6*input_channels] + \
data8 * weights[n][c+7*input_channels];
}
}
// Host entry for the ASK d4 25/45/56/58 forward pass.
// input: [B, C_in, H, W]; weights: [C_out, 8 * C_in]; returns
// [B, 4*C_out, H_out, W_out] after reducing the per-C_in partials.
torch::Tensor ask_d4_25_45_56_58_cuda_forward(torch::Tensor input, torch::Tensor weights, const int stride)
{
    const auto batch_size = input.size(0);
    const auto input_channels = input.size(1);
    const auto input_height = input.size(2);
    const auto input_width = input.size(3);
    const auto output_channels = weights.size(0);
    // Ceil-division output size.
    const auto output_height = (input_height - 1) / stride + 1;
    const auto output_width = (input_width - 1) / stride + 1;
    auto output = at::zeros( {batch_size, output_channels*4, input_channels, output_height, output_width}, input.options() );
    const int num_kernels = batch_size * output_channels * input_channels * output_height * output_width;
    // scalar_type() replaces the deprecated Tensor::type() overload of AT_DISPATCH.
    AT_DISPATCH_FLOATING_TYPES(input.scalar_type(), "ask_d4_25_45_56_58_cuda_forward", ([&] {
        ask_d4_25_45_56_58_cuda_forward_kernel<scalar_t><<<GET_BLOCKS(num_kernels), CUDA_NUM_THREADS>>>(
            num_kernels,
            input.packed_accessor32<scalar_t,4,torch::RestrictPtrTraits>(),
            weights.packed_accessor32<scalar_t,2,torch::RestrictPtrTraits>(),
            output.packed_accessor32<scalar_t,5,torch::RestrictPtrTraits>(),
            output_channels, input_channels, input_height, input_width, output_height, output_width, stride);
    }));
    return at::sum(output, 2);
}
|
739968fd4bf87e1664bfaf7d3adb27508fcfae3a.hip
|
// !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
#include "ParticleMethodsCuda.cuh"
#include <random>
#include <hiprand/hiprand.h>
#include <hiprand/hiprand_kernel.h>
#include "qt_windows.h"
#include <cuda_gl_interop.h>
#include <stdio.h>
#include <time.h>
//Computes the gravitational force on body 1 (my_particle) by part 2 (force_particle)
// Softened gravitational acceleration exerted on my_particle by
// force_particle: a = G * m_j * r / (|r|^2 + eps)^(3/2), where
// softening_factor (eps) keeps the interaction finite at zero separation.
__device__ glm::vec3 BodyBodyInteraction(Particle* my_particle, Particle* force_particle,
    float gravitational_constant, float softening_factor)
{
    const glm::vec3 offset = force_particle->position - my_particle->position;
    const float softened_dist_sq = glm::dot(offset, offset) + softening_factor;
    // 1 / d^3 computed as 1 / sqrt(d^6).
    const float dist_sixth = softened_dist_sq * softened_dist_sq * softened_dist_sq;
    const float inv_dist_cubed = 1.0f / sqrtf(dist_sixth);
    const float mass_term = force_particle->weight * inv_dist_cubed;
    return offset * (gravitational_constant * mass_term);
}
// Accumulates the acceleration on my_particle from every particle staged in
// the block's dynamic shared-memory tile (blockDim.x entries).  Padding slots
// are given weight 0 by the caller and therefore contribute no force.
// my_particle may be null (threads past the end of the array) -> returns 0.
// Must be called between two block-wide barriers (UpdateVelocity does this).
__device__ glm::vec3 tile_calculation( Particle* my_particle, float gravitational_constant, float softening_factor)
{
extern __shared__ Particle shared_particles[];
if(!my_particle)
return glm::vec3(0);
glm::vec3 accel(0);
// Pairwise interaction against the whole tile.
for (int i = 0; i < blockDim.x; i++)
accel += BodyBodyInteraction(my_particle, &shared_particles[i], gravitational_constant, softening_factor);
return accel;
}
//creates 2 clusters of particles
// Grid-stride init: each particle gets a random offset around one of two
// cluster centres (alternating by index), a positive random mass, and zero
// velocity.  RNG is seeded deterministically (seed 1234, sequence = tid) so
// runs are reproducible.
// NOTE(review): hiprand_normal() returns a standard normal, not a uniform
// [0,1) variate, so the "* 1.6f - .8f" mapping spreads wider than a uniform
// would — confirm the intended distribution.
__global__ void InitParticlesValuesDualGalaxy(Particle* particles, unsigned num_particles)
{
int tid = threadIdx.x + blockIdx.x * blockDim.x; // handle the data at this index
hiprandState_t state;
hiprand_init(1234, tid, 0, &state);
while (tid < num_particles)
{
float random_x_pos = hiprand_normal(&state);
float random_y_pos = hiprand_normal(&state);
float random_z_pos = hiprand_normal(&state);
float weight = abs( hiprand_normal(&state) * 10.0f );
// Alternate particles between the two galaxy centres.
glm::vec3 position_base = tid % 2 == 0 ? glm::vec3(7.5, 2, 0.0) : glm::vec3(-7.5, -2, 0.0);
particles[tid].position.x = position_base.x + (random_x_pos * 1.6f - .8f);
particles[tid].position.y = position_base.y + (random_y_pos * 1.6f - .8f);
// z is flattened by a factor of 10 to make the cluster disc-like.
particles[tid].position.z = position_base.z + ( (random_z_pos * 1.6f - .8f) + .2f ) * .1;
particles[tid].weight = ( weight ) ;
particles[tid].velocity = glm::vec3(0);
tid += blockDim.x*gridDim.x;
}
}
//creates 1 clusters of particles
// Grid-stride init: a single cluster centred at the origin, with a wider
// spread than the dual-galaxy variant (3.2 instead of 1.6), positive random
// masses, and zero velocity.  Deterministic seeding (1234, sequence = tid).
__global__ void InitParticlesValuesGalaxy(Particle* particles, unsigned num_particles)
{
int tid = threadIdx.x + blockIdx.x * blockDim.x; // handle the data at this index
hiprandState_t state;
hiprand_init(1234, tid, 0, &state);
while (tid < num_particles)
{
float random_x_pos = hiprand_normal(&state);
float random_y_pos = hiprand_normal(&state);
float random_z_pos = hiprand_normal(&state);
float weight = abs(hiprand_normal(&state) * 10.0f);
glm::vec3 position_base = glm::vec3(0, 0, 0.0);
particles[tid].position.x = position_base.x + (random_x_pos * 3.2f - 1.6f);
particles[tid].position.y = position_base.y + (random_y_pos * 3.2f - 1.6f);
// z flattened by a factor of 10 -> disc-like cluster.
particles[tid].position.z = position_base.z + ((random_z_pos * 1.6f - .8f) + .2f) * .1;
particles[tid].weight = (weight);
particles[tid].velocity = glm::vec3(0);
tid += blockDim.x*gridDim.x;
}
}
//creates cluster of particles randomly scattered in sphere
// Grid-stride init: particles on a shell of radius 15 (random spherical
// angles), positive random masses, initial velocity of magnitude 3 pointing
// at the origin so the shell collapses inward.  Deterministic seeding.
__global__ void InitParticlesValuesSphere(Particle* particles, unsigned num_particles)
{
int tid = threadIdx.x + blockIdx.x * blockDim.x; // handle the data at this index
hiprandState_t state;
hiprand_init(1234, tid, 0, &state);
float pi = 3.1415926535897f;
while ( tid < num_particles )
{
//spherical coordinates
// NOTE(review): |normal| is not uniform on [0,1], so angles are not
// uniformly distributed over the sphere — confirm this is intended.
float random_phi = abs( hiprand_normal(&state) ) * 2 * pi;
float random_theta = abs(hiprand_normal(&state)) * pi;
const float r = 15.0f;
float weight = abs(hiprand_normal(&state) * 10.0f);
// Spherical -> Cartesian conversion on a shell of radius r.
particles[tid].position.x = r * sin(random_theta) *cos(random_phi);
particles[tid].position.y = r * sin(random_theta) *sin(random_phi);
particles[tid].position.z = r * cos(random_theta);
particles[tid].weight = (weight);
//have velocity point towards the sphere center (0,0,0)
particles[tid].velocity = glm::normalize(-particles[tid].position) * glm::vec3(3.0f);
tid += blockDim.x*gridDim.x;
}
}
// Tiled all-pairs N-body velocity update.  One tile = one block-width of
// particles staged in dynamic shared memory; the launch must supply
// blockDim.x * sizeof(Particle) bytes of shared memory.
__global__ void UpdateVelocity(Particle* particles, unsigned num_particles, float delta_time,
float gravitational_constant, float softening_factor)
{
    //shared memory for faster read and writes
    extern __shared__ Particle shared_particles[];
    glm::vec3 acceleration(0.0f);
    int tid = threadIdx.x + blockIdx.x * blockDim.x; // handle the data at this index
    Particle *my_particle = 0;
    if ( tid < num_particles )
        my_particle= &particles[tid];
    int i, tile;
    // Walk the particle list one tile at a time.  The original advanced the
    // loop by a hard-coded 256; using blockDim.x keeps the tile size in sync
    // with whatever launch configuration is actually used (identical
    // behaviour for the current 256-thread launches).
    for( i = 0, tile = 0; i < num_particles; i += blockDim.x, tile++ )
    {
        int idx = tile * blockDim.x + threadIdx.x;
        if (idx < num_particles)
            shared_particles[threadIdx.x] = particles[idx];
        else
            shared_particles[threadIdx.x].weight = 0; // zero-mass padding: no force contribution
        __syncthreads(); // tile fully staged before anyone reads it
        acceleration += tile_calculation( my_particle , gravitational_constant, softening_factor);
        __syncthreads(); // everyone done reading before the next tile overwrites
    }
    if (tid < num_particles)
        particles[tid].velocity += acceleration * delta_time;
}
// Explicit Euler integration step: position += velocity * dt, one particle
// per iteration of a grid-stride loop.
__global__ void UpdatePosition(Particle* particles, unsigned num_particles, float delta_time )
{
    const unsigned step = blockDim.x * gridDim.x;
    for (unsigned idx = threadIdx.x + blockIdx.x * blockDim.x; idx < num_particles; idx += step)
    {
        Particle& p = particles[idx];
        p.position.x += p.velocity.x * delta_time;
        p.position.y += p.velocity.y * delta_time;
        p.position.z += p.velocity.z * delta_time;
    }
}
// Defaulted out-of-line destructor (definition kept in this translation unit).
ParticleCudaOperations::~ParticleCudaOperations() = default;
// Seeds the particle VBO on the device.  initalization_condition selects the
// layout: 0 = single galaxy, 1 = dual galaxy, 2 = sphere shell; anything else
// leaves the buffer untouched.
// NOTE(review): none of the HIP calls here are error-checked.
void ParticleCudaOperations::InitParticles( unsigned part_vbo, unsigned num_particles, unsigned initalization_condition )
{
    void* part_pointer;
    // Map the buffer to CUDA
    cudaGraphicsResource_t resource;
    hipGraphicsGLRegisterBuffer(&resource, part_vbo, hipGraphicsRegisterFlagsNone);
    hipGraphicsMapResources(1, &resource, 0);
    hipGraphicsResourceGetMappedPointer(&part_pointer, 0, resource);
    switch ( initalization_condition )
    {
    case 0:
    {
        InitParticlesValuesGalaxy << < 256, 256 >> > ((Particle*)part_pointer, num_particles);
        break;
    }
    case 1:
    {
        InitParticlesValuesDualGalaxy << < 256, 256 >> > ((Particle*)part_pointer, num_particles);
        break;
    }
    case 2:
    {
        InitParticlesValuesSphere << < 256, 256 >> > ((Particle*)part_pointer, num_particles);
        break;
    }
    default:
        break;
    }
    hipDeviceSynchronize();
    hipGraphicsUnmapResources(1, &resource);
    // The buffer is re-registered on every call; without a matching
    // unregister the graphics-interop handle leaks on each invocation.
    hipGraphicsUnregisterResource(resource);
}
// One simulation step: map the GL particle VBO, integrate velocities (tiled
// all-pairs gravity) then positions, and release the mapping.
void ParticleCudaOperations::UpdateParticles( unsigned part_vbo, unsigned num_particles, float delta_time,
float gravitational_constant, float softening_factor)
{
    void * part_pointer;
    // Map the buffer to CUDA
    cudaGraphicsResource_t resource;
    hipGraphicsGLRegisterBuffer(&resource, part_vbo, hipGraphicsRegisterFlagsNone );
    hipGraphicsMapResources(1, &resource, 0);
    hipGraphicsResourceGetMappedPointer(&part_pointer, 0, resource);
    // Third launch argument: dynamic shared memory, one Particle per thread.
    UpdateVelocity << < 256, 256, 256 * sizeof( Particle ) >> > ( (Particle*)part_pointer, num_particles, delta_time , gravitational_constant, softening_factor);
    hipDeviceSynchronize();
    // NOTE(review): the position step uses a fixed ~1/60 s timestep instead
    // of the delta_time parameter — confirm whether that is intentional.
    UpdatePosition << < 256, 256 >> > ( (Particle*)part_pointer, num_particles, .0167);
    hipDeviceSynchronize();
    hipGraphicsUnmapResources(1, &resource);
    // Resource is re-registered every frame; unregister it here or the
    // graphics-interop handle leaks once per frame.
    hipGraphicsUnregisterResource(resource);
}
|
739968fd4bf87e1664bfaf7d3adb27508fcfae3a.cu
|
#include "ParticleMethodsCuda.cuh"
#include <random>
#include <curand.h>
#include <curand_kernel.h>
#include "qt_windows.h"
#include <cuda_gl_interop.h>
#include <stdio.h>
#include <time.h>
//Computes the gravitational force on body 1 (my_particle) by part 2 (force_particle)
// Softened gravitational acceleration exerted on my_particle by
// force_particle: a = G * m_j * r / (|r|^2 + eps)^(3/2), where
// softening_factor (eps) keeps the interaction finite at zero separation.
__device__ glm::vec3 BodyBodyInteraction(Particle* my_particle, Particle* force_particle,
    float gravitational_constant, float softening_factor)
{
    const glm::vec3 offset = force_particle->position - my_particle->position;
    const float softened_dist_sq = glm::dot(offset, offset) + softening_factor;
    // 1 / d^3 computed as 1 / sqrt(d^6).
    const float dist_sixth = softened_dist_sq * softened_dist_sq * softened_dist_sq;
    const float inv_dist_cubed = 1.0f / sqrtf(dist_sixth);
    const float mass_term = force_particle->weight * inv_dist_cubed;
    return offset * (gravitational_constant * mass_term);
}
// Accumulates the acceleration on my_particle from every particle staged in
// the block's dynamic shared-memory tile (blockDim.x entries).  Padding slots
// are given weight 0 by the caller and therefore contribute no force.
// my_particle may be null (threads past the end of the array) -> returns 0.
// Must be called between two block-wide barriers (UpdateVelocity does this).
__device__ glm::vec3 tile_calculation( Particle* my_particle, float gravitational_constant, float softening_factor)
{
extern __shared__ Particle shared_particles[];
if(!my_particle)
return glm::vec3(0);
glm::vec3 accel(0);
// Pairwise interaction against the whole tile.
for (int i = 0; i < blockDim.x; i++)
accel += BodyBodyInteraction(my_particle, &shared_particles[i], gravitational_constant, softening_factor);
return accel;
}
//creates 2 clusters of particles
// Grid-stride init: each particle gets a random offset around one of two
// cluster centres (alternating by index), a positive random mass, and zero
// velocity.  RNG is seeded deterministically (seed 1234, sequence = tid) so
// runs are reproducible.
// NOTE(review): curand_normal() returns a standard normal, not a uniform
// [0,1) variate, so the "* 1.6f - .8f" mapping spreads wider than a uniform
// would — confirm the intended distribution.
__global__ void InitParticlesValuesDualGalaxy(Particle* particles, unsigned num_particles)
{
int tid = threadIdx.x + blockIdx.x * blockDim.x; // handle the data at this index
curandState state;
curand_init(1234, tid, 0, &state);
while (tid < num_particles)
{
float random_x_pos = curand_normal(&state);
float random_y_pos = curand_normal(&state);
float random_z_pos = curand_normal(&state);
float weight = abs( curand_normal(&state) * 10.0f );
// Alternate particles between the two galaxy centres.
glm::vec3 position_base = tid % 2 == 0 ? glm::vec3(7.5, 2, 0.0) : glm::vec3(-7.5, -2, 0.0);
particles[tid].position.x = position_base.x + (random_x_pos * 1.6f - .8f);
particles[tid].position.y = position_base.y + (random_y_pos * 1.6f - .8f);
// z is flattened by a factor of 10 to make the cluster disc-like.
particles[tid].position.z = position_base.z + ( (random_z_pos * 1.6f - .8f) + .2f ) * .1;
particles[tid].weight = ( weight ) ;
particles[tid].velocity = glm::vec3(0);
tid += blockDim.x*gridDim.x;
}
}
//creates 1 clusters of particles
// Grid-stride init: a single cluster centred at the origin, with a wider
// spread than the dual-galaxy variant (3.2 instead of 1.6), positive random
// masses, and zero velocity.  Deterministic seeding (1234, sequence = tid).
__global__ void InitParticlesValuesGalaxy(Particle* particles, unsigned num_particles)
{
int tid = threadIdx.x + blockIdx.x * blockDim.x; // handle the data at this index
curandState state;
curand_init(1234, tid, 0, &state);
while (tid < num_particles)
{
float random_x_pos = curand_normal(&state);
float random_y_pos = curand_normal(&state);
float random_z_pos = curand_normal(&state);
float weight = abs(curand_normal(&state) * 10.0f);
glm::vec3 position_base = glm::vec3(0, 0, 0.0);
particles[tid].position.x = position_base.x + (random_x_pos * 3.2f - 1.6f);
particles[tid].position.y = position_base.y + (random_y_pos * 3.2f - 1.6f);
// z flattened by a factor of 10 -> disc-like cluster.
particles[tid].position.z = position_base.z + ((random_z_pos * 1.6f - .8f) + .2f) * .1;
particles[tid].weight = (weight);
particles[tid].velocity = glm::vec3(0);
tid += blockDim.x*gridDim.x;
}
}
//creates cluster of particles randomly scattered in sphere
// Grid-stride init: particles on a shell of radius 15 (random spherical
// angles), positive random masses, initial velocity of magnitude 3 pointing
// at the origin so the shell collapses inward.  Deterministic seeding.
__global__ void InitParticlesValuesSphere(Particle* particles, unsigned num_particles)
{
int tid = threadIdx.x + blockIdx.x * blockDim.x; // handle the data at this index
curandState state;
curand_init(1234, tid, 0, &state);
float pi = 3.1415926535897f;
while ( tid < num_particles )
{
//spherical coordinates
// NOTE(review): |normal| is not uniform on [0,1], so angles are not
// uniformly distributed over the sphere — confirm this is intended.
float random_phi = abs( curand_normal(&state) ) * 2 * pi;
float random_theta = abs(curand_normal(&state)) * pi;
const float r = 15.0f;
float weight = abs(curand_normal(&state) * 10.0f);
// Spherical -> Cartesian conversion on a shell of radius r.
particles[tid].position.x = r * sin(random_theta) *cos(random_phi);
particles[tid].position.y = r * sin(random_theta) *sin(random_phi);
particles[tid].position.z = r * cos(random_theta);
particles[tid].weight = (weight);
//have velocity point towards the sphere center (0,0,0)
particles[tid].velocity = glm::normalize(-particles[tid].position) * glm::vec3(3.0f);
tid += blockDim.x*gridDim.x;
}
}
// Tiled all-pairs N-body velocity update.  One tile = one block-width of
// particles staged in dynamic shared memory; the launch must supply
// blockDim.x * sizeof(Particle) bytes of shared memory.
__global__ void UpdateVelocity(Particle* particles, unsigned num_particles, float delta_time,
float gravitational_constant, float softening_factor)
{
    //shared memory for faster read and writes
    extern __shared__ Particle shared_particles[];
    glm::vec3 acceleration(0.0f);
    int tid = threadIdx.x + blockIdx.x * blockDim.x; // handle the data at this index
    Particle *my_particle = 0;
    if ( tid < num_particles )
        my_particle= &particles[tid];
    int i, tile;
    // Walk the particle list one tile at a time.  The original advanced the
    // loop by a hard-coded 256; using blockDim.x keeps the tile size in sync
    // with whatever launch configuration is actually used (identical
    // behaviour for the current 256-thread launches).
    for( i = 0, tile = 0; i < num_particles; i += blockDim.x, tile++ )
    {
        int idx = tile * blockDim.x + threadIdx.x;
        if (idx < num_particles)
            shared_particles[threadIdx.x] = particles[idx];
        else
            shared_particles[threadIdx.x].weight = 0; // zero-mass padding: no force contribution
        __syncthreads(); // tile fully staged before anyone reads it
        acceleration += tile_calculation( my_particle , gravitational_constant, softening_factor);
        __syncthreads(); // everyone done reading before the next tile overwrites
    }
    if (tid < num_particles)
        particles[tid].velocity += acceleration * delta_time;
}
// Explicit Euler integration step: position += velocity * dt, one particle
// per iteration of a grid-stride loop.
__global__ void UpdatePosition(Particle* particles, unsigned num_particles, float delta_time )
{
    const unsigned step = blockDim.x * gridDim.x;
    for (unsigned idx = threadIdx.x + blockIdx.x * blockDim.x; idx < num_particles; idx += step)
    {
        Particle& p = particles[idx];
        p.position.x += p.velocity.x * delta_time;
        p.position.y += p.velocity.y * delta_time;
        p.position.z += p.velocity.z * delta_time;
    }
}
// Defaulted out-of-line destructor (definition kept in this translation unit).
ParticleCudaOperations::~ParticleCudaOperations() = default;
// Seeds the particle VBO on the device.  initalization_condition selects the
// layout: 0 = single galaxy, 1 = dual galaxy, 2 = sphere shell; anything else
// leaves the buffer untouched.
// NOTE(review): none of the CUDA calls here are error-checked.
void ParticleCudaOperations::InitParticles( unsigned part_vbo, unsigned num_particles, unsigned initalization_condition )
{
    void* part_pointer;
    // Map the buffer to CUDA
    cudaGraphicsResource_t resource;
    cudaGraphicsGLRegisterBuffer(&resource, part_vbo, cudaGraphicsRegisterFlagsNone);
    cudaGraphicsMapResources(1, &resource, 0);
    cudaGraphicsResourceGetMappedPointer(&part_pointer, 0, resource);
    switch ( initalization_condition )
    {
    case 0:
    {
        InitParticlesValuesGalaxy << < 256, 256 >> > ((Particle*)part_pointer, num_particles);
        break;
    }
    case 1:
    {
        InitParticlesValuesDualGalaxy << < 256, 256 >> > ((Particle*)part_pointer, num_particles);
        break;
    }
    case 2:
    {
        InitParticlesValuesSphere << < 256, 256 >> > ((Particle*)part_pointer, num_particles);
        break;
    }
    default:
        break;
    }
    cudaDeviceSynchronize();
    cudaGraphicsUnmapResources(1, &resource);
    // The buffer is re-registered on every call; without a matching
    // unregister the graphics-interop handle leaks on each invocation.
    cudaGraphicsUnregisterResource(resource);
}
// One simulation step: map the GL particle VBO, integrate velocities (tiled
// all-pairs gravity) then positions, and release the mapping.
void ParticleCudaOperations::UpdateParticles( unsigned part_vbo, unsigned num_particles, float delta_time,
float gravitational_constant, float softening_factor)
{
    void * part_pointer;
    // Map the buffer to CUDA
    cudaGraphicsResource_t resource;
    cudaGraphicsGLRegisterBuffer(&resource, part_vbo, cudaGraphicsRegisterFlagsNone );
    cudaGraphicsMapResources(1, &resource, 0);
    cudaGraphicsResourceGetMappedPointer(&part_pointer, 0, resource);
    // Third launch argument: dynamic shared memory, one Particle per thread.
    UpdateVelocity << < 256, 256, 256 * sizeof( Particle ) >> > ( (Particle*)part_pointer, num_particles, delta_time , gravitational_constant, softening_factor);
    cudaDeviceSynchronize();
    // NOTE(review): the position step uses a fixed ~1/60 s timestep instead
    // of the delta_time parameter — confirm whether that is intentional.
    UpdatePosition << < 256, 256 >> > ( (Particle*)part_pointer, num_particles, .0167);
    cudaDeviceSynchronize();
    cudaGraphicsUnmapResources(1, &resource);
    // Resource is re-registered every frame; unregister it here or the
    // graphics-interop handle leaks once per frame.
    cudaGraphicsUnregisterResource(resource);
}
|
533a7ad7966038269a3efdd4e227b178119e4083.hip
|
// !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
#include "cuda_compute.h"
#include <stdio.h>
#include "math_constants.h"
#define BLOCK_SIZE 512
#define MAX_WIDTH 2048
#define MAX_TEXTURE_HEIGHT 65536
texture<float, 2 > inputDataTexRef;
// Wrap HIP runtime calls: CUDA_CHECK(hipFoo(...)) prints the error string
// with file/line and exits (when abort = true, the default) on failure.
#define CUDA_CHECK(ans) { gpuAssert((ans), __FILE__, __LINE__); }
inline void gpuAssert(hipError_t code, const char *file, int line, bool abort=true)
{
if (code != hipSuccess)
{
fprintf(stderr,"GPUassert: %s %s %d\n", hipGetErrorString(code), file, line);
if (abort) exit(code);
}
}
// Resamples one image of a vertically stacked texture (inputDataTexRef holds
// images of `rows` rows each; image_number selects the slice) onto a polar
// grid.  Thread (r, t) samples radius r_min + r at angle -pi + t * 2pi /
// polar_angles around (center_x, center_y); samples outside the image get
// cval.  Output is row-major [radius][angle] with pitch pitchPolar.
__global__
void gComputePolarProjection(float *dev_output, size_t pitchPolar, int image_number,
int rows, int cols, int r_min, int r_max, int polar_angles,
float center_y, float center_x, float cval)
{
unsigned int r = blockIdx.x * blockDim.x + threadIdx.x;
unsigned int t = blockIdx.y * blockDim.y + threadIdx.y;
float radius = r_min + r;
float angle_step = 2*CUDART_PI_F/polar_angles;
float angle = -CUDART_PI_F + t*angle_step ;
float res;
if(radius < r_max && t < polar_angles){
// +0.5f centres the sample on the texel for linear texture filtering.
float x = radius * cos(angle) + center_x + 0.5f;
float y = radius * sin(angle) + center_y + 0.5f;
if(x<0 || x>= cols || y<0 || y>= rows){
res = cval;
}else{
// y is offset into the stacked texture to pick the requested image.
res = tex2D(inputDataTexRef, x, y + image_number*rows);
}
*((float*)((char*)dev_output + r * pitchPolar) + t) = res;
}
}
// Circular auto-correlation of each matrix row:
//   out(x, y) = (1/cols) * sum_i in(i, y) * in((i + x) % cols, y)
// One block row (blockIdx.y) per matrix row; the row is staged in shared
// memory.  Requires cols <= MAX_WIDTH and blockDim.x == BLOCK_SIZE (the
// chunked load loop is written in BLOCK_SIZE strides).
__global__ void gCorrelationComputeLine( float *dev_input, float *dev_output, int rows, int cols, size_t pitchInput)
{
    int xCoord = blockIdx.x*blockDim.x + threadIdx.x;
    int tid = threadIdx.x;
    int yCoord = blockIdx.y;
    __shared__ float line_data[MAX_WIDTH];
    int i,pos;
    float val;
    float t;
    float sum = 0;
    // Cooperative load of the whole row.  This stage and the barrier below
    // must be executed by every thread of the block: the original guarded
    // both with `xCoord < cols`, which (a) makes __syncthreads() divergent
    // whenever cols is not a multiple of blockDim.x (undefined behaviour)
    // and (b) leaves some line_data slots unloaded in the last block.
    // `yCoord < rows` is uniform across the block, so branching on it is safe.
    if( yCoord<rows ){
        for(i=0; i*BLOCK_SIZE < cols; i++){
            if(tid+i*BLOCK_SIZE < cols){
                line_data[tid + i*BLOCK_SIZE] = *((float*)((char*)dev_input + yCoord * pitchInput) + tid + i*BLOCK_SIZE); //Copy line of matrix to shared memory
            }
        }
    }
    __syncthreads();
    if( xCoord<cols && yCoord<rows ){
        for( i=0; i<cols; i++ ){     //Loop over whole line
            val = line_data[i];      //First multiplier
            pos = (i+xCoord)%cols;   //Coordinate in shifted line (second line)
            t = line_data[pos];      //Value at shift pos, second multiplier
            sum += t * val;          //Sum of multiplications
        }
        sum /= cols;                 //Divide by line length -> correlation
        // NOTE(review): the output row is addressed with the *input* pitch —
        // valid only if both allocations share a pitch; confirm at call site.
        *((float*)((char*)dev_output + yCoord * pitchInput) + xCoord) = sum;
    }
}
// Replaces masked pixels (negative values) in each row of dev_input with the
// average of that row's unmasked pixels, in place.  One block per row
// (blockIdx.y); requires blockDim.x == BLOCK_SIZE — both the chunked loops
// and the tree reduction are written in BLOCK_SIZE strides.
//
// Fixes over the original: (1) a barrier between staging a chunk in shared
// memory and the tree reduction that reads neighbouring slots; (2) a barrier
// after thread 0 reads the chunk totals, before the next chunk overwrites the
// buffers; (3) a dedicated count accumulator — the original reused the
// per-chunk `non_mask` flag, which double-counted thread 0's own flag and
// dropped the counts of all but the last chunk.
__global__
void gRecoverMask( float *dev_input, int rows, int cols, size_t pitchInput)
{
    int xCoord = threadIdx.x;
    int yCoord = blockIdx.y;
    int i,s;
    float val;
    float average_val;
    float sum_value = 0;          // running row sum of unmasked values (thread 0 only)
    int non_mask_count = 0;       // running count of unmasked values (thread 0 only)
    __shared__ float sum_buf[BLOCK_SIZE];
    __shared__ float non_mask_buf[BLOCK_SIZE];
    // First pass: accumulate the sum and count of unmasked pixels, one
    // BLOCK_SIZE-wide chunk of the row at a time.
    for(i=0; i*BLOCK_SIZE < cols; i++){
        int non_mask;
        if(xCoord + i*BLOCK_SIZE < cols){
            val = *((float*)((char*)dev_input + yCoord * pitchInput) + xCoord + i*BLOCK_SIZE);
        }else{
            val = -1;             // lanes past the row end behave like masked pixels
        }
        if(val >= 0){
            non_mask = 1;
        }else{
            val = 0;
            non_mask = 0;
        }
        sum_buf[xCoord] = val;
        non_mask_buf[xCoord] = non_mask;
        __syncthreads();          // publish the chunk before the reduction reads neighbours
        for( s=1; s<BLOCK_SIZE; s*=2) { // interleaved tree reduction into slot 0
            if(xCoord % (2*s) == 0) {
                sum_buf[xCoord] += sum_buf[xCoord + s];
                non_mask_buf[xCoord] += non_mask_buf[xCoord + s];
            }
            __syncthreads();
        }
        if(xCoord == 0) {
            sum_value += sum_buf[0];
            non_mask_count += (int)non_mask_buf[0];
        }
        __syncthreads();          // slot 0 has been read before the next chunk overwrites it
    }
    // Broadcast the row totals to every thread of the block.
    if(xCoord == 0) {
        sum_buf[0] = sum_value;
        non_mask_buf[0] = (float)non_mask_count;
    }
    __syncthreads();
    float total_count = non_mask_buf[0];
    float total_sum = sum_buf[0];
    if(total_count == 0) {
        average_val = 0;          // fully masked row: fill with zero
    }else{
        average_val = total_sum/total_count;
    }
    // Second pass: overwrite masked pixels with the row average.
    for(i=0; i*BLOCK_SIZE < cols; i++){
        if(xCoord + i*BLOCK_SIZE < cols){
            val = *((float*)((char*)dev_input + yCoord * pitchInput) + xCoord + i*BLOCK_SIZE);
            if(val < 0){
                *((float*)((char*)dev_input + yCoord * pitchInput) + xCoord + i*BLOCK_SIZE) = average_val;
            }
        }
    }
}
// Collapse the 2D autocorrelation (radius x angle) to a per-angle mean:
//   dev_ccf_angle[t] = mean over radii of dev_ccf_2d[r][t]
// Expected launch: blockDim == (1, BLOCK_SIZE), gridDim.x == polar_angles
// (one block per angle column).
__global__
void gCCFAngle(float *dev_ccf_2d, float *dev_ccf_angle, int polar_angles, int radius_range, size_t pitchF){
    int xCoord = blockIdx.x;
    int yCoord = threadIdx.y;
    int i,s;
    __shared__ float sum_buf[BLOCK_SIZE];
    float sum_value = 0;
    if(xCoord < polar_angles){   // uniform per block, so the barriers below are safe
        for(i=0; i*BLOCK_SIZE < radius_range; i++){
            if(yCoord + i*BLOCK_SIZE < radius_range){
                sum_buf[yCoord] = *((float*)((char*)dev_ccf_2d + (yCoord + i*BLOCK_SIZE) * pitchF) + xCoord);
            }else{
                sum_buf[yCoord] = 0;
            }
            // The original started the reduction with no barrier here, so
            // lane 0 could read neighbours' slots before they were written.
            __syncthreads();
            for( s=1; s<BLOCK_SIZE; s*=2) {   // sum reduction
                if(yCoord % (2*s) == 0) {
                    sum_buf[yCoord] += sum_buf[yCoord + s];
                }
                __syncthreads();
            }
            if(yCoord == 0){
                sum_value += sum_buf[0];
            }
        }
        if(yCoord == 0){
            dev_ccf_angle[xCoord] = sum_value/radius_range;
        }
    }
}
// Per-radius normalisation of the autocorrelation: each radius row's
// zero-shift correlation is divided by (row sum)^2 / polar_angles, with a
// guard against a zero denominator.
__global__
void gRadAngle(float *dev_polar_input, float *dev_ccf_2d, float *dev_rad, int polar_angles, int radius_range, size_t pitchF){
    int row = blockDim.y*blockIdx.y + threadIdx.y;
    if(row >= radius_range)
        return;
    const float *polar_row = (const float*)((const char*)dev_polar_input + row*pitchF);
    float total = 0;
    for(int a = 0; a < polar_angles; a++){
        total += polar_row[a];
    }
    float norm = total*total/polar_angles;
    float zero_shift = *((float*)((char*)dev_ccf_2d + row*pitchF));
    dev_rad[row] = zero_shift/(norm==0 ? 1 : norm);
}
// Host wrapper: copy one rows x cols float image to the device, bind it to
// the 2D texture and reproject it to polar coordinates into polar_data
// ((r_max-r_min) x polar_angles). input_row_stride / polar_row_stride are
// the host row strides in bytes. Returns EXIT_SUCCESS; any HIP failure
// aborts via CUDA_CHECK.
int CudaReprojectToPolar(float *input_data, size_t input_row_stride, float *polar_data, size_t polar_row_stride,
                         int rows, int cols, int r_min, int r_max, int polar_angles,
                         float center_y, float center_x, float cval)
{
    float *dev_input, *dev_output;
    size_t pitchInput, pitchPolar;
    // Memory allocation for input and output arrays
    CUDA_CHECK(hipMallocPitch((void**)&dev_input, &pitchInput, sizeof(float)*cols, rows));
    CUDA_CHECK(hipMallocPitch((void**)&dev_output, &pitchPolar, sizeof(float)*polar_angles, r_max-r_min));
    CUDA_CHECK(hipMemcpy2D(dev_input, pitchInput, input_data, input_row_stride, sizeof(float)*cols, rows, hipMemcpyHostToDevice));
    // Configure the texture reference before binding it
    inputDataTexRef.addressMode[0] = hipAddressModeBorder;
    inputDataTexRef.addressMode[1] = hipAddressModeBorder;
    inputDataTexRef.filterMode = hipFilterModeLinear;
    inputDataTexRef.normalized = false;
    hipChannelFormatDesc channelDesc = hipCreateChannelDesc<float>();
    CUDA_CHECK(hipBindTexture2D(NULL, inputDataTexRef, dev_input, channelDesc, cols, rows, pitchInput));
    // Calculation (block is 32x32, so x/y extents are interchangeable here)
    dim3 projBlock( 32, 32 );
    dim3 projGrid((r_max - r_min + projBlock.y - 1) / projBlock.y,
                  (polar_angles + projBlock.x - 1) / projBlock.x);
    hipLaunchKernelGGL(( gComputePolarProjection), dim3(projGrid),dim3(projBlock), 0, 0, dev_output, pitchPolar, 0,
                       rows, cols, r_min, r_max, polar_angles,
                       center_y, center_x, cval);
    CUDA_CHECK(hipGetLastError());        // catch launch-configuration errors
    CUDA_CHECK(hipDeviceSynchronize());
    CUDA_CHECK(hipMemcpy2D(polar_data, polar_row_stride, dev_output, pitchPolar, sizeof(float)*polar_angles, r_max-r_min, hipMemcpyDeviceToHost));
    CUDA_CHECK(hipUnbindTexture(inputDataTexRef));   // release the binding before freeing
    hipFree( dev_input );
    hipFree( dev_output );
    return EXIT_SUCCESS;
}
// Host wrapper: row-wise mask recovery followed by circular row
// autocorrelation. input_data / output_data are rows x cols float images
// with numpy_row_stride bytes per host row.
int CudaCorrelateLine(float* input_data, float* output_data, size_t numpy_row_stride, int rows, int cols)
{
    float *dev_input, *dev_output;
    size_t pitchInput;
    // The correlation kernel stages a whole row in shared memory.
    if( cols > MAX_WIDTH ){
        fprintf( stderr, "Error at %s:%i : %s\n", __FILE__, __LINE__, "Image width exceeds max value, need recompile" );
        exit( -1 );
    }
    // Memory allocation: both arrays have identical dimensions, so they
    // deliberately share a single pitch value.
    CUDA_CHECK(hipMallocPitch((void**)&dev_input, &pitchInput, sizeof(float)*cols, rows));
    CUDA_CHECK(hipMallocPitch((void**)&dev_output, &pitchInput, sizeof(float)*cols, rows));
    CUDA_CHECK(hipMemcpy2D(dev_input, pitchInput, input_data, numpy_row_stride, sizeof(float)*cols, rows, hipMemcpyHostToDevice));
    // RecoverMask: one block per row
    dim3 recoverBlock( BLOCK_SIZE, 1 );
    dim3 recoverGrid( 1, rows );
    hipLaunchKernelGGL(( gRecoverMask), dim3(recoverGrid),dim3(recoverBlock), 0, 0, dev_input, rows, cols, pitchInput);
    CUDA_CHECK(hipGetLastError());
    // Correlation
    dim3 corrBlock( BLOCK_SIZE, 1 );
    dim3 corrGrid( cols/BLOCK_SIZE + ((cols%BLOCK_SIZE==0)?0:1), rows );
    hipLaunchKernelGGL(( gCorrelationComputeLine), dim3(corrGrid),dim3(corrBlock), 0, 0, dev_input, dev_output, rows, cols, pitchInput);
    CUDA_CHECK(hipGetLastError());
    CUDA_CHECK(hipDeviceSynchronize());
    CUDA_CHECK(hipMemcpy2D(output_data, numpy_row_stride, dev_output, pitchInput, sizeof(float)*cols, rows, hipMemcpyDeviceToHost));
    hipFree(dev_input);
    hipFree(dev_output);
    return EXIT_SUCCESS;
}
// Host wrapper: reproject one image to polar coordinates, recover masked
// pixels, autocorrelate each radius row, then reduce to a per-angle CCF
// (ccf_data, polar_angles floats) and a per-radius normalised value
// (rad_data, r_max-r_min floats).
// NOTE(review): `cval` is accepted but the projection is launched with a
// fill value of 0 -- confirm whether that is intentional.
int CudaReprojectAndCorrelate(float* input_data, size_t input_row_stride, float* ccf_data, float* rad_data,
                              int rows, int cols, int r_min, int r_max, int polar_angles,
                              float center_y, float center_x, float cval)
{
    float *dev_input, *dev_polar_input, *dev_ccf_2d, *dev_rad;
    float *dev_ccf_angle;
    size_t pitchInput, pitchPolar;
    // Memory allocation for input and output arrays
    CUDA_CHECK(hipMallocPitch((void**)&dev_input, &pitchInput, sizeof(float)*cols, rows));
    CUDA_CHECK(hipMallocPitch((void**)&dev_polar_input, &pitchPolar, sizeof(float)*polar_angles, r_max-r_min));
    CUDA_CHECK(hipMallocPitch((void**)&dev_ccf_2d, &pitchPolar, sizeof(float)*polar_angles, r_max-r_min));
    CUDA_CHECK(hipMalloc((void**)&dev_ccf_angle, sizeof(float)*polar_angles));
    CUDA_CHECK(hipMalloc((void**)&dev_rad, sizeof(float)*(r_max - r_min)));
    CUDA_CHECK(hipMemcpy2D(dev_input, pitchInput, input_data, input_row_stride, sizeof(float)*cols, rows, hipMemcpyHostToDevice));
    // Configure the texture reference before binding it
    inputDataTexRef.addressMode[0] = hipAddressModeBorder;
    inputDataTexRef.addressMode[1] = hipAddressModeBorder;
    inputDataTexRef.filterMode = hipFilterModeLinear;
    inputDataTexRef.normalized = false;
    hipChannelFormatDesc channelDesc = hipCreateChannelDesc<float>();
    CUDA_CHECK(hipBindTexture2D(NULL, inputDataTexRef, dev_input, channelDesc, cols, rows, pitchInput));
    // Projection calculation
    dim3 projBlock( 32, 32 );
    dim3 projGrid((r_max - r_min + projBlock.y - 1) / projBlock.y,
                  (polar_angles + projBlock.x - 1) / projBlock.x);
    hipLaunchKernelGGL(( gComputePolarProjection), dim3(projGrid),dim3(projBlock), 0, 0, dev_polar_input, pitchPolar, 0,
                       rows, cols, r_min, r_max, polar_angles,
                       center_y, center_x, 0);
    CUDA_CHECK(hipGetLastError());
    CUDA_CHECK(hipDeviceSynchronize());
    CUDA_CHECK(hipUnbindTexture(inputDataTexRef));
    // RecoverMask on the polar image ((r_max-r_min) rows x polar_angles cols)
    dim3 recoverBlock( BLOCK_SIZE, 1 );
    dim3 recoverGrid( 1, r_max-r_min );
    hipLaunchKernelGGL(( gRecoverMask), dim3(recoverGrid),dim3(recoverBlock), 0, 0, dev_polar_input, r_max-r_min, polar_angles, pitchPolar);
    CUDA_CHECK(hipGetLastError());
    // Correlation: grid sized for the *polar* image. The original sized it
    // from the input image (cols x rows), which under-covers the polar image
    // whenever cols < polar_angles or rows < r_max-r_min.
    dim3 corrBlock( BLOCK_SIZE, 1 );
    dim3 corrGrid( (polar_angles + BLOCK_SIZE - 1)/BLOCK_SIZE, r_max-r_min );
    hipLaunchKernelGGL(( gCorrelationComputeLine), dim3(corrGrid),dim3(corrBlock), 0, 0, dev_polar_input, dev_ccf_2d, r_max-r_min, polar_angles, pitchPolar);
    CUDA_CHECK(hipGetLastError());
    dim3 angleBlock( 1, BLOCK_SIZE );
    dim3 angleGrid( polar_angles, 1 );
    hipLaunchKernelGGL(( gCCFAngle), dim3(angleGrid),dim3(angleBlock), 0, 0, dev_ccf_2d, dev_ccf_angle, polar_angles, r_max-r_min, pitchPolar);
    CUDA_CHECK(hipGetLastError());
    CUDA_CHECK(hipDeviceSynchronize());
    dim3 radBlock( 1, BLOCK_SIZE );
    dim3 radGrid( 1, (r_max - r_min + radBlock.y - 1) / radBlock.y );
    hipLaunchKernelGGL(( gRadAngle), dim3(radGrid),dim3(radBlock), 0, 0, dev_polar_input, dev_ccf_2d, dev_rad, polar_angles, r_max-r_min, pitchPolar);
    CUDA_CHECK(hipGetLastError());
    CUDA_CHECK(hipMemcpy(ccf_data, dev_ccf_angle, polar_angles*sizeof(float), hipMemcpyDeviceToHost));
    CUDA_CHECK(hipMemcpy(rad_data, dev_rad, (r_max-r_min)*sizeof(float), hipMemcpyDeviceToHost));
    hipFree(dev_input);
    hipFree(dev_polar_input);
    hipFree(dev_ccf_2d);
    hipFree(dev_ccf_angle);
    hipFree(dev_rad);   // was leaked in the original
    return EXIT_SUCCESS;
}
// Batched variant: num_images stacked rows x cols images are processed one
// by one; row n of ccf_data / rad_data receives the per-angle CCF /
// per-radius value of image n. The image stack is bound to the texture in
// batches limited by MAX_TEXTURE_HEIGHT.
// NOTE(review): input_image_stride is unused -- images are assumed to be
// contiguous at rows*input_row_stride apart; confirm against callers.
// NOTE(review): `cval` is accepted but the projection uses a fill of 0.
int CudaReprojectAndCorrelateArray(float* input_data, int num_images, size_t input_image_stride, size_t input_row_stride,
                                   float* ccf_data, size_t ccf_row_stride,
                                   float* rad_data, size_t rad_row_stride,
                                   int rows, int cols, int r_min, int r_max, int polar_angles,
                                   float center_y, float center_x, float cval)
{
    float *dev_input, *dev_polar_input, *dev_ccf_2d, *dev_rad;
    float *dev_ccf_angle;
    size_t pitchInput, pitchPolar, pitchRadial;
    // Memory allocation for input and output arrays
    CUDA_CHECK(hipMallocPitch((void**)&dev_input, &pitchInput, sizeof(float)*cols, rows*num_images));
    CUDA_CHECK(hipMallocPitch((void**)&dev_polar_input, &pitchPolar, sizeof(float)*polar_angles, r_max-r_min));
    CUDA_CHECK(hipMallocPitch((void**)&dev_ccf_2d, &pitchPolar, sizeof(float)*polar_angles, r_max-r_min));
    CUDA_CHECK(hipMallocPitch((void**)&dev_ccf_angle, &pitchPolar, sizeof(float)*polar_angles, num_images));
    CUDA_CHECK(hipMallocPitch((void**)&dev_rad, &pitchRadial, sizeof(float)*(r_max - r_min), num_images));
    // The texture can address at most MAX_TEXTURE_HEIGHT rows, so the stack
    // is bound in batches of batch_n images.
    int batch_n;
    if( num_images*rows >= MAX_TEXTURE_HEIGHT ){
        batch_n = MAX_TEXTURE_HEIGHT/rows;
    }else{
        batch_n = num_images;
    }
    CUDA_CHECK(hipMemcpy2D(dev_input, pitchInput, input_data, input_row_stride, sizeof(float)*cols, rows*num_images, hipMemcpyHostToDevice));
    int textureImageStride = -batch_n; // force texture binding on the first iteration
    for(int n=0; n<num_images; n++){
        if( (n - textureImageStride) >= batch_n ){ // rebind texture for the next batch
            textureImageStride = n;
            // Clamp the final batch so the binding never extends past the
            // end of the allocation (the original always bound batch_n
            // images, overrunning on the last partial batch).
            int bind_images = num_images - textureImageStride;
            if( bind_images > batch_n ) bind_images = batch_n;
            inputDataTexRef.addressMode[0] = hipAddressModeBorder;
            inputDataTexRef.addressMode[1] = hipAddressModeBorder;
            inputDataTexRef.filterMode = hipFilterModeLinear;
            inputDataTexRef.normalized = false;
            hipChannelFormatDesc channelDesc = hipCreateChannelDesc<float>();
            CUDA_CHECK(hipBindTexture2D(NULL, inputDataTexRef, (float *)((char *)dev_input + (size_t)textureImageStride*rows*pitchInput), channelDesc, cols, rows*bind_images, pitchInput));
        }
        // Projection of image n (index relative to the bound batch)
        dim3 projBlock( 32, 32 );
        dim3 projGrid((r_max - r_min + projBlock.y - 1) / projBlock.y,
                      (polar_angles + projBlock.x - 1) / projBlock.x);
        hipLaunchKernelGGL(( gComputePolarProjection), dim3(projGrid),dim3(projBlock), 0, 0, dev_polar_input, pitchPolar, n - textureImageStride,
                           rows, cols, r_min, r_max, polar_angles,
                           center_y, center_x, 0);
        // RecoverMask on the polar image
        dim3 recoverBlock( BLOCK_SIZE, 1 );
        dim3 recoverGrid( 1, r_max-r_min );
        hipLaunchKernelGGL(( gRecoverMask), dim3(recoverGrid),dim3(recoverBlock), 0, 0, dev_polar_input, r_max-r_min, polar_angles, pitchPolar);
        // Correlation: grid sized for the polar image (the original used the
        // input image's cols/rows, under-covering when cols < polar_angles).
        dim3 corrBlock( BLOCK_SIZE, 1 );
        dim3 corrGrid( (polar_angles + BLOCK_SIZE - 1)/BLOCK_SIZE, r_max-r_min );
        hipLaunchKernelGGL(( gCorrelationComputeLine), dim3(corrGrid),dim3(corrBlock), 0, 0, dev_polar_input, dev_ccf_2d, r_max-r_min, polar_angles, pitchPolar);
        dim3 angleBlock( 1, BLOCK_SIZE );
        dim3 angleGrid( polar_angles, 1 );
        hipLaunchKernelGGL(( gCCFAngle), dim3(angleGrid),dim3(angleBlock), 0, 0, dev_ccf_2d, (float *)((char *)dev_ccf_angle + n*pitchPolar), polar_angles, r_max-r_min, pitchPolar);
        dim3 radBlock( 1, BLOCK_SIZE );
        dim3 radGrid( 1, (r_max - r_min + radBlock.y - 1) / radBlock.y );
        hipLaunchKernelGGL(( gRadAngle), dim3(radGrid),dim3(radBlock), 0, 0, dev_polar_input, dev_ccf_2d, (float *)((char *)dev_rad + n*pitchRadial), polar_angles, r_max-r_min, pitchPolar);
        CUDA_CHECK(hipGetLastError());
        // All work on the current texture binding must finish before a rebind.
        CUDA_CHECK(hipDeviceSynchronize());
    }
    CUDA_CHECK(hipUnbindTexture(inputDataTexRef));
    // Copy the results once, after the loop -- the original copied the full
    // (mostly not-yet-computed) output arrays on every iteration.
    CUDA_CHECK(hipMemcpy2D(ccf_data, ccf_row_stride, dev_ccf_angle, pitchPolar, polar_angles*sizeof(float), num_images, hipMemcpyDeviceToHost));
    CUDA_CHECK(hipMemcpy2D(rad_data, rad_row_stride, dev_rad, pitchRadial, (r_max - r_min)*sizeof(float), num_images, hipMemcpyDeviceToHost));
    hipFree(dev_input);
    hipFree(dev_polar_input);
    hipFree(dev_ccf_2d);
    hipFree(dev_ccf_angle);
    hipFree(dev_rad);   // was leaked in the original
    return EXIT_SUCCESS;
}
|
533a7ad7966038269a3efdd4e227b178119e4083.cu
|
#include "cuda_compute.h"
#include <stdio.h>
#include "math_constants.h"
#define BLOCK_SIZE 512
#define MAX_WIDTH 2048
#define MAX_TEXTURE_HEIGHT 65536
texture<float, 2 > inputDataTexRef;
#define CUDA_CHECK(ans) { gpuAssert((ans), __FILE__, __LINE__); }
// Print a readable diagnostic when a CUDA API call fails and, unless the
// caller opts out, terminate the process with the error code.
inline void gpuAssert(cudaError_t code, const char *file, int line, bool abort=true)
{
    if (code == cudaSuccess)
        return;
    fprintf(stderr,"GPUassert: %s %s %d\n", cudaGetErrorString(code), file, line);
    if (abort)
        exit(code);
}
// Resample one image (bound to inputDataTexRef) into polar coordinates.
// Output: one row per radius in [r_min, r_max), one column per angle
// (polar_angles samples over [-pi, pi)). Samples falling outside the image
// receive the fill value `cval`. image_number selects an image inside a
// vertically stacked texture (offset image_number*rows).
// Expected launch: 2D grid, x dimension covers the radii, y the angles.
__global__
void gComputePolarProjection(float *dev_output, size_t pitchPolar, int image_number,
                             int rows, int cols, int r_min, int r_max, int polar_angles,
                             float center_y, float center_x, float cval)
{
    unsigned int r = blockIdx.x * blockDim.x + threadIdx.x;
    unsigned int t = blockIdx.y * blockDim.y + threadIdx.y;
    float radius = r_min + r;
    float angle_step = 2*CUDART_PI_F/polar_angles;
    float angle = -CUDART_PI_F + t*angle_step;
    float res;
    if(radius < r_max && t < polar_angles){
        // cosf/sinf keep the math in single precision; the plain cos/sin
        // overloads silently promoted the whole expression to double.
        float x = radius * cosf(angle) + center_x + 0.5f;
        float y = radius * sinf(angle) + center_y + 0.5f;
        if(x<0 || x>= cols || y<0 || y>= rows){
            res = cval;
        }else{
            res = tex2D(inputDataTexRef, x, y + image_number*rows);
        }
        *((float*)((char*)dev_output + r * pitchPolar) + t) = res;
    }
}
// Circular autocorrelation of each row:
//   out[y][x] = (1/cols) * sum_i in[y][i] * in[y][(i+x) % cols]
// One block row per image row; the whole row is staged in shared memory,
// so cols must not exceed MAX_WIDTH (checked by the host wrappers).
// Input and output are assumed to share the same row pitch (pitchInput).
__global__ void gCorrelationComputeLine( float *dev_input, float *dev_output, int rows, int cols, size_t pitchInput)
{
    int xCoord = blockIdx.x*blockDim.x + threadIdx.x;
    int tid = threadIdx.x;
    int yCoord = blockIdx.y;
    __shared__ float line_data[MAX_WIDTH];
    int i,pos;
    float val;
    float t;
    float sum = 0;
    // Cooperative load of the whole row. It must be performed by *all*
    // threads of the block -- the original guarded the load AND the barrier
    // with (xCoord < cols), so threads of a partially-filled block skipped
    // __syncthreads(), which is undefined behaviour. yCoord is uniform per
    // block (it comes from blockIdx.y), so branching on it is safe.
    if( yCoord < rows ){
        for(i=0; i*BLOCK_SIZE < cols; i++){
            if(tid+i*BLOCK_SIZE < cols){
                line_data[tid + i*BLOCK_SIZE] = *((float*)((char*)dev_input + yCoord * pitchInput) + tid + i*BLOCK_SIZE);
            }
        }
    }
    __syncthreads();
    if( xCoord<cols && yCoord<rows){
        for( i=0; i<cols; i++ ){   // loop over the whole line
            val = line_data[i];            // first multiplier
            pos = (i+xCoord)%cols;         // coordinate in the shifted line
            t = line_data[pos];            // second multiplier
            sum += t * val;
        }
        sum /= cols;                       // divide by line length -> correlation
        *((float*)((char*)dev_output + yCoord * pitchInput) + xCoord) = sum;
    }
}
// In-place mask recovery, one block of BLOCK_SIZE threads per row
// (gridDim.y == rows): entries < 0 are treated as masked and replaced by
// the mean of the row's non-masked (>= 0) entries (0 if the whole row is
// masked).
__global__
void gRecoverMask( float *dev_input, int rows, int cols, size_t pitchInput)
{
    int xCoord = threadIdx.x;
    int yCoord = blockIdx.y;
    int i,s;
    float val;
    float average_val;
    float sum_value = 0;   // running sum of non-masked values (thread 0 only)
    int non_mask = 0;      // running count of non-masked values (thread 0 only)
    int lane_valid;        // per-element mask flag for the current chunk
    __shared__ float sum_buf[BLOCK_SIZE];
    __shared__ float non_mask_buf[BLOCK_SIZE];
    // Pass 1: accumulate sum and count of non-masked entries, BLOCK_SIZE at
    // a time. The original reused `non_mask` as the per-lane flag here,
    // wiping thread 0's running count on every chunk (wrong result whenever
    // cols > BLOCK_SIZE), and ran the reduction with no barrier after the
    // shared-memory stores (data race).
    for(i=0; i*BLOCK_SIZE < cols; i++){
        if(xCoord + i*BLOCK_SIZE < cols){
            val = *((float*)((char*)dev_input + yCoord * pitchInput) + xCoord + i*BLOCK_SIZE);
        }else{
            val = -1;   // out-of-range lanes contribute nothing
        }
        if(val >= 0){
            lane_valid = 1;
        }else{
            val = 0;
            lane_valid = 0;
        }
        sum_buf[xCoord] = val;
        non_mask_buf[xCoord] = lane_valid;
        __syncthreads();   // stores must be visible before the reduction reads
        for( s=1; s<BLOCK_SIZE; s*=2) {   // sum reduction
            if(xCoord % (2*s) == 0) {
                sum_buf[xCoord] += sum_buf[xCoord + s];
                non_mask_buf[xCoord] += non_mask_buf[xCoord + s];
            }
            __syncthreads();
        }
        if(xCoord == 0) {
            sum_value += sum_buf[0];
            non_mask += non_mask_buf[0];
        }
        __syncthreads();   // keep thread 0's reads ahead of the next chunk's stores
    }
    // Broadcast the totals to the whole block through shared memory.
    if(xCoord == 0) {
        sum_buf[0] = sum_value;
        non_mask_buf[0] = non_mask;
    }
    __syncthreads();
    non_mask = non_mask_buf[0];
    sum_value = sum_buf[0];
    if(non_mask == 0) {
        average_val = 0;
    }else{
        average_val = sum_value/non_mask;
    }
    // Pass 2: overwrite masked entries with the row average.
    for(i=0; i*BLOCK_SIZE < cols; i++){
        if(xCoord + i*BLOCK_SIZE < cols){
            val = *((float*)((char*)dev_input + yCoord * pitchInput) + xCoord + i*BLOCK_SIZE);
            if(val < 0){
                *((float*)((char*)dev_input + yCoord * pitchInput) + xCoord + i*BLOCK_SIZE) = average_val;
            }
        }
    }
}
// Collapse the 2D autocorrelation (radius x angle) to a per-angle mean:
//   dev_ccf_angle[t] = mean over radii of dev_ccf_2d[r][t]
// Expected launch: blockDim == (1, BLOCK_SIZE), gridDim.x == polar_angles
// (one block per angle column).
__global__
void gCCFAngle(float *dev_ccf_2d, float *dev_ccf_angle, int polar_angles, int radius_range, size_t pitchF){
    int xCoord = blockIdx.x;
    int yCoord = threadIdx.y;
    int i,s;
    __shared__ float sum_buf[BLOCK_SIZE];
    float sum_value = 0;
    if(xCoord < polar_angles){   // uniform per block, so the barriers below are safe
        for(i=0; i*BLOCK_SIZE < radius_range; i++){
            if(yCoord + i*BLOCK_SIZE < radius_range){
                sum_buf[yCoord] = *((float*)((char*)dev_ccf_2d + (yCoord + i*BLOCK_SIZE) * pitchF) + xCoord);
            }else{
                sum_buf[yCoord] = 0;
            }
            // The original started the reduction with no barrier here, so
            // lane 0 could read neighbours' slots before they were written.
            __syncthreads();
            for( s=1; s<BLOCK_SIZE; s*=2) {   // sum reduction
                if(yCoord % (2*s) == 0) {
                    sum_buf[yCoord] += sum_buf[yCoord + s];
                }
                __syncthreads();
            }
            if(yCoord == 0){
                sum_value += sum_buf[0];
            }
        }
        if(yCoord == 0){
            dev_ccf_angle[xCoord] = sum_value/radius_range;
        }
    }
}
// Per-radius normalisation of the autocorrelation: each radius row's
// zero-shift correlation is divided by (row sum)^2 / polar_angles, with a
// guard against a zero denominator.
__global__
void gRadAngle(float *dev_polar_input, float *dev_ccf_2d, float *dev_rad, int polar_angles, int radius_range, size_t pitchF){
    int row = blockDim.y*blockIdx.y + threadIdx.y;
    if(row >= radius_range)
        return;
    const float *polar_row = (const float*)((const char*)dev_polar_input + row*pitchF);
    float total = 0;
    for(int a = 0; a < polar_angles; a++){
        total += polar_row[a];
    }
    float norm = total*total/polar_angles;
    float zero_shift = *((float*)((char*)dev_ccf_2d + row*pitchF));
    dev_rad[row] = zero_shift/(norm==0 ? 1 : norm);
}
// Host wrapper: copy one rows x cols float image to the device, bind it to
// the 2D texture and reproject it to polar coordinates into polar_data
// ((r_max-r_min) x polar_angles). input_row_stride / polar_row_stride are
// the host row strides in bytes. Returns EXIT_SUCCESS; any CUDA failure
// aborts via CUDA_CHECK.
int CudaReprojectToPolar(float *input_data, size_t input_row_stride, float *polar_data, size_t polar_row_stride,
                         int rows, int cols, int r_min, int r_max, int polar_angles,
                         float center_y, float center_x, float cval)
{
    float *dev_input, *dev_output;
    size_t pitchInput, pitchPolar;
    // Memory allocation for input and output arrays
    CUDA_CHECK(cudaMallocPitch((void**)&dev_input, &pitchInput, sizeof(float)*cols, rows));
    CUDA_CHECK(cudaMallocPitch((void**)&dev_output, &pitchPolar, sizeof(float)*polar_angles, r_max-r_min));
    CUDA_CHECK(cudaMemcpy2D(dev_input, pitchInput, input_data, input_row_stride, sizeof(float)*cols, rows, cudaMemcpyHostToDevice));
    // Configure the texture reference before binding it
    inputDataTexRef.addressMode[0] = cudaAddressModeBorder;
    inputDataTexRef.addressMode[1] = cudaAddressModeBorder;
    inputDataTexRef.filterMode = cudaFilterModeLinear;
    inputDataTexRef.normalized = false;
    cudaChannelFormatDesc channelDesc = cudaCreateChannelDesc<float>();
    CUDA_CHECK(cudaBindTexture2D(NULL, inputDataTexRef, dev_input, channelDesc, cols, rows, pitchInput));
    // Calculation (block is 32x32, so x/y extents are interchangeable here)
    dim3 projBlock( 32, 32 );
    dim3 projGrid((r_max - r_min + projBlock.y - 1) / projBlock.y,
                  (polar_angles + projBlock.x - 1) / projBlock.x);
    gComputePolarProjection<<<projGrid,projBlock>>>(dev_output, pitchPolar, 0,
                                                    rows, cols, r_min, r_max, polar_angles,
                                                    center_y, center_x, cval);
    CUDA_CHECK(cudaGetLastError());        // catch launch-configuration errors
    CUDA_CHECK(cudaDeviceSynchronize());
    CUDA_CHECK(cudaMemcpy2D(polar_data, polar_row_stride, dev_output, pitchPolar, sizeof(float)*polar_angles, r_max-r_min, cudaMemcpyDeviceToHost));
    CUDA_CHECK(cudaUnbindTexture(inputDataTexRef));   // release the binding before freeing
    cudaFree( dev_input );
    cudaFree( dev_output );
    return EXIT_SUCCESS;
}
// Host wrapper: row-wise mask recovery followed by circular row
// autocorrelation. input_data / output_data are rows x cols float images
// with numpy_row_stride bytes per host row.
int CudaCorrelateLine(float* input_data, float* output_data, size_t numpy_row_stride, int rows, int cols)
{
    float *dev_input, *dev_output;
    size_t pitchInput;
    // The correlation kernel stages a whole row in shared memory.
    if( cols > MAX_WIDTH ){
        fprintf( stderr, "Error at %s:%i : %s\n", __FILE__, __LINE__, "Image width exceeds max value, need recompile" );
        exit( -1 );
    }
    // Memory allocation: both arrays have identical dimensions, so they
    // deliberately share a single pitch value.
    CUDA_CHECK(cudaMallocPitch((void**)&dev_input, &pitchInput, sizeof(float)*cols, rows));
    CUDA_CHECK(cudaMallocPitch((void**)&dev_output, &pitchInput, sizeof(float)*cols, rows));
    CUDA_CHECK(cudaMemcpy2D(dev_input, pitchInput, input_data, numpy_row_stride, sizeof(float)*cols, rows, cudaMemcpyHostToDevice));
    // RecoverMask: one block per row
    dim3 recoverBlock( BLOCK_SIZE, 1 );
    dim3 recoverGrid( 1, rows );
    gRecoverMask<<<recoverGrid,recoverBlock>>>(dev_input, rows, cols, pitchInput);
    CUDA_CHECK(cudaGetLastError());
    // Calculation
    dim3 corrBlock( BLOCK_SIZE, 1 );
    dim3 corrGrid( cols/BLOCK_SIZE + ((cols%BLOCK_SIZE==0)?0:1), rows );
    gCorrelationComputeLine<<<corrGrid,corrBlock>>>(dev_input, dev_output, rows, cols, pitchInput);
    CUDA_CHECK(cudaGetLastError());
    CUDA_CHECK(cudaDeviceSynchronize());
    CUDA_CHECK(cudaMemcpy2D(output_data, numpy_row_stride, dev_output, pitchInput, sizeof(float)*cols, rows, cudaMemcpyDeviceToHost));
    cudaFree(dev_input);
    cudaFree(dev_output);
    return EXIT_SUCCESS;
}
// Host wrapper: reproject one image to polar coordinates, recover masked
// pixels, autocorrelate each radius row, then reduce to a per-angle CCF
// (ccf_data, polar_angles floats) and a per-radius normalised value
// (rad_data, r_max-r_min floats).
// NOTE(review): `cval` is accepted but the projection is launched with a
// fill value of 0 -- confirm whether that is intentional.
int CudaReprojectAndCorrelate(float* input_data, size_t input_row_stride, float* ccf_data, float* rad_data,
                              int rows, int cols, int r_min, int r_max, int polar_angles,
                              float center_y, float center_x, float cval)
{
    float *dev_input, *dev_polar_input, *dev_ccf_2d, *dev_rad;
    float *dev_ccf_angle;
    size_t pitchInput, pitchPolar;
    // Memory allocation for input and output arrays
    CUDA_CHECK(cudaMallocPitch((void**)&dev_input, &pitchInput, sizeof(float)*cols, rows));
    CUDA_CHECK(cudaMallocPitch((void**)&dev_polar_input, &pitchPolar, sizeof(float)*polar_angles, r_max-r_min));
    CUDA_CHECK(cudaMallocPitch((void**)&dev_ccf_2d, &pitchPolar, sizeof(float)*polar_angles, r_max-r_min));
    CUDA_CHECK(cudaMalloc((void**)&dev_ccf_angle, sizeof(float)*polar_angles));
    CUDA_CHECK(cudaMalloc((void**)&dev_rad, sizeof(float)*(r_max - r_min)));
    CUDA_CHECK(cudaMemcpy2D(dev_input, pitchInput, input_data, input_row_stride, sizeof(float)*cols, rows, cudaMemcpyHostToDevice));
    // Configure the texture reference before binding it
    inputDataTexRef.addressMode[0] = cudaAddressModeBorder;
    inputDataTexRef.addressMode[1] = cudaAddressModeBorder;
    inputDataTexRef.filterMode = cudaFilterModeLinear;
    inputDataTexRef.normalized = false;
    cudaChannelFormatDesc channelDesc = cudaCreateChannelDesc<float>();
    CUDA_CHECK(cudaBindTexture2D(NULL, inputDataTexRef, dev_input, channelDesc, cols, rows, pitchInput));
    // Projection calculation
    dim3 projBlock( 32, 32 );
    dim3 projGrid((r_max - r_min + projBlock.y - 1) / projBlock.y,
                  (polar_angles + projBlock.x - 1) / projBlock.x);
    gComputePolarProjection<<<projGrid,projBlock>>>(dev_polar_input, pitchPolar, 0,
                                                    rows, cols, r_min, r_max, polar_angles,
                                                    center_y, center_x, 0);
    CUDA_CHECK(cudaGetLastError());
    CUDA_CHECK(cudaDeviceSynchronize());
    CUDA_CHECK(cudaUnbindTexture(inputDataTexRef));
    // RecoverMask on the polar image ((r_max-r_min) rows x polar_angles cols)
    dim3 recoverBlock( BLOCK_SIZE, 1 );
    dim3 recoverGrid( 1, r_max-r_min );
    gRecoverMask<<<recoverGrid,recoverBlock>>>( dev_polar_input, r_max-r_min, polar_angles, pitchPolar);
    CUDA_CHECK(cudaGetLastError());
    // Correlation: grid sized for the *polar* image. The original sized it
    // from the input image (cols x rows), which under-covers the polar image
    // whenever cols < polar_angles or rows < r_max-r_min.
    dim3 corrBlock( BLOCK_SIZE, 1 );
    dim3 corrGrid( (polar_angles + BLOCK_SIZE - 1)/BLOCK_SIZE, r_max-r_min );
    gCorrelationComputeLine<<<corrGrid,corrBlock>>>( dev_polar_input, dev_ccf_2d, r_max-r_min, polar_angles, pitchPolar);
    CUDA_CHECK(cudaGetLastError());
    dim3 angleBlock( 1, BLOCK_SIZE );
    dim3 angleGrid( polar_angles, 1 );
    gCCFAngle<<<angleGrid,angleBlock>>>(dev_ccf_2d, dev_ccf_angle, polar_angles, r_max-r_min, pitchPolar);
    CUDA_CHECK(cudaGetLastError());
    CUDA_CHECK(cudaDeviceSynchronize());
    dim3 radBlock( 1, BLOCK_SIZE );
    dim3 radGrid( 1, (r_max - r_min + radBlock.y - 1) / radBlock.y );
    gRadAngle<<<radGrid,radBlock>>>(dev_polar_input, dev_ccf_2d, dev_rad, polar_angles, r_max-r_min, pitchPolar);
    CUDA_CHECK(cudaGetLastError());
    CUDA_CHECK(cudaMemcpy(ccf_data, dev_ccf_angle, polar_angles*sizeof(float), cudaMemcpyDeviceToHost));
    CUDA_CHECK(cudaMemcpy(rad_data, dev_rad, (r_max-r_min)*sizeof(float), cudaMemcpyDeviceToHost));
    cudaFree(dev_input);
    cudaFree(dev_polar_input);
    cudaFree(dev_ccf_2d);
    cudaFree(dev_ccf_angle);
    cudaFree(dev_rad);   // was leaked in the original
    return EXIT_SUCCESS;
}
// Batched variant: num_images stacked rows x cols images are processed one
// by one; row n of ccf_data / rad_data receives the per-angle CCF /
// per-radius value of image n. The image stack is bound to the texture in
// batches limited by MAX_TEXTURE_HEIGHT.
// NOTE(review): input_image_stride is unused -- images are assumed to be
// contiguous at rows*input_row_stride apart; confirm against callers.
// NOTE(review): `cval` is accepted but the projection uses a fill of 0.
int CudaReprojectAndCorrelateArray(float* input_data, int num_images, size_t input_image_stride, size_t input_row_stride,
                                   float* ccf_data, size_t ccf_row_stride,
                                   float* rad_data, size_t rad_row_stride,
                                   int rows, int cols, int r_min, int r_max, int polar_angles,
                                   float center_y, float center_x, float cval)
{
    float *dev_input, *dev_polar_input, *dev_ccf_2d, *dev_rad;
    float *dev_ccf_angle;
    size_t pitchInput, pitchPolar, pitchRadial;
    // Memory allocation for input and output arrays
    CUDA_CHECK(cudaMallocPitch((void**)&dev_input, &pitchInput, sizeof(float)*cols, rows*num_images));
    CUDA_CHECK(cudaMallocPitch((void**)&dev_polar_input, &pitchPolar, sizeof(float)*polar_angles, r_max-r_min));
    CUDA_CHECK(cudaMallocPitch((void**)&dev_ccf_2d, &pitchPolar, sizeof(float)*polar_angles, r_max-r_min));
    CUDA_CHECK(cudaMallocPitch((void**)&dev_ccf_angle, &pitchPolar, sizeof(float)*polar_angles, num_images));
    CUDA_CHECK(cudaMallocPitch((void**)&dev_rad, &pitchRadial, sizeof(float)*(r_max - r_min), num_images));
    // The texture can address at most MAX_TEXTURE_HEIGHT rows, so the stack
    // is bound in batches of batch_n images.
    int batch_n;
    if( num_images*rows >= MAX_TEXTURE_HEIGHT ){
        batch_n = MAX_TEXTURE_HEIGHT/rows;
    }else{
        batch_n = num_images;
    }
    CUDA_CHECK(cudaMemcpy2D(dev_input, pitchInput, input_data, input_row_stride, sizeof(float)*cols, rows*num_images, cudaMemcpyHostToDevice));
    int textureImageStride = -batch_n; // force texture binding on the first iteration
    for(int n=0; n<num_images; n++){
        if( (n - textureImageStride) >= batch_n ){ // rebind texture for the next batch
            textureImageStride = n;
            // Clamp the final batch so the binding never extends past the
            // end of the allocation (the original always bound batch_n
            // images, overrunning on the last partial batch).
            int bind_images = num_images - textureImageStride;
            if( bind_images > batch_n ) bind_images = batch_n;
            inputDataTexRef.addressMode[0] = cudaAddressModeBorder;
            inputDataTexRef.addressMode[1] = cudaAddressModeBorder;
            inputDataTexRef.filterMode = cudaFilterModeLinear;
            inputDataTexRef.normalized = false;
            cudaChannelFormatDesc channelDesc = cudaCreateChannelDesc<float>();
            CUDA_CHECK(cudaBindTexture2D(NULL, inputDataTexRef, (float *)((char *)dev_input + (size_t)textureImageStride*rows*pitchInput), channelDesc, cols, rows*bind_images, pitchInput));
        }
        // Projection of image n (index relative to the bound batch)
        dim3 projBlock( 32, 32 );
        dim3 projGrid((r_max - r_min + projBlock.y - 1) / projBlock.y,
                      (polar_angles + projBlock.x - 1) / projBlock.x);
        gComputePolarProjection<<<projGrid,projBlock>>>(dev_polar_input, pitchPolar, n - textureImageStride,
                                                        rows, cols, r_min, r_max, polar_angles,
                                                        center_y, center_x, 0);
        // RecoverMask on the polar image
        dim3 recoverBlock( BLOCK_SIZE, 1 );
        dim3 recoverGrid( 1, r_max-r_min );
        gRecoverMask<<<recoverGrid,recoverBlock>>>( dev_polar_input, r_max-r_min, polar_angles, pitchPolar);
        // Correlation: grid sized for the polar image (the original used the
        // input image's cols/rows, under-covering when cols < polar_angles).
        dim3 corrBlock( BLOCK_SIZE, 1 );
        dim3 corrGrid( (polar_angles + BLOCK_SIZE - 1)/BLOCK_SIZE, r_max-r_min );
        gCorrelationComputeLine<<<corrGrid,corrBlock>>>( dev_polar_input, dev_ccf_2d, r_max-r_min, polar_angles, pitchPolar);
        dim3 angleBlock( 1, BLOCK_SIZE );
        dim3 angleGrid( polar_angles, 1 );
        gCCFAngle<<<angleGrid,angleBlock>>>(dev_ccf_2d, (float *)((char *)dev_ccf_angle + n*pitchPolar), polar_angles, r_max-r_min, pitchPolar);
        dim3 radBlock( 1, BLOCK_SIZE );
        dim3 radGrid( 1, (r_max - r_min + radBlock.y - 1) / radBlock.y );
        gRadAngle<<<radGrid,radBlock>>>(dev_polar_input, dev_ccf_2d, (float *)((char *)dev_rad + n*pitchRadial), polar_angles, r_max-r_min, pitchPolar);
        CUDA_CHECK(cudaGetLastError());
        // All work on the current texture binding must finish before a rebind.
        CUDA_CHECK(cudaDeviceSynchronize());
    }
    CUDA_CHECK(cudaUnbindTexture(inputDataTexRef));
    // Copy the results once, after the loop -- the original copied the full
    // (mostly not-yet-computed) output arrays on every iteration.
    CUDA_CHECK(cudaMemcpy2D(ccf_data, ccf_row_stride, dev_ccf_angle, pitchPolar, polar_angles*sizeof(float), num_images, cudaMemcpyDeviceToHost));
    CUDA_CHECK(cudaMemcpy2D(rad_data, rad_row_stride, dev_rad, pitchRadial, (r_max - r_min)*sizeof(float), num_images, cudaMemcpyDeviceToHost));
    cudaFree(dev_input);
    cudaFree(dev_polar_input);
    cudaFree(dev_ccf_2d);
    cudaFree(dev_ccf_angle);
    cudaFree(dev_rad);   // was leaked in the original
    return EXIT_SUCCESS;
}
|
849333df51449e23ce5f95cb4a639df74ea20ea2.hip
|
// !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
// That is instead of RGBARGBARGBARGBA... we suggest transforming to three
// arrays (as in the previous homework we ignore the alpha channel again):
// 1) RRRRRRRR...
// 2) GGGGGGGG...
// 3) BBBBBBBB...
//
// The original layout is known as an Array of Structures (AoS) whereas the
// format we are converting to is known as a Structure of Arrays (SoA).
// As a warm-up, we will ask you to write the kernel that performs this
// separation. You should then write the "meat" of the assignment,
// which is the kernel that performs the actual blur. We provide code that
// re-combines your blurred results for each color channel.
//****************************************************************************
// You must fill in the gaussian_blur kernel to perform the blurring of the
// inputChannel, using the array of weights, and put the result in the outputChannel.
// Here is an example of computing a blur, using a weighted average, for a single
// pixel in a small image.
//
// Array of weights:
//
// 0.0 0.2 0.0
// 0.2 0.2 0.2
// 0.0 0.2 0.0
//
// Image (note that we align the array of weights to the center of the box):
//
// 1 2 5 2 0 3
// -------
// 3 |2 5 1| 6 0 0.0*2 + 0.2*5 + 0.0*1 +
// | |
// 4 |3 6 2| 1 4 -> 0.2*3 + 0.2*6 + 0.2*2 + -> 3.2
// | |
// 0 |4 0 3| 4 2 0.0*4 + 0.2*0 + 0.0*3
// -------
// 9 6 5 0 3 9
//
// (1) (2) (3)
//
// A good starting place is to map each thread to a pixel as you have before.
// Then every thread can perform steps 2 and 3 in the diagram above
// completely independently of one another.
// Note that the array of weights is square, so its height is the same as its width.
// We refer to the array of weights as a filter, and we refer to its width with the
// variable filterWidth.
//****************************************************************************
// Your homework submission will be evaluated based on correctness and speed.
// We test each pixel against a reference solution. If any pixel differs by
// more than some small threshold value, the system will tell you that your
// solution is incorrect, and it will let you try again.
// Once you have gotten that working correctly, then you can think about using
// shared memory and having the threads cooperate to achieve better performance.
//****************************************************************************
// Also note that we've supplied a helpful debugging function called checkCudaErrors.
// You should wrap your allocation and copying statements like we've done in the
// code we're supplying you. Here is an example of the unsafe way to allocate
// memory on the GPU:
//
// hipMalloc(&d_red, sizeof(unsigned char) * numRows * numCols);
//
// Here is an example of the safe way to do the same thing:
//
// checkCudaErrors(hipMalloc(&d_red, sizeof(unsigned char) * numRows * numCols));
//
// Writing code the safe way requires slightly more typing, but is very helpful for
// catching mistakes. If you write code the unsafe way and you make a mistake, then
// any subsequent kernels won't compute anything, and it will be hard to figure out
// why. Writing code the safe way will inform you as soon as you make a mistake.
// Finally, remember to free the memory you allocate at the end of the function.
//****************************************************************************
#include "utils.h"
#include <stdio.h>
__global__
void gaussian_blur(const unsigned char* const inputChannel,
                   unsigned char* const outputChannel,
                   int numRows, int numCols,
                   const float* const filter, const int filterWidth)
{
  // One thread per output pixel: convolve the filterWidth x filterWidth
  // filter (centered on the pixel) over inputChannel, clamping reads at
  // the image border (clamp-to-edge addressing).
  int px = threadIdx.x + blockIdx.x*blockDim.x;
  int py = threadIdx.y + blockIdx.y*blockDim.y;
  int ps = py*numCols+px;
  if (px >= numCols || py >= numRows)
    return;  // guard the partial blocks at the grid tail
  float res = 0.0f;
  for (int i = 0; i < filterWidth; i++)
  {
    for(int j = 0; j < filterWidth; j++)
    {
      int fx = px - filterWidth/2 + i;
      int fy = py - filterWidth/2 + j;
      // BUGFIX: clamp into the valid index range [0, numCols-1] and
      // [0, numRows-1]. The original clamped to numCols/numRows, reading
      // one element past the last column/row (out-of-bounds access).
      fx = min(max(fx, 0), numCols - 1);
      fy = min(max(fy, 0), numRows - 1);
      int pc = fx+fy*numCols;
      int pf = j*filterWidth+i;
      res += inputChannel[pc]*filter[pf];
    }
  }
  // BUGFIX: saturate instead of wrapping. "(int)res % 256" maps 256.0 to 0
  // (white pixel becomes black) whenever rounding pushes the sum past 255.
  outputChannel[ps] = (unsigned char)fminf(fmaxf(res, 0.0f), 255.0f);
}
__global__
void separateChannels(const uchar4* const inputImageRGBA,
                      int numRows,
                      int numCols,
                      unsigned char* const redChannel,
                      unsigned char* const greenChannel,
                      unsigned char* const blueChannel)
{
  // Split the interleaved RGBA image (AoS) into three planar channels (SoA);
  // the alpha component is dropped.
  const int col = blockIdx.x * blockDim.x + threadIdx.x;
  const int row = blockIdx.y * blockDim.y + threadIdx.y;
  if (col >= numCols || row >= numRows)
    return;
  const int idx = row * numCols + col;
  const uchar4 pixel = inputImageRGBA[idx];
  redChannel[idx]   = pixel.x;
  greenChannel[idx] = pixel.y;
  blueChannel[idx]  = pixel.z;
}
__global__
void recombineChannels(const unsigned char* const redChannel,
                       const unsigned char* const greenChannel,
                       const unsigned char* const blueChannel,
                       uchar4* const outputImageRGBA,
                       int numRows,
                       int numCols)
{
  // Reassemble three planar channels (SoA) into one interleaved RGBA image.
  const int col = blockIdx.x * blockDim.x + threadIdx.x;
  const int row = blockIdx.y * blockDim.y + threadIdx.y;
  if (col >= numCols || row >= numRows)
    return;
  const int idx = row * numCols + col;
  // Alpha is fixed at 255 (fully opaque).
  outputImageRGBA[idx] = make_uchar4(redChannel[idx],
                                     greenChannel[idx],
                                     blueChannel[idx],
                                     255);
}
// Device-side buffers for the three separated color planes and the filter
// weights; allocated in allocateMemoryAndCopyToGPU(), freed in cleanup().
unsigned char *d_red, *d_green, *d_blue;
float *d_filter;
// Allocates the per-channel device planes and uploads the filter weights.
// Must be called once before your_gaussian_blur(); pair with cleanup().
void allocateMemoryAndCopyToGPU(const size_t numRowsImage, const size_t numColsImage,
                                const float* const h_filter, const size_t filterWidth)
{
  // One byte per pixel per channel.
  checkCudaErrors(hipMalloc(&d_red,   sizeof(unsigned char) * numRowsImage * numColsImage));
  checkCudaErrors(hipMalloc(&d_green, sizeof(unsigned char) * numRowsImage * numColsImage));
  checkCudaErrors(hipMalloc(&d_blue,  sizeof(unsigned char) * numRowsImage * numColsImage));
  // Use size_t: sizeof() arithmetic should not be narrowed to int.
  const size_t size = filterWidth * filterWidth * sizeof(float);
  checkCudaErrors(hipMalloc(&d_filter, size));
  checkCudaErrors(hipMemcpy(d_filter, h_filter, size, hipMemcpyHostToDevice));
}
// Full blur pipeline: split RGBA into planes, blur each plane with the
// uploaded filter, then recombine. Assumes allocateMemoryAndCopyToGPU()
// already ran (d_red/d_green/d_blue/d_filter are valid).
void your_gaussian_blur(const uchar4 * const h_inputImageRGBA, uchar4 * const d_inputImageRGBA,
                        uchar4* const d_outputImageRGBA, const size_t numRows, const size_t numCols,
                        unsigned char *d_redBlurred,
                        unsigned char *d_greenBlurred,
                        unsigned char *d_blueBlurred,
                        const int filterWidth)
{
  const dim3 blockSize(16, 16, 1);
  // Ceiling division: the original "n/b + 1" launched a superfluous extra
  // block whenever the dimension was an exact multiple of the block size.
  const dim3 gridSize((numCols + blockSize.x - 1) / blockSize.x,
                      (numRows + blockSize.y - 1) / blockSize.y, 1);
  // AoS -> SoA: split the RGBA image into planar channels.
  hipLaunchKernelGGL(( separateChannels), dim3(gridSize), dim3(blockSize), 0, 0, d_inputImageRGBA, numRows, numCols, d_red, d_green, d_blue);
  checkCudaErrors(hipDeviceSynchronize()); checkCudaErrors(hipGetLastError());
  // Blur each channel independently with the same filter.
  hipLaunchKernelGGL(( gaussian_blur), dim3(gridSize), dim3(blockSize), 0, 0, d_red, d_redBlurred, numRows, numCols, d_filter, filterWidth);
  hipLaunchKernelGGL(( gaussian_blur), dim3(gridSize), dim3(blockSize), 0, 0, d_green, d_greenBlurred, numRows, numCols, d_filter, filterWidth);
  hipLaunchKernelGGL(( gaussian_blur), dim3(gridSize), dim3(blockSize), 0, 0, d_blue, d_blueBlurred, numRows, numCols, d_filter, filterWidth);
  checkCudaErrors(hipDeviceSynchronize()); checkCudaErrors(hipGetLastError());
  // SoA -> AoS: recombine the blurred planes into the output image.
  hipLaunchKernelGGL(( recombineChannels), dim3(gridSize), dim3(blockSize), 0, 0, d_redBlurred,
                     d_greenBlurred,
                     d_blueBlurred,
                     d_outputImageRGBA,
                     numRows,
                     numCols);
  // BUGFIX: check the sync result too — the original discarded it,
  // inconsistently with every other call in this function.
  checkCudaErrors(hipDeviceSynchronize()); checkCudaErrors(hipGetLastError());
}
//Free all the memory that we allocated
//TODO: make sure you free any arrays that you allocated
// Releases the device buffers allocated in allocateMemoryAndCopyToGPU().
void cleanup() {
  checkCudaErrors(hipFree(d_red));
  checkCudaErrors(hipFree(d_green));
  checkCudaErrors(hipFree(d_blue));
  checkCudaErrors(hipFree(d_filter));
}
|
849333df51449e23ce5f95cb4a639df74ea20ea2.cu
|
// That is instead of RGBARGBARGBARGBA... we suggest transforming to three
// arrays (as in the previous homework we ignore the alpha channel again):
// 1) RRRRRRRR...
// 2) GGGGGGGG...
// 3) BBBBBBBB...
//
// The original layout is known an Array of Structures (AoS) whereas the
// format we are converting to is known as a Structure of Arrays (SoA).
// As a warm-up, we will ask you to write the kernel that performs this
// separation. You should then write the "meat" of the assignment,
// which is the kernel that performs the actual blur. We provide code that
// re-combines your blurred results for each color channel.
//****************************************************************************
// You must fill in the gaussian_blur kernel to perform the blurring of the
// inputChannel, using the array of weights, and put the result in the outputChannel.
// Here is an example of computing a blur, using a weighted average, for a single
// pixel in a small image.
//
// Array of weights:
//
// 0.0 0.2 0.0
// 0.2 0.2 0.2
// 0.0 0.2 0.0
//
// Image (note that we align the array of weights to the center of the box):
//
// 1 2 5 2 0 3
// -------
// 3 |2 5 1| 6 0 0.0*2 + 0.2*5 + 0.0*1 +
// | |
// 4 |3 6 2| 1 4 -> 0.2*3 + 0.2*6 + 0.2*2 + -> 3.2
// | |
// 0 |4 0 3| 4 2 0.0*4 + 0.2*0 + 0.0*3
// -------
// 9 6 5 0 3 9
//
// (1) (2) (3)
//
// A good starting place is to map each thread to a pixel as you have before.
// Then every thread can perform steps 2 and 3 in the diagram above
// completely independently of one another.
// Note that the array of weights is square, so its height is the same as its width.
// We refer to the array of weights as a filter, and we refer to its width with the
// variable filterWidth.
//****************************************************************************
// Your homework submission will be evaluated based on correctness and speed.
// We test each pixel against a reference solution. If any pixel differs by
// more than some small threshold value, the system will tell you that your
// solution is incorrect, and it will let you try again.
// Once you have gotten that working correctly, then you can think about using
// shared memory and having the threads cooperate to achieve better performance.
//****************************************************************************
// Also note that we've supplied a helpful debugging function called checkCudaErrors.
// You should wrap your allocation and copying statements like we've done in the
// code we're supplying you. Here is an example of the unsafe way to allocate
// memory on the GPU:
//
// cudaMalloc(&d_red, sizeof(unsigned char) * numRows * numCols);
//
// Here is an example of the safe way to do the same thing:
//
// checkCudaErrors(cudaMalloc(&d_red, sizeof(unsigned char) * numRows * numCols));
//
// Writing code the safe way requires slightly more typing, but is very helpful for
// catching mistakes. If you write code the unsafe way and you make a mistake, then
// any subsequent kernels won't compute anything, and it will be hard to figure out
// why. Writing code the safe way will inform you as soon as you make a mistake.
// Finally, remember to free the memory you allocate at the end of the function.
//****************************************************************************
#include "utils.h"
#include <stdio.h>
__global__
void gaussian_blur(const unsigned char* const inputChannel,
                   unsigned char* const outputChannel,
                   int numRows, int numCols,
                   const float* const filter, const int filterWidth)
{
  // One thread per output pixel: convolve the filterWidth x filterWidth
  // filter (centered on the pixel) over inputChannel, clamping reads at
  // the image border (clamp-to-edge addressing).
  int px = threadIdx.x + blockIdx.x*blockDim.x;
  int py = threadIdx.y + blockIdx.y*blockDim.y;
  int ps = py*numCols+px;
  if (px >= numCols || py >= numRows)
    return;  // guard the partial blocks at the grid tail
  float res = 0.0f;
  for (int i = 0; i < filterWidth; i++)
  {
    for(int j = 0; j < filterWidth; j++)
    {
      int fx = px - filterWidth/2 + i;
      int fy = py - filterWidth/2 + j;
      // BUGFIX: clamp into the valid index range [0, numCols-1] and
      // [0, numRows-1]. The original clamped to numCols/numRows, reading
      // one element past the last column/row (out-of-bounds access).
      fx = min(max(fx, 0), numCols - 1);
      fy = min(max(fy, 0), numRows - 1);
      int pc = fx+fy*numCols;
      int pf = j*filterWidth+i;
      res += inputChannel[pc]*filter[pf];
    }
  }
  // BUGFIX: saturate instead of wrapping. "(int)res % 256" maps 256.0 to 0
  // (white pixel becomes black) whenever rounding pushes the sum past 255.
  outputChannel[ps] = (unsigned char)fminf(fmaxf(res, 0.0f), 255.0f);
}
__global__
void separateChannels(const uchar4* const inputImageRGBA,
                      int numRows,
                      int numCols,
                      unsigned char* const redChannel,
                      unsigned char* const greenChannel,
                      unsigned char* const blueChannel)
{
  // Split the interleaved RGBA image (AoS) into three planar channels (SoA);
  // the alpha component is dropped.
  const int col = blockIdx.x * blockDim.x + threadIdx.x;
  const int row = blockIdx.y * blockDim.y + threadIdx.y;
  if (col >= numCols || row >= numRows)
    return;
  const int idx = row * numCols + col;
  const uchar4 pixel = inputImageRGBA[idx];
  redChannel[idx]   = pixel.x;
  greenChannel[idx] = pixel.y;
  blueChannel[idx]  = pixel.z;
}
__global__
void recombineChannels(const unsigned char* const redChannel,
                       const unsigned char* const greenChannel,
                       const unsigned char* const blueChannel,
                       uchar4* const outputImageRGBA,
                       int numRows,
                       int numCols)
{
  // Reassemble three planar channels (SoA) into one interleaved RGBA image.
  const int col = blockIdx.x * blockDim.x + threadIdx.x;
  const int row = blockIdx.y * blockDim.y + threadIdx.y;
  if (col >= numCols || row >= numRows)
    return;
  const int idx = row * numCols + col;
  // Alpha is fixed at 255 (fully opaque).
  outputImageRGBA[idx] = make_uchar4(redChannel[idx],
                                     greenChannel[idx],
                                     blueChannel[idx],
                                     255);
}
// Device-side buffers for the three separated color planes and the filter
// weights; allocated in allocateMemoryAndCopyToGPU(), freed in cleanup().
unsigned char *d_red, *d_green, *d_blue;
float *d_filter;
// Allocates the per-channel device planes and uploads the filter weights.
// Must be called once before your_gaussian_blur(); pair with cleanup().
void allocateMemoryAndCopyToGPU(const size_t numRowsImage, const size_t numColsImage,
                                const float* const h_filter, const size_t filterWidth)
{
  // One byte per pixel per channel.
  checkCudaErrors(cudaMalloc(&d_red,   sizeof(unsigned char) * numRowsImage * numColsImage));
  checkCudaErrors(cudaMalloc(&d_green, sizeof(unsigned char) * numRowsImage * numColsImage));
  checkCudaErrors(cudaMalloc(&d_blue,  sizeof(unsigned char) * numRowsImage * numColsImage));
  // Use size_t: sizeof() arithmetic should not be narrowed to int.
  const size_t size = filterWidth * filterWidth * sizeof(float);
  checkCudaErrors(cudaMalloc(&d_filter, size));
  checkCudaErrors(cudaMemcpy(d_filter, h_filter, size, cudaMemcpyHostToDevice));
}
// Full blur pipeline: split RGBA into planes, blur each plane with the
// uploaded filter, then recombine. Assumes allocateMemoryAndCopyToGPU()
// already ran (d_red/d_green/d_blue/d_filter are valid).
void your_gaussian_blur(const uchar4 * const h_inputImageRGBA, uchar4 * const d_inputImageRGBA,
                        uchar4* const d_outputImageRGBA, const size_t numRows, const size_t numCols,
                        unsigned char *d_redBlurred,
                        unsigned char *d_greenBlurred,
                        unsigned char *d_blueBlurred,
                        const int filterWidth)
{
  const dim3 blockSize(16, 16, 1);
  // Ceiling division: the original "n/b + 1" launched a superfluous extra
  // block whenever the dimension was an exact multiple of the block size.
  const dim3 gridSize((numCols + blockSize.x - 1) / blockSize.x,
                      (numRows + blockSize.y - 1) / blockSize.y, 1);
  // AoS -> SoA: split the RGBA image into planar channels.
  separateChannels<<<gridSize, blockSize>>>(d_inputImageRGBA, numRows, numCols, d_red, d_green, d_blue);
  checkCudaErrors(cudaDeviceSynchronize()); checkCudaErrors(cudaGetLastError());
  // Blur each channel independently with the same filter.
  gaussian_blur<<<gridSize, blockSize>>>(d_red, d_redBlurred, numRows, numCols, d_filter, filterWidth);
  gaussian_blur<<<gridSize, blockSize>>>(d_green, d_greenBlurred, numRows, numCols, d_filter, filterWidth);
  gaussian_blur<<<gridSize, blockSize>>>(d_blue, d_blueBlurred, numRows, numCols, d_filter, filterWidth);
  checkCudaErrors(cudaDeviceSynchronize()); checkCudaErrors(cudaGetLastError());
  // SoA -> AoS: recombine the blurred planes into the output image.
  recombineChannels<<<gridSize, blockSize>>>(d_redBlurred,
                                             d_greenBlurred,
                                             d_blueBlurred,
                                             d_outputImageRGBA,
                                             numRows,
                                             numCols);
  // BUGFIX: check the sync result too — the original discarded it,
  // inconsistently with every other call in this function.
  checkCudaErrors(cudaDeviceSynchronize()); checkCudaErrors(cudaGetLastError());
}
//Free all the memory that we allocated
//TODO: make sure you free any arrays that you allocated
// Releases the device buffers allocated in allocateMemoryAndCopyToGPU().
void cleanup() {
  checkCudaErrors(cudaFree(d_red));
  checkCudaErrors(cudaFree(d_green));
  checkCudaErrors(cudaFree(d_blue));
  checkCudaErrors(cudaFree(d_filter));
}
|
551cd3bc29790d09e135c576e9101ae84dbfbff4.hip
|
// !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
/*
* Software License Agreement (BSD License)
*
* Point Cloud Library (PCL) - www.pointclouds.org
* Copyright (c) 2011, Willow Garage, Inc.
*
* All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions
* are met:
*
* * Redistributions of source code must retain the above copyright
* notice, this list of conditions and the following disclaimer.
* * Redistributions in binary form must reproduce the above
* copyright notice, this list of conditions and the following
* disclaimer in the documentation and/or other materials provided
* with the distribution.
* * Neither the name of Willow Garage, Inc. nor the names of its
* contributors may be used to endorse or promote products derived
* from this software without specific prior written permission.
*
* THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
* "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
* LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS
* FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE
* COPYRIGHT OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT,
* INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING,
* BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
* LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
* CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
* LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN
* ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
* POSSIBILITY OF SUCH DAMAGE.
*
*/
#include "device.hpp"
//#include <boost/graph/buffer_concepts.hpp>
namespace pcl
{
namespace device
{
namespace kinfuLS
{
// Resets every voxel of the TSDF volume to (tsdf = 0, weight = 0).
// Launch layout: one thread per (x, y) column; each thread walks the whole
// Z extent of its column.
template<typename T>
__global__ void
initializeVolume (int3 voxels_size,PtrStep<T> volume)
{
  int x = threadIdx.x + blockIdx.x * blockDim.x;
  int y = threadIdx.y + blockIdx.y * blockDim.y;

  if (x < voxels_size.x && y < voxels_size.y)
  {
    T *pos = volume.ptr(y) + x;
    // Element stride between consecutive Z slices (volume.step is in bytes,
    // so divide by the element size).
    int z_step = voxels_size.y * volume.step / sizeof(*pos);

#pragma unroll
    for(int z = 0; z < voxels_size.z; ++z, pos+=z_step)
      pack_tsdf (0.f, 0, *pos);
  }
}
// Clears (tsdf = 0, weight = 0) every voxel whose logical coordinate lies
// inside the sphere (center, radius). `shift` is the rolling-buffer origin:
// logical (x, y, z) maps to physical storage (ax, ay, az) modulo the
// volume dimensions.
template<typename T>
__global__ void
clearSphereKernel(PtrStep<T> volume,int3 shift,float3 center,float radius)
{
  int x = threadIdx.x + blockIdx.x * blockDim.x;
  int y = threadIdx.y + blockIdx.y * blockDim.y;

  if (x < VOLUME_X && y < VOLUME_Y)
  {
    // Wrap the shifted X/Y indices back into the physical volume.
    int ax = x + shift.x;
    if (ax >= VOLUME_X)
      ax -= VOLUME_X;
    int ay = y + shift.y;
    if (ay >= VOLUME_Y)
      ay -= VOLUME_Y;

    T *pos = volume.ptr(ay) + ax;
    // Element stride between consecutive Z slices (volume.step is in bytes).
    int z_step = VOLUME_Y * volume.step / sizeof(*pos);

#pragma unroll
    for(int z = 0; z < VOLUME_Z; ++z)
    {
      int az = z + shift.z;
      if (az >= VOLUME_Z)
        az -= VOLUME_Z;
      // NOTE(review): the sphere test uses the UNSHIFTED logical (x, y, z)
      // while the write goes to the shifted physical location — presumably
      // `center` is expressed in the logical frame; confirm with the caller.
      float3 pt;
      pt.x = float(x);
      pt.y = float(y);
      pt.z = float(z);

      if (norm(pt - center) < radius)
        pack_tsdf(0.f, 0, *(pos + (az * z_step)));
    }
  }
}
// Clears (tsdf = 0, weight = 0) every voxel whose logical coordinate lies
// inside the half-open axis-aligned box [m, M). Same rolling-buffer
// addressing as clearSphereKernel: `shift` maps logical to physical indices.
template<typename T>
__global__ void
clearBBoxKernel(PtrStep<T> volume,int3 shift,float3 m,float3 M)
{
  int x = threadIdx.x + blockIdx.x * blockDim.x;
  int y = threadIdx.y + blockIdx.y * blockDim.y;

  if (x < VOLUME_X && y < VOLUME_Y)
  {
    // Wrap the shifted X/Y indices back into the physical volume.
    int ax = x + shift.x;
    if (ax >= VOLUME_X)
      ax -= VOLUME_X;
    int ay = y + shift.y;
    if (ay >= VOLUME_Y)
      ay -= VOLUME_Y;

    T *pos = volume.ptr(ay) + ax;
    // Element stride between consecutive Z slices (volume.step is in bytes).
    int z_step = VOLUME_Y * volume.step / sizeof(*pos);

#pragma unroll
    for(int z = 0; z < VOLUME_Z; ++z)
    {
      int az = z + shift.z;
      if (az >= VOLUME_Z)
        az -= VOLUME_Z;
      // NOTE(review): the box test uses the UNSHIFTED logical (x, y, z) —
      // presumably m/M are in the logical frame; confirm with the caller.
      float3 pt;
      pt.x = float(x);
      pt.y = float(y);
      pt.z = float(z);

      if ((pt.x >= m.x) && (pt.y >= m.y) && (pt.z >= m.z) &&
          (pt.x < M.x) && (pt.y < M.y) && (pt.z < M.z))
        pack_tsdf(0.f, 0, *(pos + (az * z_step)));
    }
  }
}
// Clears a slice of the cyclic TSDF buffer after the volume has been
// shifted. Columns in the "black zone" (inside the X/Y slice bounds) are
// cleared over the FULL Z range; all other columns (the "red zone") are
// cleared only over |maxBounds.z| Z steps starting at minBounds.z.
// Because the volume is a rolling buffer, voxel pointers are wrapped back
// into [tsdf_memory_start, tsdf_memory_end] whenever they overrun.
template<typename T>
__global__ void
clearSliceKernel (PtrStep<T> volume, pcl::gpu::kinfuLS::tsdf_buffer buffer, int3 minBounds, int3 maxBounds)
{
  int x = threadIdx.x + blockIdx.x * blockDim.x;
  int y = threadIdx.y + blockIdx.y * blockDim.y;

  // Compute relative indices: coordinates at or below the lower bound are
  // lifted by one full volume period so the interval test below works for
  // wrapped slices.
  int idX, idY;

  if(x <= minBounds.x)
    idX = x + buffer.voxels_size.x;
  else
    idX = x;

  if(y <= minBounds.y)
    idY = y + buffer.voxels_size.y;
  else
    idY = y;

  if ( x < buffer.voxels_size.x && y < buffer.voxels_size.y)
  {
    if( (idX >= minBounds.x && idX <= maxBounds.x) || (idY >= minBounds.y && idY <= maxBounds.y) )
    {
      // BLACK ZONE => clear on all Z values
      ///Pointer to the first x,y,0
      T *pos = volume.ptr(y) + x;
      ///Get the step on Z (volume.step is in bytes)
      int z_step = buffer.voxels_size.y * volume.step / sizeof(*pos);
      ///Get the size of the whole TSDF memory (in elements)
      int size = buffer.tsdf_memory_end - buffer.tsdf_memory_start + 1;
      ///Move along z axis
#pragma unroll
      for(int z = 0; z < buffer.voxels_size.z; ++z, pos+=z_step)
      {
        ///If we went outside of the memory, make sure we go back to the begining of it
        if(pos > buffer.tsdf_memory_end)
          pos = pos - size;

        if (pos >= buffer.tsdf_memory_start && pos <= buffer.tsdf_memory_end) // quickfix for http://dev.pointclouds.org/issues/894
          pack_tsdf (0.f, 0, *pos);
      }
    }
    else /* if( idX > maxBounds.x && idY > maxBounds.y)*/
    {
      ///RED ZONE => clear only appropriate Z
      ///Pointer to the first x,y,0
      T *pos = volume.ptr(y) + x;
      ///Get the step on Z (volume.step is in bytes)
      int z_step = buffer.voxels_size.y * volume.step / sizeof(*pos);
      ///Get the size of the whole TSDF memory (in elements)
      int size = buffer.tsdf_memory_end - buffer.tsdf_memory_start + 1;
      ///Move pointer to the Z origin
      pos+= minBounds.z * z_step;
      ///If the Z offset is negative, we move the pointer back
      if(maxBounds.z < 0)
        pos += maxBounds.z * z_step;
      ///We make sure that we are not already before the start of the memory
      if(pos < buffer.tsdf_memory_start)
        pos = pos + size;

      // |maxBounds.z| encodes the slice thickness along Z.
      int nbSteps = abs(maxBounds.z);

#pragma unroll
      for(int z = 0; z < nbSteps; ++z, pos+=z_step)
      {
        ///If we went outside of the memory, make sure we go back to the begining of it
        if(pos > buffer.tsdf_memory_end)
          pos = pos - size;

        if (pos >= buffer.tsdf_memory_start && pos <= buffer.tsdf_memory_end) // quickfix for http://dev.pointclouds.org/issues/894
          pack_tsdf (0.f, 0, *pos);
      }
    } //else /* if( idX > maxBounds.x && idY > maxBounds.y)*/
  } // if ( x < VOLUME_X && y < VOLUME_Y)
} // clearSliceKernel
// Host wrapper: zero-initializes the whole TSDF volume.
// Launches one thread per (x, y) column; the kernel iterates over Z itself.
void
initVolume (int3 voxels_size,PtrStep<short2> volume)
{
  const dim3 threads (16, 16);
  const dim3 blocks (divUp (voxels_size.x, threads.x),
                     divUp (voxels_size.y, threads.y),
                     1);

  hipLaunchKernelGGL(( initializeVolume), dim3(blocks), dim3(threads), 0, 0, voxels_size,volume);
  cudaSafeCall ( hipGetLastError () );
  cudaSafeCall (hipDeviceSynchronize ());
}
// Host wrapper: clears all voxels inside the given sphere.
// `tsdf_origin` is the rolling-buffer shift passed through to the kernel.
void
clearSphere(PtrStep<short2> volume,int3 tsdf_origin,float3 center,float radius)
{
  const dim3 threads (32, 16);
  const dim3 blocks (divUp (VOLUME_X, threads.x),
                     divUp (VOLUME_Y, threads.y),
                     1);

  hipLaunchKernelGGL(( clearSphereKernel), dim3(blocks), dim3(threads), 0, 0, volume,tsdf_origin,center,radius);
  cudaSafeCall ( hipGetLastError () );
  cudaSafeCall (hipDeviceSynchronize ());
}
// Host wrapper: clears all voxels inside the axis-aligned box [m, M).
// `origin` is the rolling-buffer shift passed through to the kernel.
void
clearBBox(PtrStep<short2> volume,const int3& origin,const float3& m,const float3& M)
{
  const dim3 threads (32, 16);
  const dim3 blocks (divUp (VOLUME_X, threads.x),
                     divUp (VOLUME_Y, threads.y),
                     1);

  hipLaunchKernelGGL(( clearBBoxKernel), dim3(blocks), dim3(threads), 0, 0, volume,origin,m,M);
  cudaSafeCall ( hipGetLastError () );
  cudaSafeCall (hipDeviceSynchronize ());
}
}
}
}
namespace pcl
{
namespace device
{
namespace kinfuLS
{
// TSDF integration functor: projects every voxel into the current depth
// frame and folds the observed signed distance into the voxel via a
// running weighted average. Invoked one thread per (x, y) column by
// integrateTsdfKernel.
struct Tsdf
{
  enum
  {
    CTA_SIZE_X = 32, CTA_SIZE_Y = 8,   // expected block dimensions
    MAX_WEIGHT = 1 << 7                // cap for the running weight
  };

  mutable PtrStep<short2> volume;      // packed (tsdf, weight) voxels
  float3 cell_size;                    // metric size of one voxel

  Intr intr;                           // camera intrinsics

  Mat33 Rcurr_inv;                     // world -> camera rotation
  float3 tcurr;                        // camera position in world frame

  PtrStepSz<ushort> depth_raw; //depth in mm

  float tranc_dist_mm;                 // truncation distance, mm

  // Voxel index -> metric world coordinate of the voxel center.
  __device__ __forceinline__ float3
  getVoxelGCoo (int x, int y, int z) const
  {
    float3 coo = make_float3 (x, y, z);
    coo += 0.5f;         //shift to cell center;

    coo.x *= cell_size.x;
    coo.y *= cell_size.y;
    coo.z *= cell_size.z;

    return coo;
  }

  // Integrate the current depth frame into this thread's voxel column.
  __device__ __forceinline__ void
  operator () () const
  {
    int x = threadIdx.x + blockIdx.x * CTA_SIZE_X;
    int y = threadIdx.y + blockIdx.y * CTA_SIZE_Y;

    if (x >= VOLUME_X || y >= VOLUME_Y)
      return;

    short2 *pos = volume.ptr (y) + x;
    // Element stride between consecutive Z slices (step is in bytes).
    int elem_step = volume.step * VOLUME_Y / sizeof(*pos);

    for (int z = 0; z < VOLUME_Z; ++z, pos += elem_step)
    {
      float3 v_g = getVoxelGCoo (x, y, z);            //3 // p

      //transform to current camera coordinate space
      float3 v = Rcurr_inv * (v_g - tcurr);           //4

      int2 coo;           //project to current cam
      coo.x = __float2int_rn (v.x * intr.fx / v.z + intr.cx);
      coo.y = __float2int_rn (v.y * intr.fy / v.z + intr.cy);

      // Only voxels in front of the camera that project inside the frame.
      if (v.z > 0 && coo.x >= 0 && coo.y >= 0 && coo.x < depth_raw.cols && coo.y < depth_raw.rows)           //6
      {
        int Dp = depth_raw.ptr (coo.y)[coo.x];

        if (Dp != 0)   // 0 = no depth measurement at this pixel
        {
          // Back-projected ray direction; lambda_inv converts the
          // Euclidean voxel distance to depth along the optical axis.
          float xl = (coo.x - intr.cx) / intr.fx;
          float yl = (coo.y - intr.cy) / intr.fy;
          float lambda_inv = rsqrtf (xl * xl + yl * yl + 1);

          float sdf = 1000 * norm (tcurr - v_g) * lambda_inv - Dp; //mm

          sdf *= (-1);   // positive = in front of the surface

          if (sdf >= -tranc_dist_mm)
          {
            // Truncate to [-inf, 1] and blend with the stored value.
            float tsdf = fmin (1.f, sdf / tranc_dist_mm);

            int weight_prev;
            float tsdf_prev;

            //read and unpack
            unpack_tsdf (*pos, tsdf_prev, weight_prev);

            const int Wrk = 1;   // weight of the new observation

            float tsdf_new = (tsdf_prev * weight_prev + Wrk * tsdf) / (weight_prev + Wrk);
            int weight_new = min (weight_prev + Wrk, MAX_WEIGHT);

            pack_tsdf (tsdf_new, weight_new, *pos);
          }
        }
      }
    }
  }
};
// Uploads externally-known occupancy into a slice of the cyclic TSDF
// buffer: wherever known_status is non-zero, the voxel's weight is raised
// by that amount (capped at Tsdf::MAX_WEIGHT), and voxels that had weight 0
// get tsdf = 1.0 (known-empty space). Zone layout and pointer wraparound
// mirror clearSliceKernel: black zone = full Z range, red zone = |maxBounds.z|
// Z steps starting at minBounds.z.
template<typename T>
__global__ void
uploadKnownToTSDFSliceKernel (PtrStep<T> volume, pcl::gpu::kinfuLS::tsdf_buffer buffer, int3 minBounds, int3 maxBounds,
                              PtrStep<short> known_status)
{
  int x = threadIdx.x + blockIdx.x * blockDim.x;
  int y = threadIdx.y + blockIdx.y * blockDim.y;

  // Compute relative indices (lift wrapped coordinates by one period so the
  // interval tests below work for slices that cross the buffer edge).
  int idX, idY;

  if(x <= minBounds.x)
    idX = x + buffer.voxels_size.x;
  else
    idX = x;

  if(y <= minBounds.y)
    idY = y + buffer.voxels_size.y;
  else
    idY = y;

  if ( x < buffer.voxels_size.x && y < buffer.voxels_size.y)
  {
    if( (idX >= minBounds.x && idX <= maxBounds.x) || (idY >= minBounds.y && idY <= maxBounds.y) )
    {
      // BLACK ZONE => update on all Z values
      ///Pointer to the first x,y,0
      T *pos = volume.ptr(y) + x;
      ///Get the step on Z (volume.step is in bytes)
      int z_step = buffer.voxels_size.y * volume.step / sizeof(*pos);
      ///Get the size of the whole TSDF memory (in elements)
      int size = buffer.tsdf_memory_end - buffer.tsdf_memory_start + 1;
      // Parallel cursor into the known_status grid, wrapped the same way.
      short * ks = known_status.ptr(y) + x;
      short * max_ks = known_status.ptr(0) + buffer.voxels_size.x*buffer.voxels_size.y*buffer.voxels_size.z;
      ///Move along z axis
#pragma unroll
      for(int z = 0; z < buffer.voxels_size.z; ++z, pos+=z_step, ks+=z_step)
      {
        ///If we went outside of the memory, make sure we go back to the begining of it
        if(pos > buffer.tsdf_memory_end)
          pos = pos - size;
        if (ks >= max_ks)
          ks -= size;

        const short increment = *ks;
        if (increment && pos >= buffer.tsdf_memory_start && pos <= buffer.tsdf_memory_end) {
          float tsdf;
          int w;
          unpack_tsdf(*pos, tsdf, w);
          if (w == 0)
            tsdf = 1.0;   // previously-unseen voxel: mark as empty space
          pack_tsdf (tsdf, min(increment + w,(Tsdf::MAX_WEIGHT)), *pos);
        }
      }
    }
    else /* if( idX > maxBounds.x && idY > maxBounds.y)*/
    {
      ///RED ZONE => update only appropriate Z
      ///Pointer to the first x,y,0
      T *pos = volume.ptr(y) + x;
      ///Get the step on Z (volume.step is in bytes)
      int z_step = buffer.voxels_size.y * volume.step / sizeof(*pos);
      ///Get the size of the whole TSDF memory (in elements)
      int size = buffer.tsdf_memory_end - buffer.tsdf_memory_start + 1;
      short * ks = known_status.ptr(y) + x;
      short * max_ks = known_status.ptr(0) + buffer.voxels_size.x*buffer.voxels_size.y*buffer.voxels_size.z;
      ///Move pointer to the Z origin
      pos+= minBounds.z * z_step;
      ks+= minBounds.z * z_step;
      ///If the Z offset is negative, we move the pointer back
      if(maxBounds.z < 0) {
        pos += maxBounds.z * z_step;
        // BUGFIX: advance ks by the same maxBounds.z offset as pos. The
        // original added minBounds.z here a second time, desynchronizing
        // the known_status cursor from the voxel cursor for negative-Z
        // slices.
        ks += maxBounds.z * z_step;
      }
      ///We make sure that we are not already before the start of the memory
      if(pos < buffer.tsdf_memory_start) {
        pos = pos + size;
        ks += size;
      }

      int nbSteps = abs(maxBounds.z);
#pragma unroll
      for(int z = 0; z < nbSteps; ++z, pos+=z_step, ks+=z_step)
      {
        ///If we went outside of the memory, make sure we go back to the begining of it
        if(pos > buffer.tsdf_memory_end)
          pos = pos - size;
        if (ks >= max_ks)
          ks -= size;

        const short increment = *ks;
        if (increment && pos >= buffer.tsdf_memory_start && pos <= buffer.tsdf_memory_end) {
          float tsdf;
          int w;
          unpack_tsdf(*pos, tsdf, w);
          if (w == 0)
            tsdf = 1.0;   // previously-unseen voxel: mark as empty space
          pack_tsdf (tsdf, min(increment + w,(Tsdf::MAX_WEIGHT)), *pos);
        }
      }
    } //else /* if( idX > maxBounds.x && idY > maxBounds.y)*/
  } // if ( x < VOLUME_X && y < VOLUME_Y)
} // uploadKnownToTSDFSliceKernel
// Thin kernel wrapper: executes the Tsdf integration functor once per thread.
__global__ void
integrateTsdfKernel (const Tsdf tsdf) {
  tsdf ();
}
// Alternative TSDF integration kernel (free-function variant of the Tsdf
// functor). One thread per (x, y) column: the camera-space position of the
// column base is computed once, then advanced incrementally along Z.
__global__ void
tsdf2 (PtrStep<short2> volume, const float tranc_dist_mm, const Mat33 Rcurr_inv, float3 tcurr,
       const Intr intr, const PtrStepSz<ushort> depth_raw, const float3 cell_size)
{
  int x = threadIdx.x + blockIdx.x * blockDim.x;
  int y = threadIdx.y + blockIdx.y * blockDim.y;

  if (x >= VOLUME_X || y >= VOLUME_Y)
    return;

  short2 *pos = volume.ptr (y) + x;
  // Element stride between consecutive Z slices (step is in bytes).
  int elem_step = volume.step * VOLUME_Y / sizeof(short2);

  // World-space coordinate of the (x, y, 0) voxel center, relative to the
  // camera position.
  float v_g_x = (x + 0.5f) * cell_size.x - tcurr.x;
  float v_g_y = (y + 0.5f) * cell_size.y - tcurr.y;
  float v_g_z = (0 + 0.5f) * cell_size.z - tcurr.z;

  // Camera-space coordinate of the column base (rotated by Rcurr_inv).
  float v_x = Rcurr_inv.data[0].x * v_g_x + Rcurr_inv.data[0].y * v_g_y + Rcurr_inv.data[0].z * v_g_z;
  float v_y = Rcurr_inv.data[1].x * v_g_x + Rcurr_inv.data[1].y * v_g_y + Rcurr_inv.data[1].z * v_g_z;
  float v_z = Rcurr_inv.data[2].x * v_g_x + Rcurr_inv.data[2].y * v_g_y + Rcurr_inv.data[2].z * v_g_z;

//#pragma unroll
  for (int z = 0; z < VOLUME_Z; ++z)
  {
    // World-space position of the current voxel (relative to the camera).
    float3 vr;
    vr.x = v_g_x;
    vr.y = v_g_y;
    vr.z = (v_g_z + z * cell_size.z);

    // Camera-space position: only the Z-dependent terms change per step.
    float3 v;
    v.x = v_x + Rcurr_inv.data[0].z * z * cell_size.z;
    v.y = v_y + Rcurr_inv.data[1].z * z * cell_size.z;
    v.z = v_z + Rcurr_inv.data[2].z * z * cell_size.z;

    int2 coo;         //project to current cam
    coo.x = __float2int_rn (v.x * intr.fx / v.z + intr.cx);
    coo.y = __float2int_rn (v.y * intr.fy / v.z + intr.cy);

    // Only voxels in front of the camera that project inside the frame.
    if (v.z > 0 && coo.x >= 0 && coo.y >= 0 && coo.x < depth_raw.cols && coo.y < depth_raw.rows)         //6
    {
      int Dp = depth_raw.ptr (coo.y)[coo.x]; //mm

      if (Dp != 0)   // 0 = no depth measurement at this pixel
      {
        // Back-projected ray direction; lambda_inv converts the Euclidean
        // voxel distance to depth along the optical axis.
        float xl = (coo.x - intr.cx) / intr.fx;
        float yl = (coo.y - intr.cy) / intr.fy;
        float lambda_inv = rsqrtf (xl * xl + yl * yl + 1);

        float sdf = Dp - norm (vr) * lambda_inv * 1000; //mm

        if (sdf >= -tranc_dist_mm)
        {
          // Truncate to [-inf, 1] and blend into the stored voxel value.
          float tsdf = fmin (1.f, sdf / tranc_dist_mm);

          int weight_prev;
          float tsdf_prev;

          //read and unpack
          unpack_tsdf (*pos, tsdf_prev, weight_prev);

          const int Wrk = 1;   // weight of the new observation

          float tsdf_new = (tsdf_prev * weight_prev + Wrk * tsdf) / (weight_prev + Wrk);
          int weight_new = min (weight_prev + Wrk, Tsdf::MAX_WEIGHT);

          pack_tsdf (tsdf_new, weight_new, *pos);
        }
      }
    }
    pos += elem_step;
  }       /* for(int z = 0; z < VOLUME_Z; ++z) */
}      /* __global__ */
//////////////////////////////////////////////////////////////////////////////////////////////////////////////////
// Host wrapper for the Tsdf integration functor: fills in the functor
// parameters and launches integrateTsdfKernel over the full X/Y extent.
// NOTE(review): the kernel launch is wrapped in "#if 0", so this function
// currently performs NO integration — only the error checks run. Confirm
// whether this was intentionally disabled (e.g. superseded by tsdf23).
void
integrateTsdfVolume (const PtrStepSz<ushort>& depth_raw, const Intr& intr, const float3& volume_size,
                     const Mat33& Rcurr_inv, const float3& tcurr, float tranc_dist,
                     PtrStep<short2> volume)
{
  Tsdf tsdf;

  tsdf.volume = volume;
  // Metric size of one voxel in each dimension.
  tsdf.cell_size.x = volume_size.x / VOLUME_X;
  tsdf.cell_size.y = volume_size.y / VOLUME_Y;
  tsdf.cell_size.z = volume_size.z / VOLUME_Z;
  tsdf.intr = intr;

  tsdf.Rcurr_inv = Rcurr_inv;
  tsdf.tcurr = tcurr;
  tsdf.depth_raw = depth_raw;

  tsdf.tranc_dist_mm = tranc_dist*1000; //mm

  dim3 block (Tsdf::CTA_SIZE_X, Tsdf::CTA_SIZE_Y);
  dim3 grid (divUp (VOLUME_X, block.x), divUp (VOLUME_Y, block.y));

#if 0
  //tsdf2<<<grid, block>>>(volume, tranc_dist, Rcurr_inv, tcurr, intr, depth_raw, tsdf.cell_size);
  hipLaunchKernelGGL(( integrateTsdfKernel), dim3(grid), dim3(block), 0, 0, tsdf);
#endif
  cudaSafeCall ( hipGetLastError () );
  cudaSafeCall (hipDeviceSynchronize ());
}
}
}
}
namespace pcl
{
namespace device
{
namespace kinfuLS
{
// Converts raw depth (millimeters, measured along the optical axis) into
// metric distance along the back-projected viewing ray, in meters.
// One thread per depth pixel.
__global__ void
scaleDepth (const PtrStepSz<ushort> depth, PtrStep<float> scaled, const Intr intr)
{
  const int u = threadIdx.x + blockIdx.x * blockDim.x;
  const int v = threadIdx.y + blockIdx.y * blockDim.y;

  if (u >= depth.cols || v >= depth.rows)
    return;

  const int raw_mm = depth.ptr (v)[u];

  // Length of the back-projected ray direction (dir_x, dir_y, 1).
  const float dir_x = (u - intr.cx) / intr.fx;
  const float dir_y = (v - intr.cy) / intr.fy;
  const float ray_len = sqrtf (dir_x * dir_x + dir_y * dir_y + 1);

  scaled.ptr (v)[u] = raw_mm * ray_len / 1000.f; //meters
}
// Main TSDF integration kernel for the cyclic (rolling) buffer: one thread
// per (x, y) column. Per-voxel camera projection is computed incrementally
// along Z, and the voxel pointer is shifted/wrapped through the rolling
// buffer via shift_tsdf_pointer.
// Expects depthScaled from scaleDepth (metric distance along the ray).
__global__ void
tsdf23 (const PtrStepSz<float> depthScaled, PtrStep<short2> volume,
        const float tranc_dist, const Mat33 Rcurr_inv, const float3 tcurr, const Intr intr, const float3 cell_size, const pcl::gpu::kinfuLS::tsdf_buffer buffer)
{
  int x = threadIdx.x + blockIdx.x * blockDim.x;
  int y = threadIdx.y + blockIdx.y * blockDim.y;

  if (x >= buffer.voxels_size.x || y >= buffer.voxels_size.y)
    return;

  // World-space coordinate of the (x, y, 0) voxel center, relative to the
  // camera position.
  float v_g_x = (x + 0.5f) * cell_size.x - tcurr.x;
  float v_g_y = (y + 0.5f) * cell_size.y - tcurr.y;
  float v_g_z = (0 + 0.5f) * cell_size.z - tcurr.z;

  float v_g_part_norm = v_g_x * v_g_x + v_g_y * v_g_y;

  // Camera-space coordinate of the column base, pre-multiplied by the
  // focal lengths so projection below is just (v / z + c).
  float v_x = (Rcurr_inv.data[0].x * v_g_x + Rcurr_inv.data[0].y * v_g_y + Rcurr_inv.data[0].z * v_g_z) * intr.fx;
  float v_y = (Rcurr_inv.data[1].x * v_g_x + Rcurr_inv.data[1].y * v_g_y + Rcurr_inv.data[1].z * v_g_z) * intr.fy;
  float v_z = (Rcurr_inv.data[2].x * v_g_x + Rcurr_inv.data[2].y * v_g_y + Rcurr_inv.data[2].z * v_g_z);

  float z_scaled = 0;

  // Per-Z-step increments of the camera-space coordinate.
  float Rcurr_inv_0_z_scaled = Rcurr_inv.data[0].z * cell_size.z * intr.fx;
  float Rcurr_inv_1_z_scaled = Rcurr_inv.data[1].z * cell_size.z * intr.fy;

  float tranc_dist_inv = 1.0f / tranc_dist;

  short2* pos = volume.ptr (y) + x;

  // shift the pointer to relative indices
  shift_tsdf_pointer(&pos, buffer);

  // Element stride between consecutive Z slices (step is in bytes).
  int elem_step = volume.step * buffer.voxels_size.y / sizeof(short2);

//#pragma unroll
  for (int z = 0; z < buffer.voxels_size.z;
       ++z,
       v_g_z += cell_size.z,
       z_scaled += cell_size.z,
       v_x += Rcurr_inv_0_z_scaled,
       v_y += Rcurr_inv_1_z_scaled,
       pos += elem_step)
  {
    // As the pointer is incremented in the for loop, we have to make sure that the pointer is never outside the memory
    if(pos > buffer.tsdf_memory_end)
      pos -= (buffer.tsdf_memory_end - buffer.tsdf_memory_start + 1);

    float inv_z = 1.0f / (v_z + Rcurr_inv.data[2].z * z_scaled);
    if (inv_z < 0)     // voxel is behind the camera
        continue;

    // project to current cam
    int2 coo =
    {
      __float2int_rn (v_x * inv_z + intr.cx),
      __float2int_rn (v_y * inv_z + intr.cy)
    };

    if (coo.x >= 0 && coo.y >= 0 && coo.x < depthScaled.cols && coo.y < depthScaled.rows)         //6
    {
      float Dp_scaled = depthScaled.ptr (coo.y)[coo.x]; //meters

      // Signed distance between measured surface and voxel (meters).
      float sdf = Dp_scaled - sqrtf (v_g_z * v_g_z + v_g_part_norm);

      if (Dp_scaled != 0 && sdf >= -tranc_dist) //meters
      {
        // Truncate to [-inf, 1] and blend into the stored voxel value.
        float tsdf = fmin (1.0f, sdf * tranc_dist_inv);

        //read and unpack
        float tsdf_prev;
        int weight_prev;
        unpack_tsdf (*pos, tsdf_prev, weight_prev);

        const int Wrk = 1;   // weight of the new observation

        float tsdf_new = (tsdf_prev * weight_prev + Wrk * tsdf) / (weight_prev + Wrk);
        int weight_new = min (weight_prev + Wrk, Tsdf::MAX_WEIGHT);

        pack_tsdf (tsdf_new, weight_new, *pos);
      }
    }
  }       // for(int z = 0; z < VOLUME_Z; ++z)
}      // __global__
// TSDF integration kernel with a surface-normal "hack": before fusing a new
// depth sample into a voxel, a normal is estimated from central differences
// of neighbouring TSDF cells, and integration is skipped when the viewing
// ray is too oblique to that normal (cosine below 0.5).
// Launch layout: one thread per (x, y) voxel column; each thread walks the
// full Z extent of the volume.
__global__ void
tsdf23normal_hack (const PtrStepSz<float> depthScaled, PtrStep<short2> volume,
const float tranc_dist, const Mat33 Rcurr_inv, const float3 tcurr, const Intr intr, const float3 cell_size)
{
int x = threadIdx.x + blockIdx.x * blockDim.x;
int y = threadIdx.y + blockIdx.y * blockDim.y;
if (x >= VOLUME_X || y >= VOLUME_Y)
return;
// Voxel-centre position (z == 0) in global coordinates, relative to the
// current camera translation.
const float v_g_x = (x + 0.5f) * cell_size.x - tcurr.x;
const float v_g_y = (y + 0.5f) * cell_size.y - tcurr.y;
float v_g_z = (0 + 0.5f) * cell_size.z - tcurr.z;
float v_g_part_norm = v_g_x * v_g_x + v_g_y * v_g_y;
// Voxel position rotated into the camera frame, pre-multiplied by the focal
// lengths so the projection below is a single multiply-add.
float v_x = (Rcurr_inv.data[0].x * v_g_x + Rcurr_inv.data[0].y * v_g_y + Rcurr_inv.data[0].z * v_g_z) * intr.fx;
float v_y = (Rcurr_inv.data[1].x * v_g_x + Rcurr_inv.data[1].y * v_g_y + Rcurr_inv.data[1].z * v_g_z) * intr.fy;
float v_z = (Rcurr_inv.data[2].x * v_g_x + Rcurr_inv.data[2].y * v_g_y + Rcurr_inv.data[2].z * v_g_z);
float z_scaled = 0;
// Per-z-slice increments, so the camera-frame position is updated
// incrementally in the loop header instead of being recomputed.
float Rcurr_inv_0_z_scaled = Rcurr_inv.data[0].z * cell_size.z * intr.fx;
float Rcurr_inv_1_z_scaled = Rcurr_inv.data[1].z * cell_size.z * intr.fy;
float tranc_dist_inv = 1.0f / tranc_dist;
short2* pos = volume.ptr (y) + x;
// Element step between consecutive z-slices of the volume.
int elem_step = volume.step * VOLUME_Y / sizeof(short2);
//#pragma unroll
for (int z = 0; z < VOLUME_Z;
++z,
v_g_z += cell_size.z,
z_scaled += cell_size.z,
v_x += Rcurr_inv_0_z_scaled,
v_y += Rcurr_inv_1_z_scaled,
pos += elem_step)
{
float inv_z = 1.0f / (v_z + Rcurr_inv.data[2].z * z_scaled);
// Negative depth: the voxel is behind the camera.
if (inv_z < 0)
continue;
// project to current cam
int2 coo =
{
__float2int_rn (v_x * inv_z + intr.cx),
__float2int_rn (v_y * inv_z + intr.cy)
};
if (coo.x >= 0 && coo.y >= 0 && coo.x < depthScaled.cols && coo.y < depthScaled.rows) //6
{
float Dp_scaled = depthScaled.ptr (coo.y)[coo.x]; //meters
// Signed distance between the measured surface and the voxel centre.
float sdf = Dp_scaled - sqrtf (v_g_z * v_g_z + v_g_part_norm);
if (Dp_scaled != 0 && sdf >= -tranc_dist) //meters
{
float tsdf = fmin (1.0f, sdf * tranc_dist_inv);
bool integrate = true;
// Only voxels with a full neighbourhood inside the volume take part in
// the normal test; border voxels always integrate.
if ((x > 0 && x < VOLUME_X-2) && (y > 0 && y < VOLUME_Y-2) && (z > 0 && z < VOLUME_Z-2))
{
const float qnan = numeric_limits<float>::quiet_NaN();
float3 normal = make_float3(qnan, qnan, qnan);
float Fn, Fp;
int Wn = 0, Wp = 0;
// Central differences along z, y and x; a component is only filled in
// when both neighbours carry sufficient weight (> 16).
unpack_tsdf (*(pos + elem_step), Fn, Wn);
unpack_tsdf (*(pos - elem_step), Fp, Wp);
if (Wn > 16 && Wp > 16)
normal.z = (Fn - Fp)/cell_size.z;
unpack_tsdf (*(pos + volume.step/sizeof(short2) ), Fn, Wn);
unpack_tsdf (*(pos - volume.step/sizeof(short2) ), Fp, Wp);
if (Wn > 16 && Wp > 16)
normal.y = (Fn - Fp)/cell_size.y;
unpack_tsdf (*(pos + 1), Fn, Wn);
unpack_tsdf (*(pos - 1), Fp, Wp);
if (Wn > 16 && Wp > 16)
normal.x = (Fn - Fp)/cell_size.x;
// NOTE(review): `!= qnan` is true even for NaN operands (NaN != NaN is
// true), so this guard never filters; unset components are still caught
// below because a NaN norm2 fails the `norm2 >= 1e-10` test.
if (normal.x != qnan && normal.y != qnan && normal.z != qnan)
{
float norm2 = dot(normal, normal);
if (norm2 >= 1e-10)
{
normal *= rsqrt(norm2);
// Cosine between the estimated normal and the camera->voxel ray.
float nt = v_g_x * normal.x + v_g_y * normal.y + v_g_z * normal.z;
float cosine = nt * rsqrt(v_g_x * v_g_x + v_g_y * v_g_y + v_g_z * v_g_z);
if (cosine < 0.5)
integrate = false;
}
}
}
if (integrate)
{
//read and unpack
float tsdf_prev;
int weight_prev;
unpack_tsdf (*pos, tsdf_prev, weight_prev);
const int Wrk = 1;
// Running weighted average of the TSDF; weight clamped to MAX_WEIGHT.
float tsdf_new = (tsdf_prev * weight_prev + Wrk * tsdf) / (weight_prev + Wrk);
int weight_new = min (weight_prev + Wrk, Tsdf::MAX_WEIGHT);
pack_tsdf (tsdf_new, weight_new, *pos);
}
}
}
} // for(int z = 0; z < VOLUME_Z; ++z)
} // __global__
//////////////////////////////////////////////////////////////////////////////////////////////////////////////////
// Host entry point (HIP build): integrates one depth frame into the
// cyclic-buffer TSDF volume.  First rescales the raw depth along each
// viewing ray (mm -> metres), then launches the tsdf23 integration kernel
// over the X/Y voxel plane.
void
integrateTsdfVolume (const PtrStepSz<ushort>& depth, const Intr& intr,
const float3& volume_size, const Mat33& Rcurr_inv, const float3& tcurr,
float tranc_dist,
PtrStep<short2> volume, const pcl::gpu::kinfuLS::tsdf_buffer* buffer, DeviceArray2D<float>& depthScaled)
{
depthScaled.create (depth.rows, depth.cols);
dim3 block_scale (32, 8);
dim3 grid_scale (divUp (depth.cols, block_scale.x), divUp (depth.rows, block_scale.y));
//scales depth along ray and converts mm -> meters.
hipLaunchKernelGGL(( scaleDepth), dim3(grid_scale), dim3(block_scale), 0, 0, depth, depthScaled, intr);
cudaSafeCall ( hipGetLastError () );
// Metric size of a single voxel.
float3 cell_size;
cell_size.x = volume_size.x / buffer->voxels_size.x;
cell_size.y = volume_size.y / buffer->voxels_size.y;
cell_size.z = volume_size.z / buffer->voxels_size.z;
//dim3 block(Tsdf::CTA_SIZE_X, Tsdf::CTA_SIZE_Y);
dim3 block (16, 16);
dim3 grid (divUp (buffer->voxels_size.x, block.x), divUp (buffer->voxels_size.y, block.y));
hipLaunchKernelGGL(( tsdf23), dim3(grid), dim3(block), 0, 0, depthScaled, volume, tranc_dist, Rcurr_inv, tcurr, intr, cell_size, *buffer);
//tsdf23normal_hack<<<grid, block>>>(depthScaled, volume, tranc_dist, Rcurr_inv, tcurr, intr, cell_size);
cudaSafeCall ( hipGetLastError () );
cudaSafeCall (hipDeviceSynchronize ());
}
/////////////////////////////////////////////////////////////////////////////////////////////////////////////////
// Host wrapper (HIP build): clears the slice of the cyclic TSDF buffer that
// is invalidated when the volume origin shifts by (shiftX, shiftY, shiftZ)
// voxels.  The X/Y bounds select the wrapped band of columns to wipe; the Z
// bounds are passed through as (origin, shift) and interpreted by the kernel.
void
clearTSDFSlice (PtrStep<short2> volume, pcl::gpu::kinfuLS::tsdf_buffer* buffer, int shiftX, int shiftY, int shiftZ)
{
int newX = buffer->origin_GRID.x + shiftX;
int newY = buffer->origin_GRID.y + shiftY;
int3 minBounds, maxBounds;
//X
if(newX >= 0)
{
minBounds.x = buffer->origin_GRID.x;
maxBounds.x = newX;
}
else
{
// Negative target index: wrap around the cyclic buffer.
minBounds.x = newX + buffer->voxels_size.x;
maxBounds.x = buffer->origin_GRID.x + buffer->voxels_size.x;
}
if(minBounds.x > maxBounds.x)
std::swap(minBounds.x, maxBounds.x);
//Y
if(newY >= 0)
{
minBounds.y = buffer->origin_GRID.y;
maxBounds.y = newY;
}
else
{
minBounds.y = newY + buffer->voxels_size.y;
maxBounds.y = buffer->origin_GRID.y + buffer->voxels_size.y;
}
if(minBounds.y > maxBounds.y)
std::swap(minBounds.y, maxBounds.y);
//Z
minBounds.z = buffer->origin_GRID.z;
maxBounds.z = shiftZ;
// call kernel
dim3 block (32, 16);
dim3 grid (1, 1, 1);
grid.x = divUp (buffer->voxels_size.x, block.x);
grid.y = divUp (buffer->voxels_size.y, block.y);
hipLaunchKernelGGL(( clearSliceKernel), dim3(grid), dim3(block), 0, 0, volume, *buffer, minBounds, maxBounds);
cudaSafeCall ( hipGetLastError () );
cudaSafeCall (hipDeviceSynchronize ());
}
/////////////////////////////////////////////////////////////////////////////////////////////////////////////////
// Host wrapper (HIP build): uploads externally-tracked "known status"
// weights into the slice re-entering the cyclic TSDF buffer after a shift of
// (shiftX, shiftY, shiftZ) voxels.  X/Y bounds are computed from the
// *previous* origin (origin - shift); the Z start is wrapped into
// [0, voxels_size.z) and the kernel walks |shiftZ| slices from there.
void
uploadKnownToTSDFSlice (PtrStep<short2> volume, pcl::gpu::kinfuLS::tsdf_buffer* buffer, int shiftX, int shiftY, int shiftZ,
PtrStep<short> known_status)
{
int oldX = buffer->origin_GRID.x - shiftX;
int oldY = buffer->origin_GRID.y - shiftY;
int oldZ = buffer->origin_GRID.z - shiftZ;
int3 minBounds, maxBounds;
//X
if(oldX >= 0)
{
minBounds.x = buffer->origin_GRID.x;
maxBounds.x = oldX;
}
else
{
// Negative index: wrap around the cyclic buffer.
minBounds.x = oldX + buffer->voxels_size.x;
maxBounds.x = buffer->origin_GRID.x + buffer->voxels_size.x;
}
if(minBounds.x > maxBounds.x)
std::swap(minBounds.x, maxBounds.x);
//Y
if(oldY >= 0)
{
minBounds.y = buffer->origin_GRID.y;
maxBounds.y = oldY;
}
else
{
minBounds.y = oldY + buffer->voxels_size.y;
maxBounds.y = buffer->origin_GRID.y + buffer->voxels_size.y;
}
if(minBounds.y > maxBounds.y)
std::swap(minBounds.y, maxBounds.y);
// Wrap the Z start index into the valid range.
while (oldZ < 0)
oldZ += buffer->voxels_size.z;
while (oldZ >= buffer->voxels_size.z)
oldZ -= buffer->voxels_size.z;
//Z
minBounds.z = oldZ;
maxBounds.z = shiftZ;
// call kernel
dim3 block (32, 16);
dim3 grid (1, 1, 1);
grid.x = divUp (buffer->voxels_size.x, block.x);
grid.y = divUp (buffer->voxels_size.y, block.y);
hipLaunchKernelGGL(( uploadKnownToTSDFSliceKernel), dim3(grid), dim3(block), 0, 0, volume, *buffer, minBounds, maxBounds, known_status);
cudaSafeCall ( hipGetLastError () );
cudaSafeCall (hipDeviceSynchronize ());
}
}
}
}
|
551cd3bc29790d09e135c576e9101ae84dbfbff4.cu
|
/*
* Software License Agreement (BSD License)
*
* Point Cloud Library (PCL) - www.pointclouds.org
* Copyright (c) 2011, Willow Garage, Inc.
*
* All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions
* are met:
*
* * Redistributions of source code must retain the above copyright
* notice, this list of conditions and the following disclaimer.
* * Redistributions in binary form must reproduce the above
* copyright notice, this list of conditions and the following
* disclaimer in the documentation and/or other materials provided
* with the distribution.
* * Neither the name of Willow Garage, Inc. nor the names of its
* contributors may be used to endorse or promote products derived
* from this software without specific prior written permission.
*
* THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
* "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
* LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS
* FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE
* COPYRIGHT OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT,
* INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING,
* BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
* LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
* CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
* LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN
* ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
* POSSIBILITY OF SUCH DAMAGE.
*
*/
#include "device.hpp"
//#include <boost/graph/buffer_concepts.hpp>
namespace pcl
{
namespace device
{
namespace kinfuLS
{
// Resets every voxel in the (x, y) column owned by this thread to an empty
// TSDF sample (value 0, weight 0).  Launch layout: one thread per voxel
// column over the X/Y plane.
template<typename T>
__global__ void
initializeVolume (int3 voxels_size,PtrStep<T> volume)
{
  const int col = threadIdx.x + blockIdx.x * blockDim.x;
  const int row = threadIdx.y + blockIdx.y * blockDim.y;
  if (col >= voxels_size.x || row >= voxels_size.y)
    return;

  T *cell = volume.ptr (row) + col;
  // Distance (in elements) between consecutive z-slices of the volume.
  const int slice_stride = voxels_size.y * volume.step / sizeof (*cell);

#pragma unroll
  for (int z = 0; z < voxels_size.z; ++z)
  {
    pack_tsdf (0.f, 0, *cell);
    cell += slice_stride;
  }
}
// Resets to empty every voxel whose (unshifted) grid coordinate lies inside
// a sphere of `radius` around `center`.  `shift` maps grid coordinates to
// the physical cell in the cyclic buffer (wrapped per axis).
// NOTE(review): the distance test uses the unshifted (x, y, z) while the
// write goes to the shifted (ax, ay, az) cell, i.e. `center` appears to be
// expressed in unshifted grid coordinates — confirm against callers.
template<typename T>
__global__ void
clearSphereKernel(PtrStep<T> volume,int3 shift,float3 center,float radius)
{
int x = threadIdx.x + blockIdx.x * blockDim.x;
int y = threadIdx.y + blockIdx.y * blockDim.y;
if (x < VOLUME_X && y < VOLUME_Y)
{
// Apply the cyclic-buffer shift (wrap per axis).
int ax = x + shift.x;
if (ax >= VOLUME_X)
ax -= VOLUME_X;
int ay = y + shift.y;
if (ay >= VOLUME_Y)
ay -= VOLUME_Y;
T *pos = volume.ptr(ay) + ax;
// Element step between consecutive z-slices.
int z_step = VOLUME_Y * volume.step / sizeof(*pos);
#pragma unroll
for(int z = 0; z < VOLUME_Z; ++z)
{
int az = z + shift.z;
if (az >= VOLUME_Z)
az -= VOLUME_Z;
float3 pt;
pt.x = float(x);
pt.y = float(y);
pt.z = float(z);
if (norm(pt - center) < radius)
pack_tsdf(0.f, 0, *(pos + (az * z_step)));
}
}
}
// Resets to empty every voxel whose (unshifted) grid coordinate lies inside
// the half-open axis-aligned box [m, M).  `shift` maps grid coordinates to
// the physical cell in the cyclic buffer (wrapped per axis).
// NOTE(review): as in clearSphereKernel, the box test uses the unshifted
// (x, y, z) while the write goes to the shifted (ax, ay, az) cell — m/M are
// apparently in unshifted grid coordinates; confirm against callers.
template<typename T>
__global__ void
clearBBoxKernel(PtrStep<T> volume,int3 shift,float3 m,float3 M)
{
int x = threadIdx.x + blockIdx.x * blockDim.x;
int y = threadIdx.y + blockIdx.y * blockDim.y;
if (x < VOLUME_X && y < VOLUME_Y)
{
// Apply the cyclic-buffer shift (wrap per axis).
int ax = x + shift.x;
if (ax >= VOLUME_X)
ax -= VOLUME_X;
int ay = y + shift.y;
if (ay >= VOLUME_Y)
ay -= VOLUME_Y;
T *pos = volume.ptr(ay) + ax;
// Element step between consecutive z-slices.
int z_step = VOLUME_Y * volume.step / sizeof(*pos);
#pragma unroll
for(int z = 0; z < VOLUME_Z; ++z)
{
int az = z + shift.z;
if (az >= VOLUME_Z)
az -= VOLUME_Z;
float3 pt;
pt.x = float(x);
pt.y = float(y);
pt.z = float(z);
if ((pt.x >= m.x) && (pt.y >= m.y) && (pt.z >= m.z) &&
(pt.x < M.x) && (pt.y < M.y) && (pt.z < M.z))
pack_tsdf(0.f, 0, *(pos + (az * z_step)));
}
}
}
// Clears (packs value 0, weight 0) the slice of the cyclic TSDF buffer
// selected by minBounds/maxBounds.  One thread per (x, y) column.  Columns
// whose wrapped index falls inside the shifted X/Y band ("black zone") are
// cleared over the whole Z extent; the remaining columns ("red zone") are
// cleared only over |maxBounds.z| slices starting at minBounds.z.  Pointers
// are wrapped around the linear TSDF memory as they advance.
template<typename T>
__global__ void
clearSliceKernel (PtrStep<T> volume, pcl::gpu::kinfuLS::tsdf_buffer buffer, int3 minBounds, int3 maxBounds)
{
int x = threadIdx.x + blockIdx.x * blockDim.x;
int y = threadIdx.y + blockIdx.y * blockDim.y;
//compute relative indices
int idX, idY;
if(x <= minBounds.x)
idX = x + buffer.voxels_size.x;
else
idX = x;
if(y <= minBounds.y)
idY = y + buffer.voxels_size.y;
else
idY = y;
if ( x < buffer.voxels_size.x && y < buffer.voxels_size.y)
{
if( (idX >= minBounds.x && idX <= maxBounds.x) || (idY >= minBounds.y && idY <= maxBounds.y) )
{
// BLACK ZONE => clear on all Z values
///Pointer to the first x,y,0
T *pos = volume.ptr(y) + x;
///Get the step on Z
int z_step = buffer.voxels_size.y * volume.step / sizeof(*pos);
///Get the size of the whole TSDF memory
int size = buffer.tsdf_memory_end - buffer.tsdf_memory_start + 1;
///Move along z axis
#pragma unroll
for(int z = 0; z < buffer.voxels_size.z; ++z, pos+=z_step)
{
///If we went outside of the memory, make sure we go back to the begining of it
if(pos > buffer.tsdf_memory_end)
pos = pos - size;
if (pos >= buffer.tsdf_memory_start && pos <= buffer.tsdf_memory_end) // quickfix for http://dev.pointclouds.org/issues/894
pack_tsdf (0.f, 0, *pos);
}
}
else /* if( idX > maxBounds.x && idY > maxBounds.y)*/
{
///RED ZONE => clear only appropriate Z
///Pointer to the first x,y,0
T *pos = volume.ptr(y) + x;
///Get the step on Z
int z_step = buffer.voxels_size.y * volume.step / sizeof(*pos);
///Get the size of the whole TSDF memory
int size = buffer.tsdf_memory_end - buffer.tsdf_memory_start + 1;
///Move pointer to the Z origin
pos+= minBounds.z * z_step;
///If the Z offset is negative, we move the pointer back
if(maxBounds.z < 0)
pos += maxBounds.z * z_step;
///We make sure that we are not already before the start of the memory
if(pos < buffer.tsdf_memory_start)
pos = pos + size;
int nbSteps = abs(maxBounds.z);
#pragma unroll
for(int z = 0; z < nbSteps; ++z, pos+=z_step)
{
///If we went outside of the memory, make sure we go back to the begining of it
if(pos > buffer.tsdf_memory_end)
pos = pos - size;
if (pos >= buffer.tsdf_memory_start && pos <= buffer.tsdf_memory_end) // quickfix for http://dev.pointclouds.org/issues/894
pack_tsdf (0.f, 0, *pos);
}
} //else /* if( idX > maxBounds.x && idY > maxBounds.y)*/
} // if ( x < VOLUME_X && y < VOLUME_Y)
} // clearSliceKernel
// Host wrapper: zero-initialises the whole TSDF volume on the device
// (value 0, weight 0 in every voxel).
void
initVolume (int3 voxels_size,PtrStep<short2> volume)
{
  const dim3 threads (16, 16);
  const dim3 blocks (divUp (voxels_size.x, threads.x),
                     divUp (voxels_size.y, threads.y),
                     1);

  initializeVolume<<<blocks, threads>>>(voxels_size, volume);
  cudaSafeCall (cudaGetLastError ());       // launch-configuration errors
  cudaSafeCall (cudaDeviceSynchronize ());  // asynchronous execution errors
}
// Host wrapper: clears all voxels inside the given sphere.  One thread per
// (x, y) voxel column; the kernel walks the Z axis itself.
void
clearSphere(PtrStep<short2> volume,int3 tsdf_origin,float3 center,float radius)
{
  const dim3 threads (32, 16);
  const dim3 blocks (divUp (VOLUME_X, threads.x),
                     divUp (VOLUME_Y, threads.y),
                     1);

  clearSphereKernel<<<blocks, threads>>>(volume, tsdf_origin, center, radius);
  cudaSafeCall (cudaGetLastError ());       // launch-configuration errors
  cudaSafeCall (cudaDeviceSynchronize ());  // asynchronous execution errors
}
// Host wrapper: clears all voxels inside the axis-aligned box [m, M).
// One thread per (x, y) voxel column; the kernel walks the Z axis itself.
void
clearBBox(PtrStep<short2> volume,const int3& origin,const float3& m,const float3& M)
{
  const dim3 threads (32, 16);
  const dim3 blocks (divUp (VOLUME_X, threads.x),
                     divUp (VOLUME_Y, threads.y),
                     1);

  clearBBoxKernel<<<blocks, threads>>>(volume, origin, m, M);
  cudaSafeCall (cudaGetLastError ());       // launch-configuration errors
  cudaSafeCall (cudaDeviceSynchronize ());  // asynchronous execution errors
}
}
}
}
namespace pcl
{
namespace device
{
namespace kinfuLS
{
// Functor implementing the original (millimetre-based) TSDF integration
// pass: each thread walks one (x, y) voxel column, projects every voxel into
// the current raw depth image and fuses the truncated signed distance with a
// running weighted average.
struct Tsdf
{
enum
{
CTA_SIZE_X = 32, CTA_SIZE_Y = 8,
MAX_WEIGHT = 1 << 7
};
mutable PtrStep<short2> volume;
float3 cell_size;
Intr intr;
Mat33 Rcurr_inv;
float3 tcurr;
PtrStepSz<ushort> depth_raw; //depth in mm
float tranc_dist_mm;
// Returns the centre of voxel (x, y, z) in global metric coordinates.
__device__ __forceinline__ float3
getVoxelGCoo (int x, int y, int z) const
{
float3 coo = make_float3 (x, y, z);
coo += 0.5f; //shift to cell center;
coo.x *= cell_size.x;
coo.y *= cell_size.y;
coo.z *= cell_size.z;
return coo;
}
// Integration kernel body; launched with CTA_SIZE_X x CTA_SIZE_Y blocks.
__device__ __forceinline__ void
operator () () const
{
int x = threadIdx.x + blockIdx.x * CTA_SIZE_X;
int y = threadIdx.y + blockIdx.y * CTA_SIZE_Y;
if (x >= VOLUME_X || y >= VOLUME_Y)
return;
short2 *pos = volume.ptr (y) + x;
// Element step between consecutive z-slices of the volume.
int elem_step = volume.step * VOLUME_Y / sizeof(*pos);
for (int z = 0; z < VOLUME_Z; ++z, pos += elem_step)
{
float3 v_g = getVoxelGCoo (x, y, z); //3 // p
//tranform to curr cam coo space
float3 v = Rcurr_inv * (v_g - tcurr); //4
int2 coo; //project to current cam
coo.x = __float2int_rn (v.x * intr.fx / v.z + intr.cx);
coo.y = __float2int_rn (v.y * intr.fy / v.z + intr.cy);
if (v.z > 0 && coo.x >= 0 && coo.y >= 0 && coo.x < depth_raw.cols && coo.y < depth_raw.rows) //6
{
int Dp = depth_raw.ptr (coo.y)[coo.x];
if (Dp != 0)
{
// Factor converting camera->voxel distance into depth along the ray.
float xl = (coo.x - intr.cx) / intr.fx;
float yl = (coo.y - intr.cy) / intr.fy;
float lambda_inv = rsqrtf (xl * xl + yl * yl + 1);
float sdf = 1000 * norm (tcurr - v_g) * lambda_inv - Dp; //mm
sdf *= (-1);
if (sdf >= -tranc_dist_mm)
{
float tsdf = fmin (1.f, sdf / tranc_dist_mm);
int weight_prev;
float tsdf_prev;
//read and unpack
unpack_tsdf (*pos, tsdf_prev, weight_prev);
const int Wrk = 1;
// Running weighted average; weight clamped to MAX_WEIGHT.
float tsdf_new = (tsdf_prev * weight_prev + Wrk * tsdf) / (weight_prev + Wrk);
int weight_new = min (weight_prev + Wrk, MAX_WEIGHT);
pack_tsdf (tsdf_new, weight_new, *pos);
}
}
}
}
}
};
// Merges externally-computed "known status" weights into the TSDF slice that
// is re-entering the cyclic buffer after a volume shift.
//
// Thread layout mirrors clearSliceKernel: one thread per (x, y) column.
// Columns inside the shifted X/Y band ("black zone") walk the whole Z
// extent; the remaining columns ("red zone") only touch the |maxBounds.z|
// slices starting at minBounds.z.  The TSDF pointer and the matching
// known_status pointer are advanced in lockstep and wrapped around the
// cyclic buffer.  For each voxel with a non-zero status increment the TSDF
// weight is raised (clamped to Tsdf::MAX_WEIGHT); voxels with no prior
// observation (weight == 0) are initialised to the free-space value 1.0.
template<typename T>
__global__ void
uploadKnownToTSDFSliceKernel (PtrStep<T> volume, pcl::gpu::kinfuLS::tsdf_buffer buffer, int3 minBounds, int3 maxBounds,
PtrStep<short> known_status)
{
  int x = threadIdx.x + blockIdx.x * blockDim.x;
  int y = threadIdx.y + blockIdx.y * blockDim.y;

  // Indices relative to the wrapped buffer origin, so the band test below
  // works across the cyclic boundary.
  int idX, idY;
  if (x <= minBounds.x)
    idX = x + buffer.voxels_size.x;
  else
    idX = x;
  if (y <= minBounds.y)
    idY = y + buffer.voxels_size.y;
  else
    idY = y;

  if (x < buffer.voxels_size.x && y < buffer.voxels_size.y)
  {
    if ((idX >= minBounds.x && idX <= maxBounds.x) || (idY >= minBounds.y && idY <= maxBounds.y))
    {
      // BLACK ZONE => update on all Z values
      T *pos = volume.ptr(y) + x;                                       // first x,y,0 voxel
      int z_step = buffer.voxels_size.y * volume.step / sizeof(*pos);   // elements per Z slice
      int size = buffer.tsdf_memory_end - buffer.tsdf_memory_start + 1; // whole TSDF memory, in elements
      short * ks = known_status.ptr(y) + x;
      short * max_ks = known_status.ptr(0) + buffer.voxels_size.x*buffer.voxels_size.y*buffer.voxels_size.z;
      #pragma unroll
      for(int z = 0; z < buffer.voxels_size.z; ++z, pos+=z_step, ks+=z_step)
      {
        // Wrap both pointers back to the beginning of their buffers if needed.
        if(pos > buffer.tsdf_memory_end)
          pos = pos - size;
        if (ks >= max_ks)
          ks -= size;
        const short increment = *ks;
        if (increment && pos >= buffer.tsdf_memory_start && pos <= buffer.tsdf_memory_end) {
          float tsdf;
          int w;
          unpack_tsdf(*pos, tsdf, w);
          if (w == 0)
            tsdf = 1.0;  // never observed before: assume free space
          pack_tsdf (tsdf, min(increment + w,(Tsdf::MAX_WEIGHT)), *pos);
        }
      }
    }
    else /* if( idX > maxBounds.x && idY > maxBounds.y)*/
    {
      // RED ZONE => update only the appropriate Z range
      T *pos = volume.ptr(y) + x;
      int z_step = buffer.voxels_size.y * volume.step / sizeof(*pos);
      int size = buffer.tsdf_memory_end - buffer.tsdf_memory_start + 1;
      short * ks = known_status.ptr(y) + x;
      short * max_ks = known_status.ptr(0) + buffer.voxels_size.x*buffer.voxels_size.y*buffer.voxels_size.z;

      // Move both pointers to the Z origin of the slice.
      pos+= minBounds.z * z_step;
      ks+= minBounds.z * z_step;

      // If the Z offset is negative, move both pointers back by the same
      // amount.  (Bug fix: `ks` was previously offset by minBounds.z here,
      // desynchronising the status pointer from the voxel pointer.)
      if(maxBounds.z < 0) {
        pos += maxBounds.z * z_step;
        ks += maxBounds.z * z_step;
      }

      // Make sure we are not already before the start of the memory.
      if(pos < buffer.tsdf_memory_start) {
        pos = pos + size;
        ks += size;
      }

      int nbSteps = abs(maxBounds.z);
      #pragma unroll
      for(int z = 0; z < nbSteps; ++z, pos+=z_step, ks+=z_step)
      {
        // Wrap both pointers back to the beginning of their buffers if needed.
        if(pos > buffer.tsdf_memory_end)
          pos = pos - size;
        if (ks >= max_ks)
          ks -= size;
        const short increment = *ks;
        if (increment && pos >= buffer.tsdf_memory_start && pos <= buffer.tsdf_memory_end) {
          float tsdf;
          int w;
          unpack_tsdf(*pos, tsdf, w);
          if (w == 0)
            tsdf = 1.0;
          pack_tsdf (tsdf, min(increment + w,(Tsdf::MAX_WEIGHT)), *pos);
        }
      }
    } //else /* if( idX > maxBounds.x && idY > maxBounds.y)*/
  } // if ( x < VOLUME_X && y < VOLUME_Y)
} // uploadKnownToTSDFSliceKernel
// Thin kernel wrapper: executes the Tsdf integration functor on each thread.
__global__ void
integrateTsdfKernel (const Tsdf tsdf) {
tsdf ();
}
// Alternative millimetre-based TSDF integration kernel (flat-loop version of
// the Tsdf functor): one thread per (x, y) voxel column over the full Z
// extent of the volume.
__global__ void
tsdf2 (PtrStep<short2> volume, const float tranc_dist_mm, const Mat33 Rcurr_inv, float3 tcurr,
const Intr intr, const PtrStepSz<ushort> depth_raw, const float3 cell_size)
{
int x = threadIdx.x + blockIdx.x * blockDim.x;
int y = threadIdx.y + blockIdx.y * blockDim.y;
if (x >= VOLUME_X || y >= VOLUME_Y)
return;
short2 *pos = volume.ptr (y) + x;
// Element step between consecutive z-slices of the volume.
int elem_step = volume.step * VOLUME_Y / sizeof(short2);
// Voxel-centre position (z == 0) relative to the camera, in global coords.
float v_g_x = (x + 0.5f) * cell_size.x - tcurr.x;
float v_g_y = (y + 0.5f) * cell_size.y - tcurr.y;
float v_g_z = (0 + 0.5f) * cell_size.z - tcurr.z;
// Same position rotated into the camera frame.
float v_x = Rcurr_inv.data[0].x * v_g_x + Rcurr_inv.data[0].y * v_g_y + Rcurr_inv.data[0].z * v_g_z;
float v_y = Rcurr_inv.data[1].x * v_g_x + Rcurr_inv.data[1].y * v_g_y + Rcurr_inv.data[1].z * v_g_z;
float v_z = Rcurr_inv.data[2].x * v_g_x + Rcurr_inv.data[2].y * v_g_y + Rcurr_inv.data[2].z * v_g_z;
//#pragma unroll
for (int z = 0; z < VOLUME_Z; ++z)
{
float3 vr;
vr.x = v_g_x;
vr.y = v_g_y;
vr.z = (v_g_z + z * cell_size.z);
float3 v;
v.x = v_x + Rcurr_inv.data[0].z * z * cell_size.z;
v.y = v_y + Rcurr_inv.data[1].z * z * cell_size.z;
v.z = v_z + Rcurr_inv.data[2].z * z * cell_size.z;
int2 coo; //project to current cam
coo.x = __float2int_rn (v.x * intr.fx / v.z + intr.cx);
coo.y = __float2int_rn (v.y * intr.fy / v.z + intr.cy);
if (v.z > 0 && coo.x >= 0 && coo.y >= 0 && coo.x < depth_raw.cols && coo.y < depth_raw.rows) //6
{
int Dp = depth_raw.ptr (coo.y)[coo.x]; //mm
if (Dp != 0)
{
// Factor converting camera->voxel distance into depth along the ray.
float xl = (coo.x - intr.cx) / intr.fx;
float yl = (coo.y - intr.cy) / intr.fy;
float lambda_inv = rsqrtf (xl * xl + yl * yl + 1);
float sdf = Dp - norm (vr) * lambda_inv * 1000; //mm
if (sdf >= -tranc_dist_mm)
{
float tsdf = fmin (1.f, sdf / tranc_dist_mm);
int weight_prev;
float tsdf_prev;
//read and unpack
unpack_tsdf (*pos, tsdf_prev, weight_prev);
const int Wrk = 1;
// Running weighted average; weight clamped to MAX_WEIGHT.
float tsdf_new = (tsdf_prev * weight_prev + Wrk * tsdf) / (weight_prev + Wrk);
int weight_new = min (weight_prev + Wrk, Tsdf::MAX_WEIGHT);
pack_tsdf (tsdf_new, weight_new, *pos);
}
}
}
pos += elem_step;
} /* for(int z = 0; z < VOLUME_Z; ++z) */
} /* __global__ */
//////////////////////////////////////////////////////////////////////////////////////////////////////////////////
// Host entry point for the original mm-based TSDF integration: fills the
// Tsdf functor with per-frame parameters and launches the integration
// kernel.
// NOTE(review): both kernel launches are compiled out by `#if 0`, so this
// overload currently performs no integration and only synchronises —
// confirm whether the disabled launch is intentional (the cyclic-buffer
// overload appears to be the one in active use).
void
integrateTsdfVolume (const PtrStepSz<ushort>& depth_raw, const Intr& intr, const float3& volume_size,
const Mat33& Rcurr_inv, const float3& tcurr, float tranc_dist,
PtrStep<short2> volume)
{
Tsdf tsdf;
tsdf.volume = volume;
// Metric size of a single voxel.
tsdf.cell_size.x = volume_size.x / VOLUME_X;
tsdf.cell_size.y = volume_size.y / VOLUME_Y;
tsdf.cell_size.z = volume_size.z / VOLUME_Z;
tsdf.intr = intr;
tsdf.Rcurr_inv = Rcurr_inv;
tsdf.tcurr = tcurr;
tsdf.depth_raw = depth_raw;
tsdf.tranc_dist_mm = tranc_dist*1000; //mm
dim3 block (Tsdf::CTA_SIZE_X, Tsdf::CTA_SIZE_Y);
dim3 grid (divUp (VOLUME_X, block.x), divUp (VOLUME_Y, block.y));
#if 0
//tsdf2<<<grid, block>>>(volume, tranc_dist, Rcurr_inv, tcurr, intr, depth_raw, tsdf.cell_size);
integrateTsdfKernel<<<grid, block>>>(tsdf);
#endif
cudaSafeCall ( cudaGetLastError () );
cudaSafeCall (cudaDeviceSynchronize ());
}
}
}
}
namespace pcl
{
namespace device
{
namespace kinfuLS
{
// Converts a raw depth map (millimetres, measured along the optical axis)
// into a depth map in metres scaled along each pixel's viewing ray.
// Launch layout: one thread per pixel.
__global__ void
scaleDepth (const PtrStepSz<ushort> depth, PtrStep<float> scaled, const Intr intr)
{
  const int u = threadIdx.x + blockIdx.x * blockDim.x;
  const int v = threadIdx.y + blockIdx.y * blockDim.y;
  if (u >= depth.cols || v >= depth.rows)
    return;

  const int depth_mm = depth.ptr (v)[u];

  // Direction of this pixel's viewing ray (z component normalised to 1).
  const float rx = (u - intr.cx) / intr.fx;
  const float ry = (v - intr.cy) / intr.fy;
  const float ray_len = sqrtf (rx * rx + ry * ry + 1.f);

  scaled.ptr (v)[u] = depth_mm * ray_len / 1000.f; // mm -> metres, along the ray
}
// Main TSDF integration kernel for the cyclic (large-scale) buffer: one
// thread per (x, y) voxel column; each thread walks the full Z extent,
// projecting every voxel into the ray-scaled depth map and fusing the
// truncated signed distance with a running weighted average.
__global__ void
tsdf23 (const PtrStepSz<float> depthScaled, PtrStep<short2> volume,
const float tranc_dist, const Mat33 Rcurr_inv, const float3 tcurr, const Intr intr, const float3 cell_size, const pcl::gpu::kinfuLS::tsdf_buffer buffer)
{
int x = threadIdx.x + blockIdx.x * blockDim.x;
int y = threadIdx.y + blockIdx.y * blockDim.y;
if (x >= buffer.voxels_size.x || y >= buffer.voxels_size.y)
return;
// Voxel-centre position (z == 0) in global coordinates, relative to the camera.
float v_g_x = (x + 0.5f) * cell_size.x - tcurr.x;
float v_g_y = (y + 0.5f) * cell_size.y - tcurr.y;
float v_g_z = (0 + 0.5f) * cell_size.z - tcurr.z;
float v_g_part_norm = v_g_x * v_g_x + v_g_y * v_g_y;
// Camera-frame position pre-multiplied by the focal lengths.
float v_x = (Rcurr_inv.data[0].x * v_g_x + Rcurr_inv.data[0].y * v_g_y + Rcurr_inv.data[0].z * v_g_z) * intr.fx;
float v_y = (Rcurr_inv.data[1].x * v_g_x + Rcurr_inv.data[1].y * v_g_y + Rcurr_inv.data[1].z * v_g_z) * intr.fy;
float v_z = (Rcurr_inv.data[2].x * v_g_x + Rcurr_inv.data[2].y * v_g_y + Rcurr_inv.data[2].z * v_g_z);
float z_scaled = 0;
// Per-z-slice increments for the incremental update in the loop header.
float Rcurr_inv_0_z_scaled = Rcurr_inv.data[0].z * cell_size.z * intr.fx;
float Rcurr_inv_1_z_scaled = Rcurr_inv.data[1].z * cell_size.z * intr.fy;
float tranc_dist_inv = 1.0f / tranc_dist;
short2* pos = volume.ptr (y) + x;
// shift the pointer to relative indices
shift_tsdf_pointer(&pos, buffer);
int elem_step = volume.step * buffer.voxels_size.y / sizeof(short2);
//#pragma unroll
for (int z = 0; z < buffer.voxels_size.z;
++z,
v_g_z += cell_size.z,
z_scaled += cell_size.z,
v_x += Rcurr_inv_0_z_scaled,
v_y += Rcurr_inv_1_z_scaled,
pos += elem_step)
{
// As the pointer is incremented in the for loop, we have to make sure that the pointer is never outside the memory
if(pos > buffer.tsdf_memory_end)
pos -= (buffer.tsdf_memory_end - buffer.tsdf_memory_start + 1);
float inv_z = 1.0f / (v_z + Rcurr_inv.data[2].z * z_scaled);
// Negative depth: the voxel is behind the camera.
if (inv_z < 0)
continue;
// project to current cam
int2 coo =
{
__float2int_rn (v_x * inv_z + intr.cx),
__float2int_rn (v_y * inv_z + intr.cy)
};
if (coo.x >= 0 && coo.y >= 0 && coo.x < depthScaled.cols && coo.y < depthScaled.rows) //6
{
float Dp_scaled = depthScaled.ptr (coo.y)[coo.x]; //meters
// Signed distance between the measured surface and the voxel centre.
float sdf = Dp_scaled - sqrtf (v_g_z * v_g_z + v_g_part_norm);
if (Dp_scaled != 0 && sdf >= -tranc_dist) //meters
{
float tsdf = fmin (1.0f, sdf * tranc_dist_inv);
//read and unpack
float tsdf_prev;
int weight_prev;
unpack_tsdf (*pos, tsdf_prev, weight_prev);
const int Wrk = 1;
// Running weighted average of the TSDF; weight clamped to MAX_WEIGHT.
float tsdf_new = (tsdf_prev * weight_prev + Wrk * tsdf) / (weight_prev + Wrk);
int weight_new = min (weight_prev + Wrk, Tsdf::MAX_WEIGHT);
pack_tsdf (tsdf_new, weight_new, *pos);
}
}
} // for(int z = 0; z < VOLUME_Z; ++z)
} // __global__
// TSDF integration kernel with a surface-normal "hack": before fusing a new
// depth sample into a voxel, a normal is estimated from central differences
// of neighbouring TSDF cells, and integration is skipped when the viewing
// ray is too oblique to that normal (cosine below 0.5).
// Launch layout: one thread per (x, y) voxel column; each thread walks the
// full Z extent of the volume.
__global__ void
tsdf23normal_hack (const PtrStepSz<float> depthScaled, PtrStep<short2> volume,
const float tranc_dist, const Mat33 Rcurr_inv, const float3 tcurr, const Intr intr, const float3 cell_size)
{
int x = threadIdx.x + blockIdx.x * blockDim.x;
int y = threadIdx.y + blockIdx.y * blockDim.y;
if (x >= VOLUME_X || y >= VOLUME_Y)
return;
// Voxel-centre position (z == 0) in global coordinates, relative to the
// current camera translation.
const float v_g_x = (x + 0.5f) * cell_size.x - tcurr.x;
const float v_g_y = (y + 0.5f) * cell_size.y - tcurr.y;
float v_g_z = (0 + 0.5f) * cell_size.z - tcurr.z;
float v_g_part_norm = v_g_x * v_g_x + v_g_y * v_g_y;
// Voxel position rotated into the camera frame, pre-multiplied by the focal
// lengths so the projection below is a single multiply-add.
float v_x = (Rcurr_inv.data[0].x * v_g_x + Rcurr_inv.data[0].y * v_g_y + Rcurr_inv.data[0].z * v_g_z) * intr.fx;
float v_y = (Rcurr_inv.data[1].x * v_g_x + Rcurr_inv.data[1].y * v_g_y + Rcurr_inv.data[1].z * v_g_z) * intr.fy;
float v_z = (Rcurr_inv.data[2].x * v_g_x + Rcurr_inv.data[2].y * v_g_y + Rcurr_inv.data[2].z * v_g_z);
float z_scaled = 0;
// Per-z-slice increments for the incremental update in the loop header.
float Rcurr_inv_0_z_scaled = Rcurr_inv.data[0].z * cell_size.z * intr.fx;
float Rcurr_inv_1_z_scaled = Rcurr_inv.data[1].z * cell_size.z * intr.fy;
float tranc_dist_inv = 1.0f / tranc_dist;
short2* pos = volume.ptr (y) + x;
// Element step between consecutive z-slices of the volume.
int elem_step = volume.step * VOLUME_Y / sizeof(short2);
//#pragma unroll
for (int z = 0; z < VOLUME_Z;
++z,
v_g_z += cell_size.z,
z_scaled += cell_size.z,
v_x += Rcurr_inv_0_z_scaled,
v_y += Rcurr_inv_1_z_scaled,
pos += elem_step)
{
float inv_z = 1.0f / (v_z + Rcurr_inv.data[2].z * z_scaled);
// Negative depth: the voxel is behind the camera.
if (inv_z < 0)
continue;
// project to current cam
int2 coo =
{
__float2int_rn (v_x * inv_z + intr.cx),
__float2int_rn (v_y * inv_z + intr.cy)
};
if (coo.x >= 0 && coo.y >= 0 && coo.x < depthScaled.cols && coo.y < depthScaled.rows) //6
{
float Dp_scaled = depthScaled.ptr (coo.y)[coo.x]; //meters
// Signed distance between the measured surface and the voxel centre.
float sdf = Dp_scaled - sqrtf (v_g_z * v_g_z + v_g_part_norm);
if (Dp_scaled != 0 && sdf >= -tranc_dist) //meters
{
float tsdf = fmin (1.0f, sdf * tranc_dist_inv);
bool integrate = true;
// Only voxels with a full neighbourhood inside the volume take part in
// the normal test; border voxels always integrate.
if ((x > 0 && x < VOLUME_X-2) && (y > 0 && y < VOLUME_Y-2) && (z > 0 && z < VOLUME_Z-2))
{
const float qnan = numeric_limits<float>::quiet_NaN();
float3 normal = make_float3(qnan, qnan, qnan);
float Fn, Fp;
int Wn = 0, Wp = 0;
// Central differences along z, y and x; a component is only filled in
// when both neighbours carry sufficient weight (> 16).
unpack_tsdf (*(pos + elem_step), Fn, Wn);
unpack_tsdf (*(pos - elem_step), Fp, Wp);
if (Wn > 16 && Wp > 16)
normal.z = (Fn - Fp)/cell_size.z;
unpack_tsdf (*(pos + volume.step/sizeof(short2) ), Fn, Wn);
unpack_tsdf (*(pos - volume.step/sizeof(short2) ), Fp, Wp);
if (Wn > 16 && Wp > 16)
normal.y = (Fn - Fp)/cell_size.y;
unpack_tsdf (*(pos + 1), Fn, Wn);
unpack_tsdf (*(pos - 1), Fp, Wp);
if (Wn > 16 && Wp > 16)
normal.x = (Fn - Fp)/cell_size.x;
// NOTE(review): `!= qnan` is true even for NaN operands (NaN != NaN is
// true), so this guard never filters; unset components are still caught
// below because a NaN norm2 fails the `norm2 >= 1e-10` test.
if (normal.x != qnan && normal.y != qnan && normal.z != qnan)
{
float norm2 = dot(normal, normal);
if (norm2 >= 1e-10)
{
normal *= rsqrt(norm2);
// Cosine between the estimated normal and the camera->voxel ray.
float nt = v_g_x * normal.x + v_g_y * normal.y + v_g_z * normal.z;
float cosine = nt * rsqrt(v_g_x * v_g_x + v_g_y * v_g_y + v_g_z * v_g_z);
if (cosine < 0.5)
integrate = false;
}
}
}
if (integrate)
{
//read and unpack
float tsdf_prev;
int weight_prev;
unpack_tsdf (*pos, tsdf_prev, weight_prev);
const int Wrk = 1;
// Running weighted average of the TSDF; weight clamped to MAX_WEIGHT.
float tsdf_new = (tsdf_prev * weight_prev + Wrk * tsdf) / (weight_prev + Wrk);
int weight_new = min (weight_prev + Wrk, Tsdf::MAX_WEIGHT);
pack_tsdf (tsdf_new, weight_new, *pos);
}
}
}
} // for(int z = 0; z < VOLUME_Z; ++z)
} // __global__
//////////////////////////////////////////////////////////////////////////////////////////////////////////////////
// Host entry point: integrates one depth frame into the cyclic-buffer TSDF
// volume.  First rescales the raw depth along each viewing ray (mm ->
// metres), then launches the tsdf23 integration kernel over the X/Y voxel
// plane.
void
integrateTsdfVolume (const PtrStepSz<ushort>& depth, const Intr& intr,
const float3& volume_size, const Mat33& Rcurr_inv, const float3& tcurr,
float tranc_dist,
PtrStep<short2> volume, const pcl::gpu::kinfuLS::tsdf_buffer* buffer, DeviceArray2D<float>& depthScaled)
{
depthScaled.create (depth.rows, depth.cols);
dim3 block_scale (32, 8);
dim3 grid_scale (divUp (depth.cols, block_scale.x), divUp (depth.rows, block_scale.y));
//scales depth along ray and converts mm -> meters.
scaleDepth<<<grid_scale, block_scale>>>(depth, depthScaled, intr);
cudaSafeCall ( cudaGetLastError () );
// Metric size of a single voxel.
float3 cell_size;
cell_size.x = volume_size.x / buffer->voxels_size.x;
cell_size.y = volume_size.y / buffer->voxels_size.y;
cell_size.z = volume_size.z / buffer->voxels_size.z;
//dim3 block(Tsdf::CTA_SIZE_X, Tsdf::CTA_SIZE_Y);
dim3 block (16, 16);
dim3 grid (divUp (buffer->voxels_size.x, block.x), divUp (buffer->voxels_size.y, block.y));
tsdf23<<<grid, block>>>(depthScaled, volume, tranc_dist, Rcurr_inv, tcurr, intr, cell_size, *buffer);
//tsdf23normal_hack<<<grid, block>>>(depthScaled, volume, tranc_dist, Rcurr_inv, tcurr, intr, cell_size);
cudaSafeCall ( cudaGetLastError () );
cudaSafeCall (cudaDeviceSynchronize ());
}
/////////////////////////////////////////////////////////////////////////////////////////////////////////////////
// Host wrapper: clears the slice of the cyclic TSDF buffer that is
// invalidated when the volume origin shifts by (shiftX, shiftY, shiftZ)
// voxels.  The X/Y bounds select the wrapped band of columns to wipe; the Z
// bounds are passed through as (origin, shift) and interpreted by the kernel.
void
clearTSDFSlice (PtrStep<short2> volume, pcl::gpu::kinfuLS::tsdf_buffer* buffer, int shiftX, int shiftY, int shiftZ)
{
int newX = buffer->origin_GRID.x + shiftX;
int newY = buffer->origin_GRID.y + shiftY;
int3 minBounds, maxBounds;
//X
if(newX >= 0)
{
minBounds.x = buffer->origin_GRID.x;
maxBounds.x = newX;
}
else
{
// Negative target index: wrap around the cyclic buffer.
minBounds.x = newX + buffer->voxels_size.x;
maxBounds.x = buffer->origin_GRID.x + buffer->voxels_size.x;
}
if(minBounds.x > maxBounds.x)
std::swap(minBounds.x, maxBounds.x);
//Y
if(newY >= 0)
{
minBounds.y = buffer->origin_GRID.y;
maxBounds.y = newY;
}
else
{
minBounds.y = newY + buffer->voxels_size.y;
maxBounds.y = buffer->origin_GRID.y + buffer->voxels_size.y;
}
if(minBounds.y > maxBounds.y)
std::swap(minBounds.y, maxBounds.y);
//Z
minBounds.z = buffer->origin_GRID.z;
maxBounds.z = shiftZ;
// call kernel
dim3 block (32, 16);
dim3 grid (1, 1, 1);
grid.x = divUp (buffer->voxels_size.x, block.x);
grid.y = divUp (buffer->voxels_size.y, block.y);
clearSliceKernel<<<grid, block>>>(volume, *buffer, minBounds, maxBounds);
cudaSafeCall ( cudaGetLastError () );
cudaSafeCall (cudaDeviceSynchronize ());
}
/////////////////////////////////////////////////////////////////////////////////////////////////////////////////
// Host wrapper: uploads externally-tracked "known status" weights into the
// slice re-entering the cyclic TSDF buffer after a shift of
// (shiftX, shiftY, shiftZ) voxels.  X/Y bounds are computed from the
// *previous* origin (origin - shift); the Z start is wrapped into
// [0, voxels_size.z) and the kernel walks |shiftZ| slices from there.
void
uploadKnownToTSDFSlice (PtrStep<short2> volume, pcl::gpu::kinfuLS::tsdf_buffer* buffer, int shiftX, int shiftY, int shiftZ,
PtrStep<short> known_status)
{
int oldX = buffer->origin_GRID.x - shiftX;
int oldY = buffer->origin_GRID.y - shiftY;
int oldZ = buffer->origin_GRID.z - shiftZ;
int3 minBounds, maxBounds;
//X
if(oldX >= 0)
{
minBounds.x = buffer->origin_GRID.x;
maxBounds.x = oldX;
}
else
{
// Negative index: wrap around the cyclic buffer.
minBounds.x = oldX + buffer->voxels_size.x;
maxBounds.x = buffer->origin_GRID.x + buffer->voxels_size.x;
}
if(minBounds.x > maxBounds.x)
std::swap(minBounds.x, maxBounds.x);
//Y
if(oldY >= 0)
{
minBounds.y = buffer->origin_GRID.y;
maxBounds.y = oldY;
}
else
{
minBounds.y = oldY + buffer->voxels_size.y;
maxBounds.y = buffer->origin_GRID.y + buffer->voxels_size.y;
}
if(minBounds.y > maxBounds.y)
std::swap(minBounds.y, maxBounds.y);
// Wrap the Z start index into the valid range.
while (oldZ < 0)
oldZ += buffer->voxels_size.z;
while (oldZ >= buffer->voxels_size.z)
oldZ -= buffer->voxels_size.z;
//Z
minBounds.z = oldZ;
maxBounds.z = shiftZ;
// call kernel
dim3 block (32, 16);
dim3 grid (1, 1, 1);
grid.x = divUp (buffer->voxels_size.x, block.x);
grid.y = divUp (buffer->voxels_size.y, block.y);
uploadKnownToTSDFSliceKernel<<<grid, block>>>(volume, *buffer, minBounds, maxBounds, known_status);
cudaSafeCall ( cudaGetLastError () );
cudaSafeCall (cudaDeviceSynchronize ());
}
}
}
}
|
754f52e47ad266725d824923f2e0cc0a729b502a.hip
|
// !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
#include <assert.h>
#include <stdio.h>
#define HOSTCODE true
#include "mvt_kernel.hu"
#define _PB_N 100
int x1[100];
int x2[100];
int y_1[100];
int y_2[100];
int A[100][100];
int main()
{
int i,j;
{
#define cudaCheckReturn(ret) \
do { \
hipError_t cudaCheckReturn_e = (ret); \
if (cudaCheckReturn_e != hipSuccess) { \
fprintf(stderr, "CUDA error: %s\n", hipGetErrorString(cudaCheckReturn_e)); \
fflush(stderr); \
} \
assert(cudaCheckReturn_e == hipSuccess); \
} while(0)
#define cudaCheckKernel() \
do { \
cudaCheckReturn(hipGetLastError()); \
} while(0)
int *dev_A;
int *dev_x1;
int *dev_x2;
cudaCheckReturn(hipMalloc((void **) &dev_A, (100) * (100) * sizeof(int)));
cudaCheckReturn(hipMalloc((void **) &dev_x1, (100) * sizeof(int)));
cudaCheckReturn(hipMalloc((void **) &dev_x2, (100) * sizeof(int)));
hipMemcpyToSymbol(const_y_1, y_1, (100) * sizeof(int));
hipMemcpyToSymbol(const_y_2, y_2, (100) * sizeof(int));
cudaCheckReturn(hipMemcpy(dev_A, A, (100) * (100) * sizeof(int), hipMemcpyHostToDevice));
cudaCheckReturn(hipMemcpy(dev_x1, x1, (100) * sizeof(int), hipMemcpyHostToDevice));
cudaCheckReturn(hipMemcpy(dev_x2, x2, (100) * sizeof(int), hipMemcpyHostToDevice));
{
dim3 k0_dimBlock(32);
dim3 k0_dimGrid(4);
hipLaunchKernelGGL(( kernel0) , dim3(k0_dimGrid), dim3(k0_dimBlock), 0, 0, dev_A, dev_x1);
cudaCheckKernel();
}
{
dim3 k1_dimBlock(32);
dim3 k1_dimGrid(4);
hipLaunchKernelGGL(( kernel1) , dim3(k1_dimGrid), dim3(k1_dimBlock), 0, 0, dev_A, dev_x2);
cudaCheckKernel();
}
cudaCheckReturn(hipMemcpy(x1, dev_x1, (100) * sizeof(int), hipMemcpyDeviceToHost));
cudaCheckReturn(hipMemcpy(x2, dev_x2, (100) * sizeof(int), hipMemcpyDeviceToHost));
cudaCheckReturn(hipFree(dev_A));
cudaCheckReturn(hipFree(dev_x1));
cudaCheckReturn(hipFree(dev_x2));
}
}
/*enum RWbar
{
write, 0
read, 1
invalid,2
error, 3
none, 4
read_inside_loop 5
}; */
|
754f52e47ad266725d824923f2e0cc0a729b502a.cu
|
#include <assert.h>
#include <stdio.h>
#define HOSTCODE true
#include "mvt_kernel.hu"
#define _PB_N 100
int x1[100];
int x2[100];
int y_1[100];
int y_2[100];
int A[100][100];
int main()
{
int i,j;
{
#define cudaCheckReturn(ret) \
do { \
cudaError_t cudaCheckReturn_e = (ret); \
if (cudaCheckReturn_e != cudaSuccess) { \
fprintf(stderr, "CUDA error: %s\n", cudaGetErrorString(cudaCheckReturn_e)); \
fflush(stderr); \
} \
assert(cudaCheckReturn_e == cudaSuccess); \
} while(0)
#define cudaCheckKernel() \
do { \
cudaCheckReturn(cudaGetLastError()); \
} while(0)
int *dev_A;
int *dev_x1;
int *dev_x2;
cudaCheckReturn(cudaMalloc((void **) &dev_A, (100) * (100) * sizeof(int)));
cudaCheckReturn(cudaMalloc((void **) &dev_x1, (100) * sizeof(int)));
cudaCheckReturn(cudaMalloc((void **) &dev_x2, (100) * sizeof(int)));
cudaMemcpyToSymbol(const_y_1, y_1, (100) * sizeof(int));
cudaMemcpyToSymbol(const_y_2, y_2, (100) * sizeof(int));
cudaCheckReturn(cudaMemcpy(dev_A, A, (100) * (100) * sizeof(int), cudaMemcpyHostToDevice));
cudaCheckReturn(cudaMemcpy(dev_x1, x1, (100) * sizeof(int), cudaMemcpyHostToDevice));
cudaCheckReturn(cudaMemcpy(dev_x2, x2, (100) * sizeof(int), cudaMemcpyHostToDevice));
{
dim3 k0_dimBlock(32);
dim3 k0_dimGrid(4);
kernel0 <<<k0_dimGrid, k0_dimBlock>>> (dev_A, dev_x1);
cudaCheckKernel();
}
{
dim3 k1_dimBlock(32);
dim3 k1_dimGrid(4);
kernel1 <<<k1_dimGrid, k1_dimBlock>>> (dev_A, dev_x2);
cudaCheckKernel();
}
cudaCheckReturn(cudaMemcpy(x1, dev_x1, (100) * sizeof(int), cudaMemcpyDeviceToHost));
cudaCheckReturn(cudaMemcpy(x2, dev_x2, (100) * sizeof(int), cudaMemcpyDeviceToHost));
cudaCheckReturn(cudaFree(dev_A));
cudaCheckReturn(cudaFree(dev_x1));
cudaCheckReturn(cudaFree(dev_x2));
}
}
/*enum RWbar
{
write, 0
read, 1
invalid,2
error, 3
none, 4
read_inside_loop 5
}; */
|
e762100357e834a5aaac3cb35f4005e8ebf18a21.hip
|
// !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
#define TORCH_ASSERT_NO_OPERATORS
#include <ATen/native/hip/SortStable.h>
#include <ATen/Dispatch.h>
#include <ATen/core/Array.h>
#include <ATen/core/TensorBase.h>
#include <ATen/hip/HIPContext.h>
#include <ATen/hip/detail/KernelUtils.h>
#include <ATen/hip/cub.cuh>
#include <ATen/hip/detail/OffsetCalculator.cuh>
#include <ATen/native/hip/SortUtils.cuh>
#include <ATen/native/hip/SortingCommon.cuh>
#include <c10/core/DeviceArray.h>
#include <limits>
namespace at::native {
namespace {
struct offset_t {
int stride;
int begin;
__device__ int operator[](int i) {
return stride * (begin + i);
}
};
// Segmented sort by full sort algorithm:.
// Say we are sorting a (2, 3) tensor. We have in flattened form:
// values 0.4 1.2 5.3 6.2 1.3 2.3
// indices 0 1 2 0 1 2
// segment_id 0 0 0 1 1 1
// First we sort by values, globally:
// values 6.2 5.3 2.3 1.2 1.3 0.4
// indices 0 2 2 1 1 0
// segment_id 1 0 1 0 1 0
// Then we stable sort by segment id:
// values 5.3 1.2 0.4 6.2 2.3 1.3
// indices 2 1 0 0 2 1
// segment_id 0 0 0 1 1 1
// This method can only work if the slice we are sorting (`dim`) is
// innermost, and both values and indices are contiguous. We do this
// by re-arranging the input into this form as needed, which will
// unfortunately allocate memory if the request is not in this form.
// Vectorized sort is slower than iterated sort if the number of
// slices is small (since we're sorting twice, instead of invoking a
// smaller sort `numSlices` times), but the cub sort
// implementation here is a catch-all, so we're not looking for
// efficiency, but instead correctness.
template <typename scalar_t>
__global__ void sort_postprocess_kernel(
const scalar_t* in,
scalar_t* out,
int64_t* index,
const int2* i_s_ptr,
int nsegments,
int nsort) {
CUDA_KERNEL_LOOP(i, nsegments * nsort) {
int segment = i / nsort;
int j = i % nsort;
int offset = segment * nsort;
const scalar_t* in_ = in + offset;
scalar_t* out_ = out + offset;
int64_t* index_ = index + offset;
const int2* i_s_ptr_ = i_s_ptr + offset;
int idx = i_s_ptr_[j].y;
index_[j] = idx;
out_[j] = in_[idx];
}
}
C10_LAUNCH_BOUNDS_1(at::cuda::detail::CUDA_NUM_THREADS)
__global__ void fill_index_and_segment_kernel(
int2* data,
int numel,
at::cuda::detail::IntDivider<uint32_t> nsort_divider) {
CUDA_KERNEL_LOOP(idx, numel) {
auto div_mod = nsort_divider.divmod(idx);
auto segment = static_cast<int>(div_mod.div);
auto sort = static_cast<int>(div_mod.mod);
data[idx] = int2{segment, sort};
}
}
C10_LAUNCH_BOUNDS_1(at::cuda::detail::CUDA_NUM_THREADS)
__global__ void fill_reverse_indices_kernel(
int64_t* data,
int numel,
at::cuda::detail::IntDivider<uint32_t> nsort_divider) {
CUDA_KERNEL_LOOP(idx, numel) {
data[idx] = nsort_divider.mod(idx);
}
}
template <typename scalar_t>
inline void segmented_sort_large_segments(
const int64_t nsegments,
const int64_t nsort,
const int64_t n,
const bool descending,
const scalar_t* self_ptr,
scalar_t* values_ptr,
int64_t* indices_ptr) {
using namespace at::cuda::detail;
auto allocator = at::cuda::getCUDADeviceAllocator();
auto stream = at::hip::getCurrentHIPStreamMasqueradingAsCUDA();
dim3 block = CUDA_NUM_THREADS;
dim3 grid = GET_BLOCKS(nsort);
c10::DeviceArray<int64_t> indices(*allocator, nsort);
at::cuda::detail::IntDivider<uint32_t> nsort_divider(nsort);
hipLaunchKernelGGL(( fill_reverse_indices_kernel), dim3(grid), dim3(block), 0, stream,
indices.get(), nsort, nsort_divider);
const int64_t* initial_indices = indices.get();
for (auto i : c10::irange(nsegments)) {
at::cuda::cub::radix_sort_pairs<scalar_t, int64_t>(
self_ptr, values_ptr, initial_indices, indices_ptr, nsort, descending);
indices_ptr += nsort;
self_ptr += nsort;
values_ptr += nsort;
}
}
template <typename scalar_t>
inline void segmented_sort_pairs_by_full_sort(
const int64_t nsegments,
const int64_t nsort,
const int64_t n,
const bool descending,
const scalar_t* const self_ptr,
scalar_t* const values_ptr,
int64_t* const indices_ptr) {
int64_t segment_bits = std::max<int64_t>(
1L, static_cast<int64_t>(::ceil(std::log2(nsegments))));
const auto numel = nsort * nsegments;
auto cuda_allocator = at::cuda::getCUDADeviceAllocator();
auto indices_and_segment = cuda_allocator->allocate(numel * sizeof(int2));
auto i_s_ptr = static_cast<int2*>(indices_and_segment.get());
using namespace at::cuda::detail;
dim3 block = CUDA_NUM_THREADS;
dim3 grid = GET_BLOCKS(numel);
auto stream = c10::hip::getCurrentHIPStreamMasqueradingAsCUDA();
at::cuda::detail::IntDivider<uint32_t> nsort_divider(nsort);
hipLaunchKernelGGL(( fill_index_and_segment_kernel), dim3(grid), dim3(block), 0, stream,
i_s_ptr, numel, nsort_divider);
auto indices_and_segment2 =
cuda_allocator->allocate(nsegments * nsort * sizeof(int2));
auto i_s_ptr2 = static_cast<int2*>(indices_and_segment2.get());
at::cuda::cub::radix_sort_pairs<scalar_t, int2>(
self_ptr, nullptr, i_s_ptr, i_s_ptr2, n, descending);
TORCH_INTERNAL_ASSERT(segment_bits <= 32);
// sort on lower 32bits, i.e. segment index
at::cuda::cub::radix_sort_keys<int64_t>(
reinterpret_cast<int64_t*>(i_s_ptr2),
reinterpret_cast<int64_t*>(i_s_ptr),
n,
false,
0,
segment_bits);
hipLaunchKernelGGL(( sort_postprocess_kernel),
dim3((n + 511) / 512),
dim3(512),
0,
at::hip::getCurrentHIPStreamMasqueradingAsCUDA(),
self_ptr, values_ptr, indices_ptr, i_s_ptr, nsegments, nsort);
}
template <typename scalar_t>
void segmented_sort_pairs(
int64_t nsegments,
int64_t nsort,
int64_t n,
bool descending,
const scalar_t* self_ptr,
scalar_t* values_ptr,
int64_t* indices_ptr) {
const auto numel = nsort * nsegments;
auto cuda_allocator = at::cuda::getCUDADeviceAllocator();
auto reverse_indices = cuda_allocator->allocate(numel * sizeof(int64_t));
int64_t* reverse_indices_ptr = static_cast<int64_t*>(reverse_indices.get());
using namespace at::cuda::detail;
dim3 block = CUDA_NUM_THREADS;
dim3 grid = GET_BLOCKS(numel);
auto stream = c10::hip::getCurrentHIPStreamMasqueradingAsCUDA();
at::cuda::detail::IntDivider<uint32_t> nsort_divider(nsort);
hipLaunchKernelGGL(( fill_reverse_indices_kernel), dim3(grid), dim3(block), 0, stream,
reverse_indices_ptr, numel, nsort_divider);
at::cuda::cub::segmented_sort_pairs(
self_ptr,
values_ptr,
reverse_indices_ptr,
indices_ptr,
n,
nsegments,
offset_t{(int)nsort, 0},
offset_t{(int)nsort, 1},
descending);
}
} // namespace
void launch_stable_sort_kernel(
const TensorBase& self,
int64_t dim,
bool descending,
const TensorBase& values,
const TensorBase& indices) {
const auto numel = self.numel();
if (numel == 0) {
return;
}
int64_t numel_or_intmax =
::min(numel, static_cast<int64_t>(std::numeric_limits<int>::max()));
int64_t nsort = self.size(dim);
int64_t nbatch = (numel_or_intmax / nsort) * nsort;
TORCH_CHECK(nbatch > 0, "Cannot sort dimension of length ", nsort);
int64_t* indices_ptr = indices.mutable_data_ptr<int64_t>();
#if (defined(USE_ROCM) && ROCM_VERSION < 40500)
constexpr bool is_rocm_bf16_sort_unsupported = true;
#else
constexpr bool is_rocm_bf16_sort_unsupported = false;
#endif
AT_DISPATCH_ALL_TYPES_AND3(
kBool, kHalf, kBFloat16, self.scalar_type(), "sort", [&] {
c10::guts::if_constexpr<!(
is_rocm_bf16_sort_unsupported &&
std::is_same<scalar_t, c10::BFloat16>::value)>(
[&](auto _) {
const scalar_t* self_ptr = self.const_data_ptr<scalar_t>();
scalar_t* values_ptr = values.mutable_data_ptr<scalar_t>();
int64_t remaining = _(numel);
while (remaining > 0) {
int64_t n = ::min(remaining, nbatch);
int64_t nsegments = n / nsort;
if (nsegments == 1 ||
nsort >= 1000000) { // rough heuristics where even a single
// sort occupies GPU
segmented_sort_large_segments(
nsegments,
nsort,
n,
descending,
self_ptr,
values_ptr,
indices_ptr);
} else if (nsegments < 128) {
segmented_sort_pairs_by_full_sort(
nsegments,
nsort,
n,
descending,
self_ptr,
values_ptr,
indices_ptr);
} else {
segmented_sort_pairs(
nsegments,
nsort,
n,
descending,
self_ptr,
values_ptr,
indices_ptr);
}
remaining -= n;
self_ptr += n;
values_ptr += n;
indices_ptr += n;
}
},
[&](auto _) {
TORCH_CHECK(_(false), "BFloat16 is not supported on ROCm < 4.5");
});
});
}
} // namespace at::native
|
e762100357e834a5aaac3cb35f4005e8ebf18a21.cu
|
#define TORCH_ASSERT_NO_OPERATORS
#include <ATen/native/cuda/SortStable.h>
#include <ATen/Dispatch.h>
#include <ATen/core/Array.h>
#include <ATen/core/TensorBase.h>
#include <ATen/cuda/CUDAContext.h>
#include <ATen/cuda/detail/KernelUtils.h>
#include <ATen/cuda/cub.cuh>
#include <ATen/cuda/detail/OffsetCalculator.cuh>
#include <ATen/native/cuda/SortUtils.cuh>
#include <ATen/native/cuda/SortingCommon.cuh>
#include <c10/core/DeviceArray.h>
#include <limits>
namespace at::native {
namespace {
struct offset_t {
int stride;
int begin;
__device__ int operator[](int i) {
return stride * (begin + i);
}
};
// Segmented sort by full sort algorithm:.
// Say we are sorting a (2, 3) tensor. We have in flattened form:
// values 0.4 1.2 5.3 6.2 1.3 2.3
// indices 0 1 2 0 1 2
// segment_id 0 0 0 1 1 1
// First we sort by values, globally:
// values 6.2 5.3 2.3 1.2 1.3 0.4
// indices 0 2 2 1 1 0
// segment_id 1 0 1 0 1 0
// Then we stable sort by segment id:
// values 5.3 1.2 0.4 6.2 2.3 1.3
// indices 2 1 0 0 2 1
// segment_id 0 0 0 1 1 1
// This method can only work if the slice we are sorting (`dim`) is
// innermost, and both values and indices are contiguous. We do this
// by re-arranging the input into this form as needed, which will
// unfortunately allocate memory if the request is not in this form.
// Vectorized sort is slower than iterated sort if the number of
// slices is small (since we're sorting twice, instead of invoking a
// smaller sort `numSlices` times), but the cub sort
// implementation here is a catch-all, so we're not looking for
// efficiency, but instead correctness.
template <typename scalar_t>
__global__ void sort_postprocess_kernel(
const scalar_t* in,
scalar_t* out,
int64_t* index,
const int2* i_s_ptr,
int nsegments,
int nsort) {
CUDA_KERNEL_LOOP(i, nsegments * nsort) {
int segment = i / nsort;
int j = i % nsort;
int offset = segment * nsort;
const scalar_t* in_ = in + offset;
scalar_t* out_ = out + offset;
int64_t* index_ = index + offset;
const int2* i_s_ptr_ = i_s_ptr + offset;
int idx = i_s_ptr_[j].y;
index_[j] = idx;
out_[j] = in_[idx];
}
}
C10_LAUNCH_BOUNDS_1(at::cuda::detail::CUDA_NUM_THREADS)
__global__ void fill_index_and_segment_kernel(
int2* data,
int numel,
at::cuda::detail::IntDivider<uint32_t> nsort_divider) {
CUDA_KERNEL_LOOP(idx, numel) {
auto div_mod = nsort_divider.divmod(idx);
auto segment = static_cast<int>(div_mod.div);
auto sort = static_cast<int>(div_mod.mod);
data[idx] = int2{segment, sort};
}
}
C10_LAUNCH_BOUNDS_1(at::cuda::detail::CUDA_NUM_THREADS)
__global__ void fill_reverse_indices_kernel(
int64_t* data,
int numel,
at::cuda::detail::IntDivider<uint32_t> nsort_divider) {
CUDA_KERNEL_LOOP(idx, numel) {
data[idx] = nsort_divider.mod(idx);
}
}
template <typename scalar_t>
inline void segmented_sort_large_segments(
const int64_t nsegments,
const int64_t nsort,
const int64_t n,
const bool descending,
const scalar_t* self_ptr,
scalar_t* values_ptr,
int64_t* indices_ptr) {
using namespace at::cuda::detail;
auto allocator = at::cuda::getCUDADeviceAllocator();
auto stream = at::cuda::getCurrentCUDAStream();
dim3 block = CUDA_NUM_THREADS;
dim3 grid = GET_BLOCKS(nsort);
c10::DeviceArray<int64_t> indices(*allocator, nsort);
at::cuda::detail::IntDivider<uint32_t> nsort_divider(nsort);
fill_reverse_indices_kernel<<<grid, block, 0, stream>>>(
indices.get(), nsort, nsort_divider);
const int64_t* initial_indices = indices.get();
for (auto i : c10::irange(nsegments)) {
at::cuda::cub::radix_sort_pairs<scalar_t, int64_t>(
self_ptr, values_ptr, initial_indices, indices_ptr, nsort, descending);
indices_ptr += nsort;
self_ptr += nsort;
values_ptr += nsort;
}
}
template <typename scalar_t>
inline void segmented_sort_pairs_by_full_sort(
const int64_t nsegments,
const int64_t nsort,
const int64_t n,
const bool descending,
const scalar_t* const self_ptr,
scalar_t* const values_ptr,
int64_t* const indices_ptr) {
int64_t segment_bits = std::max<int64_t>(
1L, static_cast<int64_t>(std::ceil(std::log2(nsegments))));
const auto numel = nsort * nsegments;
auto cuda_allocator = at::cuda::getCUDADeviceAllocator();
auto indices_and_segment = cuda_allocator->allocate(numel * sizeof(int2));
auto i_s_ptr = static_cast<int2*>(indices_and_segment.get());
using namespace at::cuda::detail;
dim3 block = CUDA_NUM_THREADS;
dim3 grid = GET_BLOCKS(numel);
auto stream = c10::cuda::getCurrentCUDAStream();
at::cuda::detail::IntDivider<uint32_t> nsort_divider(nsort);
fill_index_and_segment_kernel<<<grid, block, 0, stream>>>(
i_s_ptr, numel, nsort_divider);
auto indices_and_segment2 =
cuda_allocator->allocate(nsegments * nsort * sizeof(int2));
auto i_s_ptr2 = static_cast<int2*>(indices_and_segment2.get());
at::cuda::cub::radix_sort_pairs<scalar_t, int2>(
self_ptr, nullptr, i_s_ptr, i_s_ptr2, n, descending);
TORCH_INTERNAL_ASSERT(segment_bits <= 32);
// sort on lower 32bits, i.e. segment index
at::cuda::cub::radix_sort_keys<int64_t>(
reinterpret_cast<int64_t*>(i_s_ptr2),
reinterpret_cast<int64_t*>(i_s_ptr),
n,
false,
0,
segment_bits);
sort_postprocess_kernel<<<
(n + 511) / 512,
512,
0,
at::cuda::getCurrentCUDAStream()>>>(
self_ptr, values_ptr, indices_ptr, i_s_ptr, nsegments, nsort);
}
template <typename scalar_t>
void segmented_sort_pairs(
int64_t nsegments,
int64_t nsort,
int64_t n,
bool descending,
const scalar_t* self_ptr,
scalar_t* values_ptr,
int64_t* indices_ptr) {
const auto numel = nsort * nsegments;
auto cuda_allocator = at::cuda::getCUDADeviceAllocator();
auto reverse_indices = cuda_allocator->allocate(numel * sizeof(int64_t));
int64_t* reverse_indices_ptr = static_cast<int64_t*>(reverse_indices.get());
using namespace at::cuda::detail;
dim3 block = CUDA_NUM_THREADS;
dim3 grid = GET_BLOCKS(numel);
auto stream = c10::cuda::getCurrentCUDAStream();
at::cuda::detail::IntDivider<uint32_t> nsort_divider(nsort);
fill_reverse_indices_kernel<<<grid, block, 0, stream>>>(
reverse_indices_ptr, numel, nsort_divider);
at::cuda::cub::segmented_sort_pairs(
self_ptr,
values_ptr,
reverse_indices_ptr,
indices_ptr,
n,
nsegments,
offset_t{(int)nsort, 0},
offset_t{(int)nsort, 1},
descending);
}
} // namespace
void launch_stable_sort_kernel(
const TensorBase& self,
int64_t dim,
bool descending,
const TensorBase& values,
const TensorBase& indices) {
const auto numel = self.numel();
if (numel == 0) {
return;
}
int64_t numel_or_intmax =
std::min(numel, static_cast<int64_t>(std::numeric_limits<int>::max()));
int64_t nsort = self.size(dim);
int64_t nbatch = (numel_or_intmax / nsort) * nsort;
TORCH_CHECK(nbatch > 0, "Cannot sort dimension of length ", nsort);
int64_t* indices_ptr = indices.mutable_data_ptr<int64_t>();
#if (defined(USE_ROCM) && ROCM_VERSION < 40500)
constexpr bool is_rocm_bf16_sort_unsupported = true;
#else
constexpr bool is_rocm_bf16_sort_unsupported = false;
#endif
AT_DISPATCH_ALL_TYPES_AND3(
kBool, kHalf, kBFloat16, self.scalar_type(), "sort", [&] {
c10::guts::if_constexpr<!(
is_rocm_bf16_sort_unsupported &&
std::is_same<scalar_t, c10::BFloat16>::value)>(
[&](auto _) {
const scalar_t* self_ptr = self.const_data_ptr<scalar_t>();
scalar_t* values_ptr = values.mutable_data_ptr<scalar_t>();
int64_t remaining = _(numel);
while (remaining > 0) {
int64_t n = std::min(remaining, nbatch);
int64_t nsegments = n / nsort;
if (nsegments == 1 ||
nsort >= 1000000) { // rough heuristics where even a single
// sort occupies GPU
segmented_sort_large_segments(
nsegments,
nsort,
n,
descending,
self_ptr,
values_ptr,
indices_ptr);
} else if (nsegments < 128) {
segmented_sort_pairs_by_full_sort(
nsegments,
nsort,
n,
descending,
self_ptr,
values_ptr,
indices_ptr);
} else {
segmented_sort_pairs(
nsegments,
nsort,
n,
descending,
self_ptr,
values_ptr,
indices_ptr);
}
remaining -= n;
self_ptr += n;
values_ptr += n;
indices_ptr += n;
}
},
[&](auto _) {
TORCH_CHECK(_(false), "BFloat16 is not supported on ROCm < 4.5");
});
});
}
} // namespace at::native
|
1e725ba1a66262033feb5cd1e2f0627753863863.hip
|
// !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
// BSD 3-Clause License; see https://github.com/scikit-hep/awkward-1.0/blob/main/LICENSE
enum class REGULARARRAY_GETITEM_NEXT_AT_ERRORS {
IND_OUT_OF_RANGE // message: "index out of range"
};
template <typename T>
__global__ void
awkward_RegularArray_getitem_next_at(T* tocarry,
int64_t at,
int64_t length,
int64_t size,
uint64_t invocation_index,
uint64_t* err_code) {
if (err_code[0] == NO_ERROR) {
int64_t thread_id = blockIdx.x * blockDim.x + threadIdx.x;
int64_t regular_at = at;
if (regular_at < 0) {
regular_at += size;
}
if (!(0 <= regular_at && regular_at < size)) {
RAISE_ERROR(REGULARARRAY_GETITEM_NEXT_AT_ERRORS::IND_OUT_OF_RANGE)
}
if (thread_id < length) {
tocarry[thread_id] = (thread_id * size) + regular_at;
}
}
}
|
1e725ba1a66262033feb5cd1e2f0627753863863.cu
|
// BSD 3-Clause License; see https://github.com/scikit-hep/awkward-1.0/blob/main/LICENSE
enum class REGULARARRAY_GETITEM_NEXT_AT_ERRORS {
IND_OUT_OF_RANGE // message: "index out of range"
};
template <typename T>
__global__ void
awkward_RegularArray_getitem_next_at(T* tocarry,
int64_t at,
int64_t length,
int64_t size,
uint64_t invocation_index,
uint64_t* err_code) {
if (err_code[0] == NO_ERROR) {
int64_t thread_id = blockIdx.x * blockDim.x + threadIdx.x;
int64_t regular_at = at;
if (regular_at < 0) {
regular_at += size;
}
if (!(0 <= regular_at && regular_at < size)) {
RAISE_ERROR(REGULARARRAY_GETITEM_NEXT_AT_ERRORS::IND_OUT_OF_RANGE)
}
if (thread_id < length) {
tocarry[thread_id] = (thread_id * size) + regular_at;
}
}
}
|
35531415782fcc4f0e8f404de972e199fb077289.hip
|
// !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
#include "includes.h"
__global__ void profileSubphaseMatrixColoring_kernel() {}
|
35531415782fcc4f0e8f404de972e199fb077289.cu
|
#include "includes.h"
__global__ void profileSubphaseMatrixColoring_kernel() {}
|
e2bf673083250f488cc28088c854b81f73631f41.hip
|
// !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////
/// @file source/nbfmm/model/disk.cu
/// @brief The implementation of disk shape generator.
///
/// @author Mu Yang <emfomy@gmail.com>
///
#include <nbfmm/model.hpp>
#include <cmath>
#include <hiprand/hiprand_kernel.h>
#include <thrust/device_vector.h>
#include <nbfmm/core/kernel_function.hpp>
#include <nbfmm/utility.hpp>
/// @addtogroup impl_model
/// @{
////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////
/// Generate disk shape particles
///
/// @param[in] num_particle the number of particles.
/// @param[in] center_position the center position.
/// @param[in] max_radius the radius.
/// @param[in] weight the weight.
/// @param[in] tick the step size in time.
/// @param[out] position_current the current particle positions.
/// @param[out] position_previous the previous particle positions.
/// @param[out] weight_ptr the particle weights.
///
__global__ void generateDiskDevice(
const int num_particle,
const float2 center_position,
const float max_radius,
const float weight,
const float tick,
float2* position_current,
float2* position_previous,
float* weight_ptr
) {
const int idx = threadIdx.x + blockIdx.x * blockDim.x;
if ( idx >= num_particle ) {
return;
}
hiprandState_t s;
hiprand_init(0, idx, 0, &s);
const float radius = (float(idx+1) / num_particle) * max_radius;
const float2 effect = nbfmm::kernelFunction(make_float2(0.0f, 0.0f), make_float2(radius, 0.0f), weight * idx);
const float angle_difference = acos(1.0 - effect.x * tick * tick / radius / 2.0f);
const float angle_current = 2.0f * M_PI * hiprand_uniform(&s);
const float angle_previous = angle_current - angle_difference;
position_current[idx] = center_position + radius * make_float2(cosf(angle_current), sinf(angle_current));
position_previous[idx] = center_position + radius * make_float2(cosf(angle_previous), sinf(angle_previous));
weight_ptr[idx] = weight;
}
/// @}
// Generate disk shape particles
void nbfmm::model::generateDisk(
const int num_particle,
const float2 center_position,
const float radius,
const float weight,
const float tick,
float2* gpuptr_position_current,
float2* gpuptr_position_previous,
float* gpuptr_weight
) {
assert( num_particle > 0 );
assert( radius > 0 );
assert( weight > 0 );
const int block_dim = kMaxBlockDim;
const int grid_dim = ((num_particle-1)/block_dim)+1;
hipLaunchKernelGGL(( generateDiskDevice), dim3(grid_dim), dim3(block_dim), 0, 0, num_particle, center_position, radius, weight, tick,
gpuptr_position_current, gpuptr_position_previous, gpuptr_weight);
}
|
e2bf673083250f488cc28088c854b81f73631f41.cu
|
////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////
/// @file source/nbfmm/model/disk.cu
/// @brief The implementation of disk shape generator.
///
/// @author Mu Yang <emfomy@gmail.com>
///
#include <nbfmm/model.hpp>
#include <cmath>
#include <curand_kernel.h>
#include <thrust/device_vector.h>
#include <nbfmm/core/kernel_function.hpp>
#include <nbfmm/utility.hpp>
/// @addtogroup impl_model
/// @{
////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////
/// Generate disk shape particles
///
/// @param[in] num_particle the number of particles.
/// @param[in] center_position the center position.
/// @param[in] max_radius the radius.
/// @param[in] weight the weight.
/// @param[in] tick the step size in time.
/// @param[out] position_current the current particle positions.
/// @param[out] position_previous the previous particle positions.
/// @param[out] weight_ptr the particle weights.
///
__global__ void generateDiskDevice(
const int num_particle,
const float2 center_position,
const float max_radius,
const float weight,
const float tick,
float2* position_current,
float2* position_previous,
float* weight_ptr
) {
const int idx = threadIdx.x + blockIdx.x * blockDim.x;
if ( idx >= num_particle ) {
return;
}
curandState s;
curand_init(0, idx, 0, &s);
const float radius = (float(idx+1) / num_particle) * max_radius;
const float2 effect = nbfmm::kernelFunction(make_float2(0.0f, 0.0f), make_float2(radius, 0.0f), weight * idx);
const float angle_difference = acos(1.0 - effect.x * tick * tick / radius / 2.0f);
const float angle_current = 2.0f * M_PI * curand_uniform(&s);
const float angle_previous = angle_current - angle_difference;
position_current[idx] = center_position + radius * make_float2(cosf(angle_current), sinf(angle_current));
position_previous[idx] = center_position + radius * make_float2(cosf(angle_previous), sinf(angle_previous));
weight_ptr[idx] = weight;
}
/// @}
// Generate disk shape particles
void nbfmm::model::generateDisk(
const int num_particle,
const float2 center_position,
const float radius,
const float weight,
const float tick,
float2* gpuptr_position_current,
float2* gpuptr_position_previous,
float* gpuptr_weight
) {
assert( num_particle > 0 );
assert( radius > 0 );
assert( weight > 0 );
const int block_dim = kMaxBlockDim;
const int grid_dim = ((num_particle-1)/block_dim)+1;
generateDiskDevice<<<grid_dim, block_dim>>>(num_particle, center_position, radius, weight, tick,
gpuptr_position_current, gpuptr_position_previous, gpuptr_weight);
}
|
fdb778fc4d1b9610bbb11b43221d30ad65d4a470.hip
|
// !!! This is a file automatically generated by hipify!!!
/*!
* Copyright 2017 XGBoost contributors
*/
#include <thrust/device_vector.h>
#include <xgboost/base.h>
#include "../../../src/common/device_helpers.cuh"
#include "gtest/gtest.h"
struct Shard { int id; };
TEST(DeviceHelpers, Basic) {
std::vector<Shard> shards (4);
for (int i = 0; i < 4; ++i) {
shards[i].id = i;
}
int sum = dh::ReduceShards<int>(&shards, [](Shard& s) { return s.id ; });
ASSERT_EQ(sum, 6);
}
void CreateTestData(xgboost::bst_uint num_rows, int max_row_size,
thrust::host_vector<int> *row_ptr,
thrust::host_vector<xgboost::bst_uint> *rows) {
row_ptr->resize(num_rows + 1);
int sum = 0;
for (xgboost::bst_uint i = 0; i <= num_rows; i++) {
(*row_ptr)[i] = sum;
sum += rand() % max_row_size; // NOLINT
if (i < num_rows) {
for (int j = (*row_ptr)[i]; j < sum; j++) {
(*rows).push_back(i);
}
}
}
}
void TestLbs() {
srand(17);
dh::CubMemory temp_memory;
std::vector<int> test_rows = {4, 100, 1000};
std::vector<int> test_max_row_sizes = {4, 100, 1300};
for (auto num_rows : test_rows) {
for (auto max_row_size : test_max_row_sizes) {
thrust::host_vector<int> h_row_ptr;
thrust::host_vector<xgboost::bst_uint> h_rows;
CreateTestData(num_rows, max_row_size, &h_row_ptr, &h_rows);
thrust::device_vector<size_t> row_ptr = h_row_ptr;
thrust::device_vector<int> output_row(h_rows.size());
auto d_output_row = output_row.data();
dh::TransformLbs(0, &temp_memory, h_rows.size(), dh::Raw(row_ptr),
row_ptr.size() - 1, false,
[=] __device__(size_t idx, size_t ridx) {
d_output_row[idx] = ridx;
});
dh::safe_cuda(hipDeviceSynchronize());
ASSERT_TRUE(h_rows == output_row);
}
}
}
TEST(cub_lbs, Test) { TestLbs(); }
TEST(sumReduce, Test) {
thrust::device_vector<float> data(100, 1.0f);
dh::CubMemory temp;
auto sum = dh::SumReduction(temp, dh::Raw(data), data.size());
ASSERT_NEAR(sum, 100.0f, 1e-5);
}
|
fdb778fc4d1b9610bbb11b43221d30ad65d4a470.cu
|
/*!
* Copyright 2017 XGBoost contributors
*/
#include <thrust/device_vector.h>
#include <xgboost/base.h>
#include "../../../src/common/device_helpers.cuh"
#include "gtest/gtest.h"
struct Shard { int id; };
TEST(DeviceHelpers, Basic) {
std::vector<Shard> shards (4);
for (int i = 0; i < 4; ++i) {
shards[i].id = i;
}
int sum = dh::ReduceShards<int>(&shards, [](Shard& s) { return s.id ; });
ASSERT_EQ(sum, 6);
}
void CreateTestData(xgboost::bst_uint num_rows, int max_row_size,
thrust::host_vector<int> *row_ptr,
thrust::host_vector<xgboost::bst_uint> *rows) {
row_ptr->resize(num_rows + 1);
int sum = 0;
for (xgboost::bst_uint i = 0; i <= num_rows; i++) {
(*row_ptr)[i] = sum;
sum += rand() % max_row_size; // NOLINT
if (i < num_rows) {
for (int j = (*row_ptr)[i]; j < sum; j++) {
(*rows).push_back(i);
}
}
}
}
void TestLbs() {
srand(17);
dh::CubMemory temp_memory;
std::vector<int> test_rows = {4, 100, 1000};
std::vector<int> test_max_row_sizes = {4, 100, 1300};
for (auto num_rows : test_rows) {
for (auto max_row_size : test_max_row_sizes) {
thrust::host_vector<int> h_row_ptr;
thrust::host_vector<xgboost::bst_uint> h_rows;
CreateTestData(num_rows, max_row_size, &h_row_ptr, &h_rows);
thrust::device_vector<size_t> row_ptr = h_row_ptr;
thrust::device_vector<int> output_row(h_rows.size());
auto d_output_row = output_row.data();
dh::TransformLbs(0, &temp_memory, h_rows.size(), dh::Raw(row_ptr),
row_ptr.size() - 1, false,
[=] __device__(size_t idx, size_t ridx) {
d_output_row[idx] = ridx;
});
dh::safe_cuda(cudaDeviceSynchronize());
ASSERT_TRUE(h_rows == output_row);
}
}
}
TEST(cub_lbs, Test) { TestLbs(); }
TEST(sumReduce, Test) {
thrust::device_vector<float> data(100, 1.0f);
dh::CubMemory temp;
auto sum = dh::SumReduction(temp, dh::Raw(data), data.size());
ASSERT_NEAR(sum, 100.0f, 1e-5);
}
|
d84454fc6c9df98334952e91ef1484563dba1b19.hip
|
// !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
#include "includes.h"
__global__ void backward_zero_nonmax_kernel(int n, int *indexes, float *prev_delta)
{
int id = (blockIdx.x + blockIdx.y*gridDim.x) * blockDim.x + threadIdx.x;
if (id >= n) return;
if (indexes[id] != id) prev_delta[id] = 0;
}
|
d84454fc6c9df98334952e91ef1484563dba1b19.cu
|
#include "includes.h"
// Max-pool backward helper: the gradient survives only at positions that
// were the argmax of their pooling window (indexes[id] == id); every other
// position in prev_delta is zeroed. Expects a 1D/2D grid of 1D blocks
// covering at least n threads; extra threads do nothing.
__global__ void backward_zero_nonmax_kernel(int n, int *indexes, float *prev_delta)
{
    int id = (blockIdx.x + blockIdx.y*gridDim.x) * blockDim.x + threadIdx.x;
    if (id < n && indexes[id] != id) prev_delta[id] = 0;
}
|
2df2eff28c8c73e253edd82059e21ea690247b93.hip
|
// !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
/***************************************************************************
*cr
*cr (C) Copyright 2010 The Board of Trustees of the
*cr University of Illinois
*cr All Rights Reserved
*cr
***************************************************************************/
#include <parboil.h>
#include <stdio.h>
#include <stdlib.h>
#include "file.h"
#include "common.h"
#include "cuerr.h"
#include "kernels.hip"
// Reads nx*ny*nz binary floats from fp into A0, x varying fastest, then y,
// then z -- the same element order the original per-element loop produced.
// Returns 0 on success, -1 on short read or I/O error (the original ignored
// fread's return value, so a truncated input file went unnoticed).
static int read_data(float *A0, int nx,int ny,int nz,FILE *fp)
{
    size_t count = (size_t)nx * (size_t)ny * (size_t)nz;
    // One bulk fread replaces nx*ny*nz single-element calls.
    if (fread(A0, sizeof(float), count, fp) != count)
        return -1;
    return 0;
}
// Parboil 7-point stencil driver: reads an nx*ny*nz float grid from the
// input file, runs `iteration` stencil sweeps on the GPU while ping-ponging
// the d_A0/d_Anext buffers, and optionally writes the final grid.
// pb_SwitchToTimer brackets the compute/IO/copy/kernel phases for reporting.
int main(int argc, char** argv) {
struct pb_TimerSet timers;
struct pb_Parameters *parameters;
printf("CUDA accelerated 7 points stencil codes****\n");
printf("Original version by Li-Wen Chang <lchang20@illinois.edu> and I-Jui Sung<sung10@illinois.edu>\n");
printf("This version maintained by Chris Rodrigues ***********\n");
parameters = pb_ReadParameters(&argc, argv);
pb_InitializeTimerSet(&timers);
pb_SwitchToTimer(&timers, pb_TimerID_COMPUTE);
//declaration
int nx,ny,nz;
int size;
int iteration;
// stencil coefficients passed to the kernel (semantics in kernels.hip)
float c0=1.0f/6.0f;
float c1=1.0f/6.0f/6.0f;
if (argc<5)
{
printf("Usage: probe nx ny nz tx ty t\n"
"nx: the grid size x\n"
"ny: the grid size y\n"
"nz: the grid size z\n"
"t: the iteration time\n");
return -1;
}
// grid dimensions and sweep count; all must be positive
nx = atoi(argv[1]);
if (nx<1)
return -1;
ny = atoi(argv[2]);
if (ny<1)
return -1;
nz = atoi(argv[3]);
if (nz<1)
return -1;
iteration = atoi(argv[4]);
if(iteration<1)
return -1;
//host data
float *h_A0;
float *h_Anext;
//device
float *d_A0;
float *d_Anext;
size=nx*ny*nz;
h_A0=(float*)malloc(sizeof(float)*size);
h_Anext=(float*)malloc(sizeof(float)*size);
pb_SwitchToTimer(&timers, pb_TimerID_IO);
FILE *fp = fopen(parameters->inpFiles[0], "rb");
read_data(h_A0, nx,ny,nz,fp);
fclose(fp);
pb_SwitchToTimer(&timers, pb_TimerID_COPY);
//memory allocation
// NOTE(review): hipMalloc/hipMemcpy return codes are unchecked here; the
// CUERR after the kernel loop is the only error check in this function.
hipMalloc((void **)&d_A0, size*sizeof(float));
hipMalloc((void **)&d_Anext, size*sizeof(float));
hipMemset(d_Anext,0,size*sizeof(float));
//memory copy
// seed both buffers with the input grid so the first sweep reads valid data
hipMemcpy(d_A0, h_A0, size*sizeof(float), hipMemcpyHostToDevice);
hipMemcpy(d_Anext, d_A0, size*sizeof(float), hipMemcpyDeviceToDevice);
pb_SwitchToTimer(&timers, pb_TimerID_COMPUTE);
//only use tx-by-ty threads
int tx=32;
int ty=4;
dim3 block (tx, ty, 1);
//also change threads size maping from tx by ty to 2tx x ty
// each block covers 2*tx columns (x-coarsened by 2), hence the grid math
dim3 grid ((nx+tx*2-1)/(tx*2), (ny+ty-1)/ty,1);
int sh_size = tx*2*ty*sizeof(float);
//main execution
pb_SwitchToTimer(&timers, pb_TimerID_KERNEL);
for(int t=0;t<iteration;t++)
{
hipLaunchKernelGGL(( block2D_hybrid_coarsen_x), dim3(grid), dim3(block),sh_size, 0, c0,c1, d_A0, d_Anext, nx, ny, nz);
// ping-pong: this sweep's output becomes the next sweep's input
float *d_temp = d_A0;
d_A0 = d_Anext;
d_Anext = d_temp;
}
CUERR // check and clear any existing errors
// undo the final swap so d_Anext points at the most recent grid for copy-out
float *d_temp = d_A0;
d_A0 = d_Anext;
d_Anext = d_temp;
pb_SwitchToTimer(&timers, pb_TimerID_COPY);
hipMemcpy(h_Anext, d_Anext,size*sizeof(float), hipMemcpyDeviceToHost);
hipFree(d_A0);
hipFree(d_Anext);
if (parameters->outFile) {
pb_SwitchToTimer(&timers, pb_TimerID_IO);
outputData(parameters->outFile,h_Anext,nx,ny,nz);
}
pb_SwitchToTimer(&timers, pb_TimerID_COMPUTE);
free (h_A0);
free (h_Anext);
pb_SwitchToTimer(&timers, pb_TimerID_NONE);
pb_PrintTimerSet(&timers);
pb_FreeParameters(parameters);
return 0;
}
|
2df2eff28c8c73e253edd82059e21ea690247b93.cu
|
/***************************************************************************
*cr
*cr (C) Copyright 2010 The Board of Trustees of the
*cr University of Illinois
*cr All Rights Reserved
*cr
***************************************************************************/
#include <parboil.h>
#include <stdio.h>
#include <stdlib.h>
#include "file.h"
#include "common.h"
#include "cuerr.h"
#include "kernels.cu"
// Reads nx*ny*nz binary floats from fp into A0, x varying fastest, then y,
// then z -- the same element order the original per-element loop produced.
// Returns 0 on success, -1 on short read or I/O error (the original ignored
// fread's return value, so a truncated input file went unnoticed).
static int read_data(float *A0, int nx,int ny,int nz,FILE *fp)
{
    size_t count = (size_t)nx * (size_t)ny * (size_t)nz;
    // One bulk fread replaces nx*ny*nz single-element calls.
    if (fread(A0, sizeof(float), count, fp) != count)
        return -1;
    return 0;
}
// Parboil 7-point stencil driver: reads an nx*ny*nz float grid from the
// input file, runs `iteration` stencil sweeps on the GPU while ping-ponging
// the d_A0/d_Anext buffers, and optionally writes the final grid.
// pb_SwitchToTimer brackets the compute/IO/copy/kernel phases for reporting.
int main(int argc, char** argv) {
struct pb_TimerSet timers;
struct pb_Parameters *parameters;
printf("CUDA accelerated 7 points stencil codes****\n");
printf("Original version by Li-Wen Chang <lchang20@illinois.edu> and I-Jui Sung<sung10@illinois.edu>\n");
printf("This version maintained by Chris Rodrigues ***********\n");
parameters = pb_ReadParameters(&argc, argv);
pb_InitializeTimerSet(&timers);
pb_SwitchToTimer(&timers, pb_TimerID_COMPUTE);
//declaration
int nx,ny,nz;
int size;
int iteration;
// stencil coefficients passed to the kernel (semantics in kernels.cu)
float c0=1.0f/6.0f;
float c1=1.0f/6.0f/6.0f;
if (argc<5)
{
printf("Usage: probe nx ny nz tx ty t\n"
"nx: the grid size x\n"
"ny: the grid size y\n"
"nz: the grid size z\n"
"t: the iteration time\n");
return -1;
}
// grid dimensions and sweep count; all must be positive
nx = atoi(argv[1]);
if (nx<1)
return -1;
ny = atoi(argv[2]);
if (ny<1)
return -1;
nz = atoi(argv[3]);
if (nz<1)
return -1;
iteration = atoi(argv[4]);
if(iteration<1)
return -1;
//host data
float *h_A0;
float *h_Anext;
//device
float *d_A0;
float *d_Anext;
size=nx*ny*nz;
h_A0=(float*)malloc(sizeof(float)*size);
h_Anext=(float*)malloc(sizeof(float)*size);
pb_SwitchToTimer(&timers, pb_TimerID_IO);
FILE *fp = fopen(parameters->inpFiles[0], "rb");
read_data(h_A0, nx,ny,nz,fp);
fclose(fp);
pb_SwitchToTimer(&timers, pb_TimerID_COPY);
//memory allocation
// NOTE(review): cudaMalloc/cudaMemcpy return codes are unchecked here; the
// CUERR after the kernel loop is the only error check in this function.
cudaMalloc((void **)&d_A0, size*sizeof(float));
cudaMalloc((void **)&d_Anext, size*sizeof(float));
cudaMemset(d_Anext,0,size*sizeof(float));
//memory copy
// seed both buffers with the input grid so the first sweep reads valid data
cudaMemcpy(d_A0, h_A0, size*sizeof(float), cudaMemcpyHostToDevice);
cudaMemcpy(d_Anext, d_A0, size*sizeof(float), cudaMemcpyDeviceToDevice);
pb_SwitchToTimer(&timers, pb_TimerID_COMPUTE);
//only use tx-by-ty threads
int tx=32;
int ty=4;
dim3 block (tx, ty, 1);
//also change threads size maping from tx by ty to 2tx x ty
// each block covers 2*tx columns (x-coarsened by 2), hence the grid math
dim3 grid ((nx+tx*2-1)/(tx*2), (ny+ty-1)/ty,1);
int sh_size = tx*2*ty*sizeof(float);
//main execution
pb_SwitchToTimer(&timers, pb_TimerID_KERNEL);
for(int t=0;t<iteration;t++)
{
block2D_hybrid_coarsen_x<<<grid, block,sh_size>>>(c0,c1, d_A0, d_Anext, nx, ny, nz);
// ping-pong: this sweep's output becomes the next sweep's input
float *d_temp = d_A0;
d_A0 = d_Anext;
d_Anext = d_temp;
}
CUERR // check and clear any existing errors
// undo the final swap so d_Anext points at the most recent grid for copy-out
float *d_temp = d_A0;
d_A0 = d_Anext;
d_Anext = d_temp;
pb_SwitchToTimer(&timers, pb_TimerID_COPY);
cudaMemcpy(h_Anext, d_Anext,size*sizeof(float), cudaMemcpyDeviceToHost);
cudaFree(d_A0);
cudaFree(d_Anext);
if (parameters->outFile) {
pb_SwitchToTimer(&timers, pb_TimerID_IO);
outputData(parameters->outFile,h_Anext,nx,ny,nz);
}
pb_SwitchToTimer(&timers, pb_TimerID_COMPUTE);
free (h_A0);
free (h_Anext);
pb_SwitchToTimer(&timers, pb_TimerID_NONE);
pb_PrintTimerSet(&timers);
pb_FreeParameters(parameters);
return 0;
}
|
f6e88c2b3e7f376b33a5facb3bebc8c20b820e8f.hip
|
// !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
//
// Created by Fabio Lipreri on 2019-09-05.
//
/*
* class OutputLayer:
def predict(self, x):
exp = np.exp(x-np.max(x))
return exp / exp.sum(axis=0)
def loss(self, x, y):
p = self.predict(x)
return -np.log(p[y])
def diff(self, x, y):
p = self.predict(x)
p[y] -= 1
return p
*/
#include <cmath>
#include "outputlayer.h"
#include "rocblas.h"
#include "../utils/common.h"
#include "../utils/cudamath.h"
// Numerically stable softmax of the vector x into `predictions` (device):
// p = exp(x - max(x)) / sum(exp(x - max(x))). Subtracting the max before
// exp() prevents overflow.
void OutputLayer::predict(hipblasHandle_t handle, const Matrix &x){
float sum = 0.0f;
float alpha;
int maxindex = -1;
// lazily allocate the output buffer on first use
if(!predictions.isDevAlloc()){
predictions.allocate_size(x.getX(), x.getY());
}
// Isamax returns a 1-based index of the largest-magnitude element.
CHECK_CUBLAS(hipblasIsamax(handle, x.getX(), x.getDevData().get(), 1, &maxindex));
dim3 TxB(BLOCK_SIZE);
dim3 num_blocks((x.getY() * x.getX() + TxB.x - 1) / TxB.x);
// x[maxindex-1] is read through Matrix's host-side operator[]; assumes x's
// host copy is in sync with its device data -- TODO confirm.
hipLaunchKernelGGL(( exp_predict), dim3(num_blocks), dim3(TxB), 0, 0, predictions.getDevData().get(), x.getDevData().get(), x[maxindex-1], x.getX(), x.getY());
CHECK_CUBLAS(hipblasSasum(handle, predictions.getX(), predictions.getDevData().get(), 1, &sum))
// normalize in place: predictions *= 1/sum
alpha = 1/sum;
CHECK_CUBLAS(hipblasSscal(handle, predictions.getX(), &alpha, predictions.getDevData().get(), 1))
}
// Cross-entropy loss for true class index y: -log(p[y]).
// NOTE(review): predict() only runs when `predictions` has never been
// allocated, so later calls with a different x reuse the cached softmax --
// confirm this caching is intentional.
float OutputLayer::loss(hipblasHandle_t handle, const Matrix &x, int y) {
if(!predictions.isDevAlloc()){
this->predict(handle, x);
predictions.cpyDevToHost();
}
//predictions.print_matrix();
return -1.0 * log(predictions[y]);
}
// Softmax + cross-entropy gradient w.r.t. x: dL/dx = p, with p[y] -= 1.
// The adjustment is made on the host copy and synced back to the device.
// NOTE(review): as in loss(), predict() only runs when `predictions` was
// never allocated -- confirm the caching is intentional.
const Matrix& OutputLayer::diff(hipblasHandle_t handle, const Matrix &x, int y){
if(!predictions.isDevAlloc()){
this->predict(handle, x);
predictions.cpyDevToHost();
}
predictions[y] -= 1;
predictions.cpyHostToDev();
return predictions;
}
// Read-only accessor for the cached softmax output of the last predict().
const Matrix &OutputLayer::getPredictions() const {
return predictions;
}
|
f6e88c2b3e7f376b33a5facb3bebc8c20b820e8f.cu
|
//
// Created by Fabio Lipreri on 2019-09-05.
//
/*
* class OutputLayer:
def predict(self, x):
exp = np.exp(x-np.max(x))
return exp / exp.sum(axis=0)
def loss(self, x, y):
p = self.predict(x)
return -np.log(p[y])
def diff(self, x, y):
p = self.predict(x)
p[y] -= 1
return p
*/
#include <cmath>
#include "outputlayer.h"
#include "cublas_v2.h"
#include "../utils/common.h"
#include "../utils/cudamath.h"
// Numerically stable softmax of the vector x into `predictions` (device):
// p = exp(x - max(x)) / sum(exp(x - max(x))). Subtracting the max before
// exp() prevents overflow.
void OutputLayer::predict(cublasHandle_t handle, const Matrix &x){
float sum = 0.0f;
float alpha;
int maxindex = -1;
// lazily allocate the output buffer on first use
if(!predictions.isDevAlloc()){
predictions.allocate_size(x.getX(), x.getY());
}
// Isamax returns a 1-based index of the largest-magnitude element.
CHECK_CUBLAS(cublasIsamax(handle, x.getX(), x.getDevData().get(), 1, &maxindex));
dim3 TxB(BLOCK_SIZE);
dim3 num_blocks((x.getY() * x.getX() + TxB.x - 1) / TxB.x);
// x[maxindex-1] is read through Matrix's host-side operator[]; assumes x's
// host copy is in sync with its device data -- TODO confirm.
exp_predict<<<num_blocks, TxB>>>(predictions.getDevData().get(), x.getDevData().get(), x[maxindex-1], x.getX(), x.getY());
CHECK_CUBLAS(cublasSasum(handle, predictions.getX(), predictions.getDevData().get(), 1, &sum))
// normalize in place: predictions *= 1/sum
alpha = 1/sum;
CHECK_CUBLAS(cublasSscal(handle, predictions.getX(), &alpha, predictions.getDevData().get(), 1))
}
// Cross-entropy loss for true class index y: -log(p[y]).
// NOTE(review): predict() only runs when `predictions` has never been
// allocated, so later calls with a different x reuse the cached softmax --
// confirm this caching is intentional.
float OutputLayer::loss(cublasHandle_t handle, const Matrix &x, int y) {
if(!predictions.isDevAlloc()){
this->predict(handle, x);
predictions.cpyDevToHost();
}
//predictions.print_matrix();
return -1.0 * log(predictions[y]);
}
// Softmax + cross-entropy gradient w.r.t. x: dL/dx = p, with p[y] -= 1.
// The adjustment is made on the host copy and synced back to the device.
// NOTE(review): as in loss(), predict() only runs when `predictions` was
// never allocated -- confirm the caching is intentional.
const Matrix& OutputLayer::diff(cublasHandle_t handle, const Matrix &x, int y){
if(!predictions.isDevAlloc()){
this->predict(handle, x);
predictions.cpyDevToHost();
}
predictions[y] -= 1;
predictions.cpyHostToDev();
return predictions;
}
// Read-only accessor for the cached softmax output of the last predict().
const Matrix &OutputLayer::getPredictions() const {
return predictions;
}
|
058130bb4e9f97e7046bec2fe14f29e78cb6fddc.hip
|
// !!! This is a file automatically generated by hipify!!!
/*
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing,
* software distributed under the License is distributed on an
* "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
* KIND, either express or implied. See the License for the
* specific language governing permissions and limitations
* under the License.
*/
/*!
* \file elemwise_unary_op_basic.cu
* \brief GPU Implementation of unary functions.
*/
#include "./elemwise_binary_op.h"
namespace mxnet {
namespace op {
NNVM_REGISTER_OP(relu)
.set_attr<FCompute>("FCompute<gpu>", UnaryOp::Compute<gpu, mshadow_op::relu>)
.set_attr<FComputeEx>("FComputeEx<gpu>", UnaryOp::ComputeEx<gpu, mshadow_op::relu>);
NNVM_REGISTER_OP(_backward_relu)
.set_attr<FCompute>("FCompute<gpu>", ElemwiseBinaryOp::Compute<
gpu, unary_bwd<mshadow_op::relu_grad>>);
NNVM_REGISTER_OP(sigmoid)
.set_attr<FCompute>("FCompute<gpu>", UnaryOp::Compute<gpu, mshadow_op::sigmoid>);
NNVM_REGISTER_OP(_backward_sigmoid)
.set_attr<FCompute>("FCompute<gpu>", ElemwiseBinaryOp::Compute<
gpu, unary_bwd<mshadow_op::sigmoid_grad>>);
NNVM_REGISTER_OP(hard_sigmoid)
.set_attr<FCompute>("FCompute<gpu>", HardSigmoidForward<gpu>);
NNVM_REGISTER_OP(_backward_hard_sigmoid)
.set_attr<FCompute>("FCompute<gpu>", HardSigmoidBackward<gpu>);
// softsign
NNVM_REGISTER_OP(softsign)
.set_attr<FCompute>("FCompute<gpu>", UnaryOp::Compute<gpu, mshadow_op::softsign>);
NNVM_REGISTER_OP(_backward_softsign)
.set_attr<FCompute>("FCompute<gpu>", ElemwiseBinaryOp::Compute<
gpu, unary_bwd<mshadow_op::softsign_grad>>);
// copy
NNVM_REGISTER_OP(_copy)
.set_attr<FCompute>("FCompute<gpu>", UnaryOp::IdentityCompute<gpu>)
.set_attr<FComputeEx>("FComputeEx<gpu>", UnaryOp::IdentityComputeEx<gpu>);
NNVM_REGISTER_OP(_backward_copy)
.set_attr<FCompute>("FCompute<gpu>", UnaryOp::IdentityCompute<gpu>);
NNVM_REGISTER_OP(BlockGrad)
.set_attr<FCompute>("FCompute<gpu>", UnaryOp::IdentityCompute<gpu>);
NNVM_REGISTER_OP(make_loss)
.set_attr<FCompute>("FCompute<gpu>", UnaryOp::IdentityCompute<gpu>)
.set_attr<FComputeEx>("FComputeEx<gpu>", UnaryOp::IdentityComputeEx<gpu>);
// identity output as first input, but attributes are constrainted to be like rhs
NNVM_REGISTER_OP(_identity_with_attr_like_rhs)
.set_attr<FCompute>("FCompute<gpu>", UnaryOp::IdentityCompute<gpu>)
.set_attr<FComputeEx>("FComputeEx<gpu>", UnaryOp::IdentityComputeFirstItemEx<gpu>);
NNVM_REGISTER_OP(reshape_like)
.set_attr<FCompute>("FCompute<gpu>", UnaryOp::IdentityCompute<gpu>);
// GPU kernel-less implementation of shape_array: copies the input tensor's
// shape (ndim int64 values) from host memory into the output blob on the
// device, queued on the op's compute stream.
void ShapeComputeGPU(const nnvm::NodeAttrs& attrs,
const OpContext& ctx,
const std::vector<TBlob>& inputs,
const std::vector<OpReqType>& req,
const std::vector<TBlob>& outputs) {
using namespace mshadow;
CHECK_EQ(inputs.size(), 1U);
CHECK_EQ(outputs.size(), 1U);
CHECK_EQ(req.size(), 1U);
const TBlob& in_data = inputs[0];
const TBlob& out_data = outputs[0];
mshadow::Stream<gpu> *s = ctx.get_stream<gpu>();
// NOTE(review): shape_.data() is pageable host memory handed to an async
// copy; this relies on the copy staging the data before returning and on
// shape_ outliving the call -- confirm. Assumes shape dims are int64-sized.
hipMemcpyAsync(out_data.dptr_,
in_data.shape_.data(),
in_data.ndim() * sizeof(int64_t),
hipMemcpyHostToDevice,
mshadow::Stream<gpu>::GetStream(s));
}
NNVM_REGISTER_OP(shape_array)
.set_attr<FCompute>("FCompute<gpu>", ShapeComputeGPU);
// GPU kernel-less implementation of size_array: writes the input tensor's
// total element count (one int64) into the output blob on the device,
// queued on the op's compute stream.
void SizeComputeGPU(const nnvm::NodeAttrs& attrs,
const OpContext& ctx,
const std::vector<TBlob>& inputs,
const std::vector<OpReqType>& req,
const std::vector<TBlob>& outputs) {
using namespace mshadow;
using namespace mxnet_op;
CHECK_EQ(inputs.size(), 1U);
CHECK_EQ(outputs.size(), 1U);
CHECK_EQ(req.size(), 1U);
const TBlob& in_data = inputs[0];
const TBlob& out_data = outputs[0];
mshadow::Stream<gpu> *s = ctx.get_stream<gpu>();
const index_t size_var = in_data.Size();
// NOTE(review): size_var is a stack local passed to an async copy; this
// relies on pageable-host copies staging the value before returning --
// confirm. Also assumes index_t is 64-bit to match the copy size.
hipMemcpyAsync(out_data.dptr_,
&size_var,
1U * sizeof(int64_t),
hipMemcpyHostToDevice,
mshadow::Stream<gpu>::GetStream(s));
}
NNVM_REGISTER_OP(size_array)
.set_attr<FCompute>("FCompute<gpu>", SizeComputeGPU);
NNVM_REGISTER_OP(Cast)
.set_attr<FCompute>("FCompute<gpu>", CastCompute<gpu>);
NNVM_REGISTER_OP(_backward_cast)
.set_attr<FCompute>("FCompute<gpu>", CastCompute<gpu>);
// negative
NNVM_REGISTER_OP(negative)
.set_attr<FCompute>("FCompute<gpu>", UnaryOp::Compute<gpu, mshadow_op::negation>)
.set_attr<FComputeEx>("FComputeEx<gpu>", UnaryOp::ComputeEx<gpu, mshadow_op::negation>);
// reciprocal
NNVM_REGISTER_OP(reciprocal)
.set_attr<FCompute>("FCompute<gpu>", UnaryOp::Compute<gpu, mshadow_op::reciprocal>);
NNVM_REGISTER_OP(_backward_reciprocal)
.set_attr<FCompute>("FCompute<gpu>", ElemwiseBinaryOp::Compute<
gpu, unary_bwd<mshadow_op::reciprocal_grad> >);
// abs
NNVM_REGISTER_OP(abs)
.set_attr<FCompute>("FCompute<gpu>", UnaryOp::Compute<gpu, mshadow_op::abs>)
.set_attr<FComputeEx>("FComputeEx<gpu>", UnaryOp::ComputeEx<gpu, mshadow_op::abs>);
NNVM_REGISTER_OP(_backward_abs)
.set_attr<FCompute>("FCompute<gpu>", ElemwiseBinaryOp::Compute<gpu, unary_bwd<mshadow_op::sign> >);
// sign
NNVM_REGISTER_OP(sign)
.set_attr<FCompute>("FCompute<gpu>", UnaryOp::Compute<gpu, mshadow_op::sign>)
.set_attr<FComputeEx>("FComputeEx<gpu>", UnaryOp::ComputeEx<gpu, mshadow_op::sign>);
NNVM_REGISTER_OP(_backward_sign)
.set_attr<FCompute>("FCompute<gpu>", ElemwiseBinaryOp::Compute<
gpu, unary_bwd<mshadow_op::sign_grad> >);
// round
NNVM_REGISTER_OP(round)
.set_attr<FCompute>("FCompute<gpu>", UnaryOp::Compute<gpu, mshadow_op::round>)
.set_attr<FComputeEx>("FComputeEx<gpu>", UnaryOp::ComputeEx<gpu, mshadow_op::round>);
// ceil
NNVM_REGISTER_OP(ceil)
.set_attr<FCompute>("FCompute<gpu>", UnaryOp::Compute<gpu, mshadow_op::ceil>)
.set_attr<FComputeEx>("FComputeEx<gpu>", UnaryOp::ComputeEx<gpu, mshadow_op::ceil>);
// floor
NNVM_REGISTER_OP(floor)
.set_attr<FCompute>("FCompute<gpu>", UnaryOp::Compute<gpu, mshadow_op::floor>)
.set_attr<FComputeEx>("FComputeEx<gpu>", UnaryOp::ComputeEx<gpu, mshadow_op::floor>);
// trunc
NNVM_REGISTER_OP(trunc)
.set_attr<FCompute>("FCompute<gpu>", UnaryOp::Compute<gpu, mshadow_op::trunc>)
.set_attr<FComputeEx>("FComputeEx<gpu>", UnaryOp::ComputeEx<gpu, mshadow_op::trunc>);
// rint
NNVM_REGISTER_OP(rint)
.set_attr<FCompute>("FCompute<gpu>", UnaryOp::Compute<gpu, mshadow_op::rint>)
.set_attr<FComputeEx>("FComputeEx<gpu>", UnaryOp::ComputeEx<gpu, mshadow_op::rint>);
// fix
NNVM_REGISTER_OP(fix)
.set_attr<FCompute>("FCompute<gpu>", UnaryOp::Compute<gpu, mshadow_op::fix>)
.set_attr<FComputeEx>("FComputeEx<gpu>", UnaryOp::ComputeEx<gpu, mshadow_op::fix>);
// square
NNVM_REGISTER_OP(square)
.set_attr<FCompute>("FCompute<gpu>", UnaryOp::Compute<gpu, mshadow_op::square>)
.set_attr<FComputeEx>("FComputeEx<gpu>", UnaryOp::ComputeEx<gpu, mshadow_op::square>);
NNVM_REGISTER_OP(_backward_square)
.set_attr<FCompute>("FCompute<gpu>", ElemwiseBinaryOp::Compute<
gpu, unary_bwd<mshadow_op::square_grad> >);
// sqrt
NNVM_REGISTER_OP(sqrt)
.set_attr<FCompute>("FCompute<gpu>", UnaryOp::Compute<gpu, mshadow_op::square_root>)
.set_attr<FComputeEx>("FComputeEx<gpu>", UnaryOp::ComputeEx<gpu, mshadow_op::square_root>);
NNVM_REGISTER_OP(_backward_sqrt)
.set_attr<FCompute>("FCompute<gpu>", ElemwiseBinaryOp::Compute<
gpu, unary_bwd<mshadow_op::square_root_grad> >);
// rsqrt
NNVM_REGISTER_OP(rsqrt)
.set_attr<FCompute>("FCompute<gpu>", UnaryOp::Compute<gpu, mshadow_op::reciprocal_square_root>);
NNVM_REGISTER_OP(_backward_rsqrt)
.set_attr<FCompute>("FCompute<gpu>",
ElemwiseBinaryOp::Compute<gpu, unary_bwd<mshadow_op::reciprocal_square_root_grad> >);
// cbrt
NNVM_REGISTER_OP(cbrt)
.set_attr<FCompute>("FCompute<gpu>", UnaryOp::Compute<gpu, mshadow_op::cube_root>)
.set_attr<FComputeEx>("FComputeEx<gpu>", UnaryOp::ComputeEx<gpu, mshadow_op::cube_root>);
NNVM_REGISTER_OP(_backward_cbrt)
.set_attr<FCompute>("FCompute<gpu>", ElemwiseBinaryOp::Compute<
gpu, unary_bwd<mshadow_op::cube_root_grad> >);
// rcbrt
NNVM_REGISTER_OP(rcbrt)
.set_attr<FCompute>("FCompute<gpu>", UnaryOp::Compute<gpu, mshadow_op::reciprocal_cube_root>);
NNVM_REGISTER_OP(_backward_rcbrt)
.set_attr<FCompute>("FCompute<gpu>",
ElemwiseBinaryOp::Compute<gpu, unary_bwd<mshadow_op::reciprocal_cube_root_grad> >);
// exp
NNVM_REGISTER_OP(exp)
.set_attr<FCompute>("FCompute<gpu>", UnaryOp::Compute<gpu, mshadow_op::exp>);
// log
NNVM_REGISTER_OP(log)
.set_attr<FCompute>("FCompute<gpu>", UnaryOp::Compute<gpu, mshadow_op::log>);
// log10
NNVM_REGISTER_OP(log10)
.set_attr<FCompute>("FCompute<gpu>", UnaryOp::Compute<gpu, mshadow_op::log10>);
// log2
NNVM_REGISTER_OP(log2)
.set_attr<FCompute>("FCompute<gpu>", UnaryOp::Compute<gpu, mshadow_op::log2>);
NNVM_REGISTER_OP(_backward_log)
.set_attr<FCompute>("FCompute<gpu>", ElemwiseBinaryOp::Compute<
gpu, unary_bwd<mshadow_op::log_grad> >);
NNVM_REGISTER_OP(_backward_log10)
.set_attr<FCompute>("FCompute<gpu>", ElemwiseBinaryOp::Compute<
gpu, unary_bwd<mshadow_op::log10_grad> >);
NNVM_REGISTER_OP(_backward_log2)
.set_attr<FCompute>("FCompute<gpu>", ElemwiseBinaryOp::Compute<
gpu, unary_bwd<mshadow_op::log2_grad> >);
// log1p
NNVM_REGISTER_OP(log1p)
.set_attr<FCompute>("FCompute<gpu>", UnaryOp::Compute<gpu, mshadow_op::log1p>)
.set_attr<FComputeEx>("FComputeEx<gpu>", UnaryOp::ComputeEx<gpu, mshadow_op::log1p>);
NNVM_REGISTER_OP(_backward_log1p)
.set_attr<FCompute>("FCompute<gpu>", ElemwiseBinaryOp::Compute<
gpu, unary_bwd<mshadow_op::log1p_grad> >);
// expm1
NNVM_REGISTER_OP(expm1)
.set_attr<FCompute>("FCompute<gpu>", UnaryOp::Compute<gpu, mshadow_op::expm1>)
.set_attr<FComputeEx>("FComputeEx<gpu>", UnaryOp::ComputeEx<gpu, mshadow_op::expm1>);
NNVM_REGISTER_OP(_backward_expm1)
.set_attr<FCompute>("FCompute<gpu>", ElemwiseBinaryOp::Compute<
gpu, unary_bwd<mshadow_op::exp> >);
// gamma
NNVM_REGISTER_OP(gamma)
.set_attr<FCompute>("FCompute<gpu>", UnaryOp::Compute<gpu, mshadow_op::gamma>);
NNVM_REGISTER_OP(_backward_gamma)
.set_attr<FCompute>("FCompute<gpu>", ElemwiseBinaryOp::Compute<
gpu, unary_bwd<mshadow_op::gamma_grad> >);
// gammaln
NNVM_REGISTER_OP(gammaln)
.set_attr<FCompute>("FCompute<gpu>", UnaryOp::Compute<gpu, mshadow_op::gammaln>);
NNVM_REGISTER_OP(_backward_gammaln)
.set_attr<FCompute>("FCompute<gpu>", ElemwiseBinaryOp::Compute<
gpu, unary_bwd<mshadow_op::gammaln_grad> >);
// logical not
NNVM_REGISTER_OP(logical_not)
.set_attr<FCompute>("FCompute<gpu>", UnaryOp::Compute<gpu, mshadow_op::nt>);
} // namespace op
} // namespace mxnet
|
058130bb4e9f97e7046bec2fe14f29e78cb6fddc.cu
|
/*
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing,
* software distributed under the License is distributed on an
* "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
* KIND, either express or implied. See the License for the
* specific language governing permissions and limitations
* under the License.
*/
/*!
* \file elemwise_unary_op_basic.cu
* \brief GPU Implementation of unary functions.
*/
#include "./elemwise_binary_op.h"
namespace mxnet {
namespace op {
NNVM_REGISTER_OP(relu)
.set_attr<FCompute>("FCompute<gpu>", UnaryOp::Compute<gpu, mshadow_op::relu>)
.set_attr<FComputeEx>("FComputeEx<gpu>", UnaryOp::ComputeEx<gpu, mshadow_op::relu>);
NNVM_REGISTER_OP(_backward_relu)
.set_attr<FCompute>("FCompute<gpu>", ElemwiseBinaryOp::Compute<
gpu, unary_bwd<mshadow_op::relu_grad>>);
NNVM_REGISTER_OP(sigmoid)
.set_attr<FCompute>("FCompute<gpu>", UnaryOp::Compute<gpu, mshadow_op::sigmoid>);
NNVM_REGISTER_OP(_backward_sigmoid)
.set_attr<FCompute>("FCompute<gpu>", ElemwiseBinaryOp::Compute<
gpu, unary_bwd<mshadow_op::sigmoid_grad>>);
NNVM_REGISTER_OP(hard_sigmoid)
.set_attr<FCompute>("FCompute<gpu>", HardSigmoidForward<gpu>);
NNVM_REGISTER_OP(_backward_hard_sigmoid)
.set_attr<FCompute>("FCompute<gpu>", HardSigmoidBackward<gpu>);
// softsign
NNVM_REGISTER_OP(softsign)
.set_attr<FCompute>("FCompute<gpu>", UnaryOp::Compute<gpu, mshadow_op::softsign>);
NNVM_REGISTER_OP(_backward_softsign)
.set_attr<FCompute>("FCompute<gpu>", ElemwiseBinaryOp::Compute<
gpu, unary_bwd<mshadow_op::softsign_grad>>);
// copy
NNVM_REGISTER_OP(_copy)
.set_attr<FCompute>("FCompute<gpu>", UnaryOp::IdentityCompute<gpu>)
.set_attr<FComputeEx>("FComputeEx<gpu>", UnaryOp::IdentityComputeEx<gpu>);
NNVM_REGISTER_OP(_backward_copy)
.set_attr<FCompute>("FCompute<gpu>", UnaryOp::IdentityCompute<gpu>);
NNVM_REGISTER_OP(BlockGrad)
.set_attr<FCompute>("FCompute<gpu>", UnaryOp::IdentityCompute<gpu>);
NNVM_REGISTER_OP(make_loss)
.set_attr<FCompute>("FCompute<gpu>", UnaryOp::IdentityCompute<gpu>)
.set_attr<FComputeEx>("FComputeEx<gpu>", UnaryOp::IdentityComputeEx<gpu>);
// identity output as first input, but attributes are constrainted to be like rhs
NNVM_REGISTER_OP(_identity_with_attr_like_rhs)
.set_attr<FCompute>("FCompute<gpu>", UnaryOp::IdentityCompute<gpu>)
.set_attr<FComputeEx>("FComputeEx<gpu>", UnaryOp::IdentityComputeFirstItemEx<gpu>);
NNVM_REGISTER_OP(reshape_like)
.set_attr<FCompute>("FCompute<gpu>", UnaryOp::IdentityCompute<gpu>);
// GPU kernel-less implementation of shape_array: copies the input tensor's
// shape (ndim int64 values) from host memory into the output blob on the
// device, queued on the op's compute stream.
void ShapeComputeGPU(const nnvm::NodeAttrs& attrs,
const OpContext& ctx,
const std::vector<TBlob>& inputs,
const std::vector<OpReqType>& req,
const std::vector<TBlob>& outputs) {
using namespace mshadow;
CHECK_EQ(inputs.size(), 1U);
CHECK_EQ(outputs.size(), 1U);
CHECK_EQ(req.size(), 1U);
const TBlob& in_data = inputs[0];
const TBlob& out_data = outputs[0];
mshadow::Stream<gpu> *s = ctx.get_stream<gpu>();
// NOTE(review): shape_.data() is pageable host memory handed to an async
// copy; this relies on the copy staging the data before returning and on
// shape_ outliving the call -- confirm. Assumes shape dims are int64-sized.
cudaMemcpyAsync(out_data.dptr_,
in_data.shape_.data(),
in_data.ndim() * sizeof(int64_t),
cudaMemcpyHostToDevice,
mshadow::Stream<gpu>::GetStream(s));
}
NNVM_REGISTER_OP(shape_array)
.set_attr<FCompute>("FCompute<gpu>", ShapeComputeGPU);
// GPU kernel-less implementation of size_array: writes the input tensor's
// total element count (one int64) into the output blob on the device,
// queued on the op's compute stream.
void SizeComputeGPU(const nnvm::NodeAttrs& attrs,
const OpContext& ctx,
const std::vector<TBlob>& inputs,
const std::vector<OpReqType>& req,
const std::vector<TBlob>& outputs) {
using namespace mshadow;
using namespace mxnet_op;
CHECK_EQ(inputs.size(), 1U);
CHECK_EQ(outputs.size(), 1U);
CHECK_EQ(req.size(), 1U);
const TBlob& in_data = inputs[0];
const TBlob& out_data = outputs[0];
mshadow::Stream<gpu> *s = ctx.get_stream<gpu>();
const index_t size_var = in_data.Size();
// NOTE(review): size_var is a stack local passed to an async copy; this
// relies on pageable-host copies staging the value before returning --
// confirm. Also assumes index_t is 64-bit to match the copy size.
cudaMemcpyAsync(out_data.dptr_,
&size_var,
1U * sizeof(int64_t),
cudaMemcpyHostToDevice,
mshadow::Stream<gpu>::GetStream(s));
}
NNVM_REGISTER_OP(size_array)
.set_attr<FCompute>("FCompute<gpu>", SizeComputeGPU);
NNVM_REGISTER_OP(Cast)
.set_attr<FCompute>("FCompute<gpu>", CastCompute<gpu>);
NNVM_REGISTER_OP(_backward_cast)
.set_attr<FCompute>("FCompute<gpu>", CastCompute<gpu>);
// negative
NNVM_REGISTER_OP(negative)
.set_attr<FCompute>("FCompute<gpu>", UnaryOp::Compute<gpu, mshadow_op::negation>)
.set_attr<FComputeEx>("FComputeEx<gpu>", UnaryOp::ComputeEx<gpu, mshadow_op::negation>);
// reciprocal
NNVM_REGISTER_OP(reciprocal)
.set_attr<FCompute>("FCompute<gpu>", UnaryOp::Compute<gpu, mshadow_op::reciprocal>);
NNVM_REGISTER_OP(_backward_reciprocal)
.set_attr<FCompute>("FCompute<gpu>", ElemwiseBinaryOp::Compute<
gpu, unary_bwd<mshadow_op::reciprocal_grad> >);
// abs
NNVM_REGISTER_OP(abs)
.set_attr<FCompute>("FCompute<gpu>", UnaryOp::Compute<gpu, mshadow_op::abs>)
.set_attr<FComputeEx>("FComputeEx<gpu>", UnaryOp::ComputeEx<gpu, mshadow_op::abs>);
NNVM_REGISTER_OP(_backward_abs)
.set_attr<FCompute>("FCompute<gpu>", ElemwiseBinaryOp::Compute<gpu, unary_bwd<mshadow_op::sign> >);
// sign
NNVM_REGISTER_OP(sign)
.set_attr<FCompute>("FCompute<gpu>", UnaryOp::Compute<gpu, mshadow_op::sign>)
.set_attr<FComputeEx>("FComputeEx<gpu>", UnaryOp::ComputeEx<gpu, mshadow_op::sign>);
NNVM_REGISTER_OP(_backward_sign)
.set_attr<FCompute>("FCompute<gpu>", ElemwiseBinaryOp::Compute<
gpu, unary_bwd<mshadow_op::sign_grad> >);
// round
NNVM_REGISTER_OP(round)
.set_attr<FCompute>("FCompute<gpu>", UnaryOp::Compute<gpu, mshadow_op::round>)
.set_attr<FComputeEx>("FComputeEx<gpu>", UnaryOp::ComputeEx<gpu, mshadow_op::round>);
// ceil
NNVM_REGISTER_OP(ceil)
.set_attr<FCompute>("FCompute<gpu>", UnaryOp::Compute<gpu, mshadow_op::ceil>)
.set_attr<FComputeEx>("FComputeEx<gpu>", UnaryOp::ComputeEx<gpu, mshadow_op::ceil>);
// floor
NNVM_REGISTER_OP(floor)
.set_attr<FCompute>("FCompute<gpu>", UnaryOp::Compute<gpu, mshadow_op::floor>)
.set_attr<FComputeEx>("FComputeEx<gpu>", UnaryOp::ComputeEx<gpu, mshadow_op::floor>);
// trunc
NNVM_REGISTER_OP(trunc)
.set_attr<FCompute>("FCompute<gpu>", UnaryOp::Compute<gpu, mshadow_op::trunc>)
.set_attr<FComputeEx>("FComputeEx<gpu>", UnaryOp::ComputeEx<gpu, mshadow_op::trunc>);
// rint
NNVM_REGISTER_OP(rint)
.set_attr<FCompute>("FCompute<gpu>", UnaryOp::Compute<gpu, mshadow_op::rint>)
.set_attr<FComputeEx>("FComputeEx<gpu>", UnaryOp::ComputeEx<gpu, mshadow_op::rint>);
// fix
NNVM_REGISTER_OP(fix)
.set_attr<FCompute>("FCompute<gpu>", UnaryOp::Compute<gpu, mshadow_op::fix>)
.set_attr<FComputeEx>("FComputeEx<gpu>", UnaryOp::ComputeEx<gpu, mshadow_op::fix>);
// square
NNVM_REGISTER_OP(square)
.set_attr<FCompute>("FCompute<gpu>", UnaryOp::Compute<gpu, mshadow_op::square>)
.set_attr<FComputeEx>("FComputeEx<gpu>", UnaryOp::ComputeEx<gpu, mshadow_op::square>);
NNVM_REGISTER_OP(_backward_square)
.set_attr<FCompute>("FCompute<gpu>", ElemwiseBinaryOp::Compute<
gpu, unary_bwd<mshadow_op::square_grad> >);
// sqrt
NNVM_REGISTER_OP(sqrt)
.set_attr<FCompute>("FCompute<gpu>", UnaryOp::Compute<gpu, mshadow_op::square_root>)
.set_attr<FComputeEx>("FComputeEx<gpu>", UnaryOp::ComputeEx<gpu, mshadow_op::square_root>);
NNVM_REGISTER_OP(_backward_sqrt)
.set_attr<FCompute>("FCompute<gpu>", ElemwiseBinaryOp::Compute<
gpu, unary_bwd<mshadow_op::square_root_grad> >);
// rsqrt
NNVM_REGISTER_OP(rsqrt)
.set_attr<FCompute>("FCompute<gpu>", UnaryOp::Compute<gpu, mshadow_op::reciprocal_square_root>);
NNVM_REGISTER_OP(_backward_rsqrt)
.set_attr<FCompute>("FCompute<gpu>",
ElemwiseBinaryOp::Compute<gpu, unary_bwd<mshadow_op::reciprocal_square_root_grad> >);
// cbrt
NNVM_REGISTER_OP(cbrt)
.set_attr<FCompute>("FCompute<gpu>", UnaryOp::Compute<gpu, mshadow_op::cube_root>)
.set_attr<FComputeEx>("FComputeEx<gpu>", UnaryOp::ComputeEx<gpu, mshadow_op::cube_root>);
NNVM_REGISTER_OP(_backward_cbrt)
.set_attr<FCompute>("FCompute<gpu>", ElemwiseBinaryOp::Compute<
gpu, unary_bwd<mshadow_op::cube_root_grad> >);
// rcbrt
NNVM_REGISTER_OP(rcbrt)
.set_attr<FCompute>("FCompute<gpu>", UnaryOp::Compute<gpu, mshadow_op::reciprocal_cube_root>);
NNVM_REGISTER_OP(_backward_rcbrt)
.set_attr<FCompute>("FCompute<gpu>",
ElemwiseBinaryOp::Compute<gpu, unary_bwd<mshadow_op::reciprocal_cube_root_grad> >);
// exp
NNVM_REGISTER_OP(exp)
.set_attr<FCompute>("FCompute<gpu>", UnaryOp::Compute<gpu, mshadow_op::exp>);
// log
NNVM_REGISTER_OP(log)
.set_attr<FCompute>("FCompute<gpu>", UnaryOp::Compute<gpu, mshadow_op::log>);
// log10
NNVM_REGISTER_OP(log10)
.set_attr<FCompute>("FCompute<gpu>", UnaryOp::Compute<gpu, mshadow_op::log10>);
// log2
NNVM_REGISTER_OP(log2)
.set_attr<FCompute>("FCompute<gpu>", UnaryOp::Compute<gpu, mshadow_op::log2>);
NNVM_REGISTER_OP(_backward_log)
.set_attr<FCompute>("FCompute<gpu>", ElemwiseBinaryOp::Compute<
gpu, unary_bwd<mshadow_op::log_grad> >);
NNVM_REGISTER_OP(_backward_log10)
.set_attr<FCompute>("FCompute<gpu>", ElemwiseBinaryOp::Compute<
gpu, unary_bwd<mshadow_op::log10_grad> >);
NNVM_REGISTER_OP(_backward_log2)
.set_attr<FCompute>("FCompute<gpu>", ElemwiseBinaryOp::Compute<
gpu, unary_bwd<mshadow_op::log2_grad> >);
// log1p
NNVM_REGISTER_OP(log1p)
.set_attr<FCompute>("FCompute<gpu>", UnaryOp::Compute<gpu, mshadow_op::log1p>)
.set_attr<FComputeEx>("FComputeEx<gpu>", UnaryOp::ComputeEx<gpu, mshadow_op::log1p>);
NNVM_REGISTER_OP(_backward_log1p)
.set_attr<FCompute>("FCompute<gpu>", ElemwiseBinaryOp::Compute<
gpu, unary_bwd<mshadow_op::log1p_grad> >);
// expm1
NNVM_REGISTER_OP(expm1)
.set_attr<FCompute>("FCompute<gpu>", UnaryOp::Compute<gpu, mshadow_op::expm1>)
.set_attr<FComputeEx>("FComputeEx<gpu>", UnaryOp::ComputeEx<gpu, mshadow_op::expm1>);
NNVM_REGISTER_OP(_backward_expm1)
.set_attr<FCompute>("FCompute<gpu>", ElemwiseBinaryOp::Compute<
gpu, unary_bwd<mshadow_op::exp> >);
// gamma
NNVM_REGISTER_OP(gamma)
.set_attr<FCompute>("FCompute<gpu>", UnaryOp::Compute<gpu, mshadow_op::gamma>);
NNVM_REGISTER_OP(_backward_gamma)
.set_attr<FCompute>("FCompute<gpu>", ElemwiseBinaryOp::Compute<
gpu, unary_bwd<mshadow_op::gamma_grad> >);
// gammaln
NNVM_REGISTER_OP(gammaln)
.set_attr<FCompute>("FCompute<gpu>", UnaryOp::Compute<gpu, mshadow_op::gammaln>);
NNVM_REGISTER_OP(_backward_gammaln)
.set_attr<FCompute>("FCompute<gpu>", ElemwiseBinaryOp::Compute<
gpu, unary_bwd<mshadow_op::gammaln_grad> >);
// logical not
NNVM_REGISTER_OP(logical_not)
.set_attr<FCompute>("FCompute<gpu>", UnaryOp::Compute<gpu, mshadow_op::nt>);
} // namespace op
} // namespace mxnet
|
b35fec6095aa9a9d23a6b5549291b25e7fb1ad4a.hip
|
// !!! This is a file automatically generated by hipify!!!
#include <stdio.h>
#include <math.h>
#include <hip/hip_runtime.h>
#include <hiprand/hiprand_kernel.h>
#define LOGMAP 1
#define NORM 2
#define PERC 3
#define DEFAULT_TEST LOGMAP
#define ALPHA 1.0f
// CITATION: Code snippets taken from examples.cu by Mark
/**************************************************************
* reduce_sum: compute the sum of the elements of an array
* Simple version: we only handle one block of threads
***************************************************************/
// In-place block-level sum reduction: on return, x[0] holds the sum of
// x[0..n-1]. Must be called by every thread of the block (it contains
// __syncthreads()); threads with myId >= the fold point simply idle through
// the barriers. Handles non-power-of-two n: each pass folds the top
// m = n>>1 elements onto the bottom n-m and shrinks n.
__device__ void reduce_sum_dev(uint n, float *x) {
  uint myId = threadIdx.x;
  // n is uniform across the block, so every thread executes the same number
  // of iterations and the barrier inside the loop is safe.
  for(uint m = n >> 1; m > 0; m = n >> 1) {
    n -= m;
    __syncthreads();
    if(myId < m)
      x[myId] += x[myId+n];
  }
}
// Kernel wrapper around reduce_sum_dev; intended for a single-block launch
// (the reduction only synchronizes within one block).
__global__ void reduce_sum(uint n, float *x) {
  reduce_sum_dev(n, x);
}
/************************
* logistic map
************************/
// Applies m iterations of the logistic map x <- ALPHA * x * (1 - x) to each
// element, one thread per element. The i < n guard makes over-provisioned
// (rounded-up) launches safe.
__global__ void logmap(float *x, int n, int m) {
  uint i = blockDim.x * blockIdx.x + threadIdx.x;
  if (i < n)
    for(int j = 0; j < m; j++)
      x[i] = ALPHA * x[i] * (1.0f - x[i]);
}
// CPU reference for logmap(): copies x into z, then applies m iterations of
// the logistic map z <- ALPHA * z * (1 - z) to every element of z.
void logmap_ref(float *x, int n, int m, float *z) {
  for(int k = 0; k < n; k++)
    z[k] = x[k];
  for(int step = 0; step < m; step++) {
    for(int k = 0; k < n; k++) {
      float v = z[k];
      z[k] = ALPHA * v * (1.0f - v);
    }
  }
}
/************************
* norm calculation
************************/
// Per-block partial 2-norm: squares x in place, reduces each block's chunk
// with reduce_sum_dev, and writes sqrt(block partial sum) to z[blockIdx.x].
// Since ||x|| = sqrt(sum of squared block norms), running norm() a second
// time over z with a single block yields the full norm (see main()).
// NOTE(review): destroys x, and assumes gridDim.x == ceil(n/blockDim.x);
// extra blocks would make n - blockBase wrap around (uint) -- confirm at
// the call sites.
__global__ void norm(float *x, int n, float *z) {
  uint i = blockDim.x * blockIdx.x + threadIdx.x;
  uint blockBase = blockDim.x * blockIdx.x;
  // m = number of valid elements handled by this block (last block may be short)
  uint m = min(blockDim.x, n - blockBase);
  if (i < n)
    x[i] = pow(x[i], 2);
  // Barrier is outside the i < n branch, so all threads reach it.
  __syncthreads();
  reduce_sum_dev(m, &(x[blockBase]));
  if (i < n && threadIdx.x == 0)
    z[blockIdx.x] = sqrt(x[i]);
}
// CPU reference 2-norm of x[0..n-1]. Returns 0 for n <= 0.
// Squares by multiplication instead of the double-precision pow(x, 2) call,
// and accumulates in double so long float partial sums don't lose precision.
float norm_ref(float *x, int n) {
  double sum = 0.0;
  for(int i = 0; i < n; i++)
    sum += (double)x[i] * (double)x[i];
  return (float)sqrt(sum);
}
/************************
* perc
************************/
// Initializes one hiprand RNG state per thread: fixed seed 1234 with the
// global thread id as the sequence number, giving each thread an
// independent, reproducible subsequence.
__global__ void setup_kernel(uint n, hiprandState_t *state) {
  uint myId = blockDim.x * blockIdx.x + threadIdx.x;
  if(myId < n)
    hiprand_init(1234, myId, 0, &state[myId]);
}
// Each thread i fills a[i*m .. i*m+m-1] with pseudo-random integers in
// [0, 1000) drawn from its private RNG state.
// NOTE(review): there is no i-bound guard -- the caller launches a thread
// count rounded UP to a multiple of the block size, so trailing threads
// write past a[] and read past state[] unless both buffers are padded to
// the rounded-up size. Needs an explicit n parameter/guard; confirm buffer
// sizes at the call site.
__global__ void rndm(uint *a, int m, hiprandState_t *state) {
  uint i = blockDim.x * blockIdx.x + threadIdx.x;
  for(int j = 0; j < m; j++)
    a[i*m + j] = hiprand_uniform(&state[i])*1000;
}
/*****************************************************
* print_vec: print the first few elements of a vector
******************************************************/
// Print n elements of x using printf format fmt, labeled with who; appends
// ", ..." when n > 10 (callers typically pass min(10, n) elements).
void print_vec(float *x, uint n, const char *fmt, const char *who) {
  printf("%s = ", who);
  const char *sep = "";
  for(uint i = 0; i < n; i++) {
    printf("%s", sep);
    printf(fmt, x[i]);
    sep = ", ";
  }
  if(n > 10) printf(", ...");
  printf("\n");
}
// uint overload of print_vec: print n elements of x with format fmt,
// labeled with who; appends ", ..." when n > 10.
void print_vec(uint *x, uint n, const char *fmt, const char *who) {
  printf("%s = ", who);
  const char *sep = "";
  for(uint i = 0; i < n; i++) {
    printf("%s", sep);
    printf(fmt, x[i]);
    sep = ", ";
  }
  if(n > 10) printf(", ...");
  printf("\n");
}
/*****************************************************
* near(x, y): true if x and y are "nearly" equal
******************************************************/
// Fuzzy float equality: true when x and y agree within a relative tolerance
// that grows with problem size n (roundoff accumulates over n-term sums).
// Uses fabs/fmax explicitly: the original `abs`/`max` depended on overload
// resolution and could silently bind to the integer versions, truncating
// the operands.
int near(uint n, float x, float y) {
  double tol = fmax(10.0, sqrt((double)n)) * 1.0e-7
             * fmax(1.0, fmax(fabs((double)x), fabs((double)y)));
  return fabs((double)x - (double)y) < tol;
}
/*
 * Test driver: runs LOGMAP, NORM or PERC (argv[2]) over n elements
 * (argv[1], default 1000000) and, for LOGMAP/NORM, checks the GPU result
 * against a CPU reference via near().
 */
int main(int argc, char **argv) {
  uint n = (argc >= 2) ? atoi(argv[1]) : 1000000;
  uint nn = n;  // number of z[] entries to verify at the end
  uint what = (argc >= 3) ? atoi(argv[2]) : DEFAULT_TEST;
  // m: logistic-map iteration count (meaningful for LOGMAP only)
  uint m = (argc >= 4 && what == LOGMAP) ? atoi(argv[3]) : 0;
  float *x, *y, *z, *z_ref;
  float *dev_x, *dev_y, *dev_z;
  uint *a, *dev_a;
  hiprandState_t *dev_randState;
  hipDeviceProp_t prop;
  int ndev;
  hipGetDeviceCount(&ndev);
  if(ndev < 1) {
    fprintf(stderr, "No CUDA device found\n");
    exit(-1);
  }
  hipGetDeviceProperties(&prop, 0);
  // size_t, not int: n*sizeof(float) overflows a 32-bit int for n >= 2^29
  size_t size = (size_t)n * sizeof(float);
  x = (float *)malloc(size);
  y = (float *)malloc(size);
  z = (float *)malloc(size);
  a = (uint *)malloc(size);  // sizeof(uint) == sizeof(float)
  z_ref = (float *)malloc(size);
  // Use a logistic map to make some pseudo-random numbers
  // It's fast, but the distribution isn't very uniform, and
  // the other statistical properties are lousy. But it's
  // fast, and that's all we need for some simple tests.
  x[0] = 0.123;
  y[0] = sqrt(0.3);
  for(int i = 1; i < n; i++) {
    x[i] = 3.8*x[i-1]*(1.0 - x[i-1]);
    y[i] = 3.9*y[i-1]*(1.0 - y[i-1]);
  }
  printf("The GPU is a %s\n", prop.name);
  printf("Cuda capability %d.%d.\n", prop.major, prop.minor);
  print_vec(x, min(10, n), "%5.3f", "x");
  print_vec(y, min(10, n), "%5.3f", "y");
  hipMalloc((void**)(&dev_x), size);
  hipMalloc((void**)(&dev_y), size);
  hipMalloc((void**)(&dev_z), size);
  hipMalloc((void**)(&dev_a), size);
  hipMemcpy(dev_x, x, size, hipMemcpyHostToDevice);
  hipMemcpy(dev_y, y, size, hipMemcpyHostToDevice);
  hipMalloc((void **)(&dev_randState), n*sizeof(hiprandState_t));
  switch(what) {
  case LOGMAP:
    hipLaunchKernelGGL(( logmap), dim3(ceil(n/256.0)),dim3(256), 0, 0, dev_x, n, m);
    // BUGFIX: pointers must be printed with %p (the old %016llx with
    // float* arguments was undefined behavior)
    printf("a: size = %zu, z=%p dev_x=%p\n", size, (void *)z, (void *)dev_x);
    hipMemcpy(z, dev_x, size, hipMemcpyDeviceToHost);
    printf("b\n");
    logmap_ref(x, n, m, z_ref);
    break;
  case NORM:
    // Two-pass norm. NOTE(review): the second pass launches one block of
    // ceil(n/1024) threads, so it assumes n <= 1024*1024 -- confirm.
    hipLaunchKernelGGL(( norm), dim3(ceil(n/1024.0)),dim3(1024), 0, 0, dev_x, n, dev_z);
    if (ceil(n/1024.0) > 1)
      hipLaunchKernelGGL(( norm), dim3(1),dim3(ceil(n/1024.0)), 0, 0, dev_z, ceil(n/1024.0), dev_z);
    hipMemcpy(z, dev_z, size, hipMemcpyDeviceToHost);
    z_ref[0] = norm_ref(x, n);
    nn = 1;
    break;
  case PERC:
    hipLaunchKernelGGL(( setup_kernel), dim3(ceil(n/1024.0)),dim3(1024), 0, 0, n, dev_randState);
    hipLaunchKernelGGL(( rndm), dim3(ceil(n/1024.0)),dim3(1024), 0, 0, dev_a, 1, dev_randState);
    hipMemcpy(a, dev_a, size, hipMemcpyDeviceToHost);
    print_vec(a, min(10, nn), "%d", "a");
    // BUGFIX: PERC produces no z/z_ref; previously the check loop below
    // compared two uninitialized buffers and could fail spuriously.
    nn = 0;
    break;
  default:
    fprintf(stderr, "ERROR: unknown test case -- %d\n", what);
    exit(-1);
  }
  for(int i = 0; i < nn; i++) { // check the result
    if(!near(n, z[i], z_ref[i])) {
      fprintf(stderr, "ERROR: i=%d: z[i] = %15.10f, z_ref[i] = %15.10f\n", i, z[i], z_ref[i]);
      exit(-1);
    }
  }
  print_vec(z, min(10, nn), "%5.3f", "z");
  printf("The results match!\n");
  hipFree(dev_x);
  hipFree(dev_y);
  hipFree(dev_z);
  hipFree(dev_a);          // BUGFIX: was leaked
  hipFree(dev_randState);  // BUGFIX: was leaked
  free(x);
  free(y);
  free(z);
  free(a);                 // BUGFIX: was leaked
  free(z_ref);
  exit(0);
}
|
b35fec6095aa9a9d23a6b5549291b25e7fb1ad4a.cu
|
#include <stdio.h>
#include <math.h>
#include <cuda.h>
#include <curand_kernel.h>
#define LOGMAP 1
#define NORM 2
#define PERC 3
#define DEFAULT_TEST LOGMAP
#define ALPHA 1.0f
// CITATION: Code snippets taken from examples.cu by Mark
/**************************************************************
* reduce_sum: compute the sum of the elements of an array
* Simple version: we only handle one block of threads
***************************************************************/
// In-place block-level sum reduction: on return, x[0] holds the sum of
// x[0..n-1]. Must be called by every thread of the block (it contains
// __syncthreads()); threads above the fold point idle through the barriers.
// Handles non-power-of-two n: each pass folds the top m = n>>1 elements
// onto the bottom n-m and shrinks n.
__device__ void reduce_sum_dev(uint n, float *x) {
  uint myId = threadIdx.x;
  // n is uniform across the block, so every thread runs the same number of
  // iterations and the in-loop barrier is safe.
  for(uint m = n >> 1; m > 0; m = n >> 1) {
    n -= m;
    __syncthreads();
    if(myId < m)
      x[myId] += x[myId+n];
  }
}
// Kernel wrapper around reduce_sum_dev; intended for a single-block launch
// (the reduction only synchronizes within one block).
__global__ void reduce_sum(uint n, float *x) {
  reduce_sum_dev(n, x);
}
/************************
* logistic map
************************/
// Applies m iterations of the logistic map x <- ALPHA * x * (1 - x) to each
// element, one thread per element. The i < n guard makes over-provisioned
// (rounded-up) launches safe.
__global__ void logmap(float *x, int n, int m) {
  uint i = blockDim.x * blockIdx.x + threadIdx.x;
  if (i < n)
    for(int j = 0; j < m; j++)
      x[i] = ALPHA * x[i] * (1.0f - x[i]);
}
// CPU reference for logmap(): copies x into z, then applies m iterations of
// the logistic map z <- ALPHA * z * (1 - z) to every element of z.
void logmap_ref(float *x, int n, int m, float *z) {
  for(int k = 0; k < n; k++)
    z[k] = x[k];
  for(int step = 0; step < m; step++) {
    for(int k = 0; k < n; k++) {
      float v = z[k];
      z[k] = ALPHA * v * (1.0f - v);
    }
  }
}
/************************
* norm calculation
************************/
// Per-block partial 2-norm: squares x in place, reduces each block's chunk
// with reduce_sum_dev, and writes sqrt(block partial sum) to z[blockIdx.x].
// Since ||x|| = sqrt(sum of squared block norms), running norm() a second
// time over z with a single block yields the full norm (see main()).
// NOTE(review): destroys x, and assumes gridDim.x == ceil(n/blockDim.x);
// extra blocks would make n - blockBase wrap around (uint) -- confirm at
// the call sites.
__global__ void norm(float *x, int n, float *z) {
  uint i = blockDim.x * blockIdx.x + threadIdx.x;
  uint blockBase = blockDim.x * blockIdx.x;
  // m = number of valid elements handled by this block (last block may be short)
  uint m = min(blockDim.x, n - blockBase);
  if (i < n)
    x[i] = pow(x[i], 2);
  // Barrier is outside the i < n branch, so all threads reach it.
  __syncthreads();
  reduce_sum_dev(m, &(x[blockBase]));
  if (i < n && threadIdx.x == 0)
    z[blockIdx.x] = sqrt(x[i]);
}
// CPU reference 2-norm of x[0..n-1]. Returns 0 for n <= 0.
// Squares by multiplication instead of the double-precision pow(x, 2) call,
// and accumulates in double so long float partial sums don't lose precision.
float norm_ref(float *x, int n) {
  double sum = 0.0;
  for(int i = 0; i < n; i++)
    sum += (double)x[i] * (double)x[i];
  return (float)sqrt(sum);
}
/************************
* perc
************************/
// Initializes one curand RNG state per thread: fixed seed 1234 with the
// global thread id as the sequence number, giving each thread an
// independent, reproducible subsequence.
__global__ void setup_kernel(uint n, curandState *state) {
  uint myId = blockDim.x * blockIdx.x + threadIdx.x;
  if(myId < n)
    curand_init(1234, myId, 0, &state[myId]);
}
// Each thread i fills a[i*m .. i*m+m-1] with pseudo-random integers in
// [0, 1000) drawn from its private RNG state.
// NOTE(review): there is no i-bound guard -- the caller launches a thread
// count rounded UP to a multiple of the block size, so trailing threads
// write past a[] and read past state[] unless both buffers are padded to
// the rounded-up size. Needs an explicit n parameter/guard; confirm buffer
// sizes at the call site.
__global__ void rndm(uint *a, int m, curandState *state) {
  uint i = blockDim.x * blockIdx.x + threadIdx.x;
  for(int j = 0; j < m; j++)
    a[i*m + j] = curand_uniform(&state[i])*1000;
}
/*****************************************************
* print_vec: print the first few elements of a vector
******************************************************/
// Print n elements of x using printf format fmt, labeled with who; appends
// ", ..." when n > 10 (callers typically pass min(10, n) elements).
void print_vec(float *x, uint n, const char *fmt, const char *who) {
  printf("%s = ", who);
  const char *sep = "";
  for(uint i = 0; i < n; i++) {
    printf("%s", sep);
    printf(fmt, x[i]);
    sep = ", ";
  }
  if(n > 10) printf(", ...");
  printf("\n");
}
// uint overload of print_vec: print n elements of x with format fmt,
// labeled with who; appends ", ..." when n > 10.
void print_vec(uint *x, uint n, const char *fmt, const char *who) {
  printf("%s = ", who);
  const char *sep = "";
  for(uint i = 0; i < n; i++) {
    printf("%s", sep);
    printf(fmt, x[i]);
    sep = ", ";
  }
  if(n > 10) printf(", ...");
  printf("\n");
}
/*****************************************************
* near(x, y): true if x and y are "nearly" equal
******************************************************/
// Fuzzy float equality: true when x and y agree within a relative tolerance
// that grows with problem size n (roundoff accumulates over n-term sums).
// Uses fabs/fmax explicitly: the original `abs`/`max` depended on overload
// resolution and could silently bind to the integer versions, truncating
// the operands.
int near(uint n, float x, float y) {
  double tol = fmax(10.0, sqrt((double)n)) * 1.0e-7
             * fmax(1.0, fmax(fabs((double)x), fabs((double)y)));
  return fabs((double)x - (double)y) < tol;
}
/*
 * Test driver: runs LOGMAP, NORM or PERC (argv[2]) over n elements
 * (argv[1], default 1000000) and, for LOGMAP/NORM, checks the GPU result
 * against a CPU reference via near().
 */
int main(int argc, char **argv) {
  uint n = (argc >= 2) ? atoi(argv[1]) : 1000000;
  uint nn = n;  // number of z[] entries to verify at the end
  uint what = (argc >= 3) ? atoi(argv[2]) : DEFAULT_TEST;
  // m: logistic-map iteration count (meaningful for LOGMAP only)
  uint m = (argc >= 4 && what == LOGMAP) ? atoi(argv[3]) : 0;
  float *x, *y, *z, *z_ref;
  float *dev_x, *dev_y, *dev_z;
  uint *a, *dev_a;
  curandState *dev_randState;
  cudaDeviceProp prop;
  int ndev;
  cudaGetDeviceCount(&ndev);
  if(ndev < 1) {
    fprintf(stderr, "No CUDA device found\n");
    exit(-1);
  }
  cudaGetDeviceProperties(&prop, 0);
  // size_t, not int: n*sizeof(float) overflows a 32-bit int for n >= 2^29
  size_t size = (size_t)n * sizeof(float);
  x = (float *)malloc(size);
  y = (float *)malloc(size);
  z = (float *)malloc(size);
  a = (uint *)malloc(size);  // sizeof(uint) == sizeof(float)
  z_ref = (float *)malloc(size);
  // Use a logistic map to make some pseudo-random numbers
  // It's fast, but the distribution isn't very uniform, and
  // the other statistical properties are lousy. But it's
  // fast, and that's all we need for some simple tests.
  x[0] = 0.123;
  y[0] = sqrt(0.3);
  for(int i = 1; i < n; i++) {
    x[i] = 3.8*x[i-1]*(1.0 - x[i-1]);
    y[i] = 3.9*y[i-1]*(1.0 - y[i-1]);
  }
  printf("The GPU is a %s\n", prop.name);
  printf("Cuda capability %d.%d.\n", prop.major, prop.minor);
  print_vec(x, min(10, n), "%5.3f", "x");
  print_vec(y, min(10, n), "%5.3f", "y");
  cudaMalloc((void**)(&dev_x), size);
  cudaMalloc((void**)(&dev_y), size);
  cudaMalloc((void**)(&dev_z), size);
  cudaMalloc((void**)(&dev_a), size);
  cudaMemcpy(dev_x, x, size, cudaMemcpyHostToDevice);
  cudaMemcpy(dev_y, y, size, cudaMemcpyHostToDevice);
  cudaMalloc((void **)(&dev_randState), n*sizeof(curandState));
  switch(what) {
  case LOGMAP:
    logmap<<<ceil(n/256.0),256>>>(dev_x, n, m);
    // BUGFIX: pointers must be printed with %p (the old %016llx with
    // float* arguments was undefined behavior)
    printf("a: size = %zu, z=%p dev_x=%p\n", size, (void *)z, (void *)dev_x);
    cudaMemcpy(z, dev_x, size, cudaMemcpyDeviceToHost);
    printf("b\n");
    logmap_ref(x, n, m, z_ref);
    break;
  case NORM:
    // Two-pass norm. NOTE(review): the second pass launches one block of
    // ceil(n/1024) threads, so it assumes n <= 1024*1024 -- confirm.
    norm<<<ceil(n/1024.0),1024>>>(dev_x, n, dev_z);
    if (ceil(n/1024.0) > 1)
      norm<<<1,ceil(n/1024.0)>>>(dev_z, ceil(n/1024.0), dev_z);
    cudaMemcpy(z, dev_z, size, cudaMemcpyDeviceToHost);
    z_ref[0] = norm_ref(x, n);
    nn = 1;
    break;
  case PERC:
    setup_kernel<<<ceil(n/1024.0),1024>>>(n, dev_randState);
    rndm<<<ceil(n/1024.0),1024>>>(dev_a, 1, dev_randState);
    cudaMemcpy(a, dev_a, size, cudaMemcpyDeviceToHost);
    print_vec(a, min(10, nn), "%d", "a");
    // BUGFIX: PERC produces no z/z_ref; previously the check loop below
    // compared two uninitialized buffers and could fail spuriously.
    nn = 0;
    break;
  default:
    fprintf(stderr, "ERROR: unknown test case -- %d\n", what);
    exit(-1);
  }
  for(int i = 0; i < nn; i++) { // check the result
    if(!near(n, z[i], z_ref[i])) {
      fprintf(stderr, "ERROR: i=%d: z[i] = %15.10f, z_ref[i] = %15.10f\n", i, z[i], z_ref[i]);
      exit(-1);
    }
  }
  print_vec(z, min(10, nn), "%5.3f", "z");
  printf("The results match!\n");
  cudaFree(dev_x);
  cudaFree(dev_y);
  cudaFree(dev_z);
  cudaFree(dev_a);          // BUGFIX: was leaked
  cudaFree(dev_randState);  // BUGFIX: was leaked
  free(x);
  free(y);
  free(z);
  free(a);                  // BUGFIX: was leaked
  free(z_ref);
  exit(0);
}
|
61ffec7a26ef5c2c89b23c048798a2d186a63c4c.hip
|
// !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
/*
* Copyright (c) 2020, NVIDIA CORPORATION.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
#include <loss.hpp>
#include <utils.cuh>
#include <vector>
namespace HugeCTR {
namespace {
// Reduces val across the calling warp (warpReduceSum from utils.cuh), then
// lane 0 of each warp atomically adds val/div into *acc -- one atomic per
// warp instead of one per thread, cutting contention 32x.
// NOTE(review): presumably warpReduceSum assumes all 32 lanes participate;
// confirm at call sites (MultiCrossEntropy_Kernel launches full warps).
template <typename T>
__forceinline__ __device__ void atomic_global_sum_div(T val, T *acc, float div) {
  val = warpReduceSum(val);
  if (threadIdx.x % warpSize == 0) {
    atomicAdd(acc, (T)(val / div));
  }
  return;
}
} // namespace
// Base-class constructor: stores the regularizer / GPU resource / scaling
// configuration and registers the single label, input and loss tensor this
// loss instance operates on.
// BUGFIX: the parameter `&regularizer` had been mangled to the mojibake
// `®ularizer` ("&reg" swallowed as an HTML entity), which does not compile.
template <typename T>
Loss<T>::Loss(const Tensor2<float> &label_tensor, const Tensor2<T> &input_tensor,
              const Tensor2<float> &loss_tensor, const std::shared_ptr<Regularizer<T>> &regularizer,
              const std::shared_ptr<GPUResource> &gpu_resource, int total_gpu_count, float scaler)
    : regularizer_(regularizer),
      gpu_resource_(gpu_resource),
      total_gpu_count_(total_gpu_count),
      scaler_(scaler) {
  label_tensors_.push_back(label_tensor);
  input_tensors_.push_back(input_tensor);
  loss_tensors_.push_back(loss_tensor);
}
// Convenience overload: run the loss over the full batch, whose size is the
// leading dimension of the (train or evaluate) input tensor.
template <typename T>
void Loss<T>::compute(bool is_train) {
  const auto &dims = get_input_tensors(is_train)[0].get_dimensions();
  int batch_size = dims[0];
  compute(is_train, batch_size);
}
// Note: current_batchsize here is the batchsize on this device
// Computes the loss (and, in training, the input gradient in place) for the
// first current_batchsize samples on this device.
template <typename T>
void Loss<T>::compute(bool is_train, long long current_batchsize) {
  CudaDeviceContext context(get_device_id());
  PROFILE_RECORD("compute.start", get_gpu().get_stream(), false);
  Tensor2<T> &input_tensor = get_input_tensors(is_train)[0];
  const Tensor2<float> &label_tensor = get_label_tensors(is_train)[0];
  Tensor2<float> &loss_tensor = loss_tensors_[0];
  const auto &input_dim = input_tensor.get_dimensions();
  int batch_size = input_dim[0];
  int feature_dim = input_dim[1];
  T *input = input_tensor.get_ptr();
  const float *label = label_tensor.get_ptr();
  float *loss = loss_tensor.get_ptr();
  // Regularization term is folded into the loss value by do_compute().
  float rterm = 0.0f;
  if (regularizer_) {
    regularizer_->compute_rterm();
    rterm = regularizer_->get_rterm();
  }
  // BUGFIX: was `&&`, which is unsatisfiable (a value cannot be both larger
  // than batch_size and negative), so invalid batch sizes slipped through.
  if (current_batchsize > batch_size || current_batchsize < 0) {
    CK_THROW_(Error_t::WrongInput, "current_batchsize > batch_size || current_batchsize < 0");
  }
  do_compute(input, label, loss, current_batchsize, feature_dim, scaler_, rterm, is_train,
             get_gpu().get_stream());
  if (is_train) {
    // Short (last) batch: zero the gradient rows of the padding samples so
    // they do not contribute to the weight gradients.
    if (current_batchsize < batch_size) {
      hipMemsetAsync(input + current_batchsize * feature_dim, 0,
                     (batch_size - current_batchsize) * feature_dim * sizeof(T),
                     get_gpu().get_stream());
    }
  }
  if (is_train && regularizer_) {
    regularizer_->initialize_wgrad();
  }
  PROFILE_RECORD("compute.stop", get_gpu().get_stream(), false);
#ifndef NDEBUG
  hipDeviceSynchronize();
  CK_CUDA_THROW_(hipGetLastError());
#endif
}
// CE loss constructor: validates that the input carries exactly two logits
// per sample (click / no-click) and that input and label batch sizes match.
// BUGFIX: `&regularizer` had been mangled to the mojibake `®ularizer`.
template <typename T>
CrossEntropyLoss<T>::CrossEntropyLoss(const Tensor2<float> &label_tensor,
                                      const Tensor2<T> &input_tensor,
                                      const Tensor2<float> &loss_tensor,
                                      const std::shared_ptr<Regularizer<T>> &regularizer,
                                      const std::shared_ptr<GPUResource> &gpu_resource,
                                      int total_gpu_count, float scaler)
    : Loss<T>(label_tensor, input_tensor, loss_tensor, regularizer, gpu_resource, total_gpu_count,
              scaler) {
  const auto &input_dim = input_tensor.get_dimensions();
  const auto &label_dim = label_tensor.get_dimensions();
  int feature_dim = input_dim[1];
  if (feature_dim != 2)
    CK_THROW_(Error_t::WrongInput, "The feature dimension of CE loss input should be 2");
  if (input_dim[0] != label_dim[0])
    CK_THROW_(Error_t::WrongInput, "The batch sizes of input tensor and label tensor are not same");
}
// Suppose we use one thread to calculate one sample
// Two-class softmax cross-entropy over the batch. Designed for a
// SINGLE-BLOCK launch (tid is threadIdx.x only); each thread strides over
// samples. Dynamic shared memory: blockDim.x floats for per-thread partial
// losses. In training mode also overwrites input with the gradient, scaled
// by scaler and divided by batch_size * total_gpu_count.
template <typename T>
__global__ void CrossEntropy_Kernel(T *input, const float *label, float *cel_loss, int batch_size,
                                    int total_gpu_count, int feature_dim, float scaler, float rterm,
                                    bool is_train) {
  int tid = threadIdx.x;
  extern __shared__ float loss_s[];
  loss_s[tid] = 0.0f;
  float z0_exp, z1_exp, a0, a1;
  int id1, id2;
  for (int i = tid; i < batch_size; i += blockDim.x) {
    // Two logits per sample; softmax computed in double for stability.
    id1 = i * feature_dim;
    id2 = i * feature_dim + 1;
    z0_exp = exp((double)input[id1]);
    z1_exp = exp((double)input[id2]);
    a0 = z0_exp / (z0_exp + z1_exp);
    a1 = z1_exp / (z0_exp + z1_exp);
    bool no_click = label[i] < 0.5f;
    if (is_train) {
      // calculate the grad: softmax probability minus one-hot label
      input[id1] = (a0 - (no_click ? 1.0f : 0.0f)) / batch_size * scaler / total_gpu_count;
      input[id2] = (a1 - (!no_click ? 1.0f : 0.0f)) / batch_size * scaler / total_gpu_count;
    }
    loss_s[tid] += -1 * log(no_click ? a0 : a1);
  }
  __syncthreads();
  // Serial reduction by thread 0; writes mean loss plus regularization term.
  float loss_tmp = 0.0f;
  if (tid == 0) {
    for (int i = 0; i < blockDim.x; ++i) loss_tmp += loss_s[i];
    cel_loss[0] = loss_tmp / batch_size + rterm;
  }
}
// Launches CrossEntropy_Kernel on ONE block of min(batch_size, 1024)
// threads, with one float of dynamic shared memory per thread.
// NOTE(review): the single-block launch caps parallelism at 1024 threads
// regardless of batch size.
template <typename T>
void CrossEntropyLoss<T>::do_compute(T *input, const float *label, float *loss, int batch_size,
                                     int feature_dim, float scaler, float rterm, bool is_train,
                                     hipStream_t stream) {
  int block_size = min(batch_size, 1024);
  size_t smem_size = block_size * sizeof(float);
  if (block_size > 0) {  // batch_size == 0 would be an invalid launch
    hipLaunchKernelGGL(( CrossEntropy_Kernel), dim3(1), dim3(block_size), smem_size, stream, input, label, loss, batch_size,
                                                                          Loss<T>::get_total_gpu_count(),
                                                                          feature_dim, scaler, rterm, is_train);
  }
}
// BCE loss constructor: validates that the input carries exactly one logit
// per sample.
// BUGFIX: `&regularizer` had been mangled to the mojibake `®ularizer`.
template <typename T>
BinaryCrossEntropyLoss<T>::BinaryCrossEntropyLoss(
    const Tensor2<float> &label_tensor, const Tensor2<T> &input_tensor,
    const Tensor2<float> &loss_tensor, const std::shared_ptr<Regularizer<T>> &regularizer,
    const std::shared_ptr<GPUResource> &gpu_resource, int total_gpu_count, float scaler)
    : Loss<T>(label_tensor, input_tensor, loss_tensor, regularizer, gpu_resource, total_gpu_count,
              scaler) {
  const auto &input_dim = input_tensor.get_dimensions();
  int feature_dim = input_dim[1];
  if (feature_dim != 1)
    CK_THROW_(Error_t::WrongInput, "The feature dimension of BCE loss input should be 1");
}
// Suppose we use one thread to calculate one sample
// Sigmoid binary cross-entropy over the batch, single-block launch (tid is
// threadIdx.x only), one dynamic-shared float per thread. The two branches
// are the standard overflow-safe formulation using exp(-|x|).
// In training mode input[i] is overwritten with the scaled gradient
// (sigmoid(x) - y); in inference mode it is overwritten with sigmoid(x).
template <typename T>
__global__ void BinaryCrossEntropy_Kernel(T *input, const float *label, float *bce_loss,
                                          float scaler, int batch_size, int total_gpu_count,
                                          float rterm, bool is_train) {
  int tid = threadIdx.x;
  extern __shared__ float loss_s[];
  loss_s[tid] = 0.0f;
  for (int i = tid; i < batch_size; i += blockDim.x) {
    const float x = input[i];
    const float y = label[i];
    if (x >= 0) {
      float exp_neg_x = exp(-x);
      loss_s[tid] += x * (1 - y) + log(1 + exp_neg_x);
      input[i] = is_train ? ((1 - y) - exp_neg_x / (1 + exp_neg_x)) * scaler / (float)batch_size /
                                total_gpu_count
                          : 1 / (1 + exp_neg_x);
    } else {
      float exp_x = exp(x);
      loss_s[tid] += -x * y + log(1 + exp_x);
      input[i] = is_train
                     ? (-y + exp_x / (1 + exp_x)) * scaler / (float)batch_size / total_gpu_count
                     : exp_x / (exp_x + 1);
    }
  }
  __syncthreads();
  // Serial reduction by thread 0; writes mean loss plus regularization term.
  float loss_tmp = 0.0f;
  if (tid == 0) {
    for (int i = 0; i < blockDim.x; ++i) loss_tmp += loss_s[i];
    bce_loss[0] = loss_tmp / batch_size + rterm;
  }
}
// Launches BinaryCrossEntropy_Kernel on ONE block of min(batch_size, 1024)
// threads, with one float of dynamic shared memory per thread.
// NOTE(review): single-block launch caps parallelism at 1024 threads.
template <typename T>
void BinaryCrossEntropyLoss<T>::do_compute(T *input, const float *label, float *loss,
                                           int batch_size, int feature_dim, float scaler,
                                           float rterm, bool is_train, hipStream_t stream) {
  int block_size = min(batch_size, 1024);
  size_t smem_size = block_size * sizeof(float);
  if (block_size > 0) {  // batch_size == 0 would be an invalid launch
    hipLaunchKernelGGL(( BinaryCrossEntropy_Kernel), dim3(1), dim3(block_size), smem_size, stream,
        input, label, loss, scaler, batch_size, Loss<T>::get_total_gpu_count(), rterm, is_train);
  }
}
// Numerically stable binary cross-entropy of logit x against label y,
// returned NEGATED (callers negate again when accumulating -- see
// MultiCrossEntropy_Kernel). Uses the log(1 + exp(-|x|)) form so exp()
// never overflows.
__forceinline__ __device__ __host__ float cross_entropy_loss(float x, float y) {
  float loss;
  if (x >= 0) {
    float e = exp(-x);
    loss = x * (1 - y) + log(1 + e);
  } else {
    float e = exp(x);
    loss = -x * y + log(1 + e);
  }
  return -loss;
}
// Gradient of binary cross-entropy w.r.t. the logit: sigmoid(x) - y,
// computed with the overflow-safe branch matching cross_entropy_loss.
__forceinline__ __device__ __host__ float cross_entropy_loss_backward(float x, float y) {
  if (x >= 0) {
    float e = exp(-x);
    return (1 - y) - e / (1 + e);
  }
  float e = exp(x);
  return e / (1 + e) - y;
}
// Weighted multi-label BCE: grid-stride loop over batchsize *
// labels_per_sample entries; labels < -0.5 mark missing entries and are
// masked out of both loss and gradient. cross_entropy_loss() returns the
// NEGATED loss, hence the -loss_s when accumulating. bce_loss must be
// zeroed before launch (do_compute does this); partial sums are added with
// one atomic per warp via atomic_global_sum_div.
template <typename T>
__global__ void MultiCrossEntropy_Kernel(T *input, const float *label, const float *target_weight,
                                         float *bce_loss, int batchsize, int total_gpu_count,
                                         int labels_per_sample, float scaler, float rterm,
                                         bool is_train) {
  int tid = threadIdx.x + blockDim.x * blockIdx.x;
  int num_threads = blockDim.x * gridDim.x;
  float loss_s = 0.f;
  const int size = batchsize * labels_per_sample;
  for (int i = tid; i < size; i += num_threads) {
    // One weight per label column.
    int target_weight_idx = i % labels_per_sample;
    const float x = input[i];
    const float y = label[i];
    float loss =
        (label[i] < -0.5) ? 0.f : (target_weight[target_weight_idx] * cross_entropy_loss(x, y));
    loss_s += loss;
    if (is_train) {
      // Overwrite input with the scaled, masked gradient.
      input[i] = (label[i] < -0.5)
                     ? 0.f
                     : (target_weight[target_weight_idx] * cross_entropy_loss_backward(x, y) /
                        size * scaler / total_gpu_count);
    }
  }
  atomic_global_sum_div(-loss_s, bce_loss, size);
  // Add the regularization term exactly once.
  if (tid == 0) {
    atomicAdd(bce_loss, rterm);
  }
  return;
}
// Zeroes the loss slot (the kernel accumulates into it with atomicAdd) and
// launches MultiCrossEntropy_Kernel over batch_size * labels_per_sample
// entries with at most 40 blocks (grid-stride loop covers the rest).
template <typename T>
void MultiCrossEntropyLoss<T>::do_compute(T *input, const float *label, float *loss, int batch_size,
                                          int feature_dim, float scaler, float rterm, bool is_train,
                                          hipStream_t stream) {
  int labels_per_sample = feature_dim;
  hipMemsetAsync(loss, 0, Loss<T>::get_loss_tensors()[0].get_size_in_bytes(), stream);
  const int BLOCK_SIZE = 256;
  const int size = batch_size * labels_per_sample;
  // BUGFIX: was `(size - 1) / BLOCK_SIZE`, which is not a ceiling division
  // and yields a 0-block (invalid) launch whenever size <= BLOCK_SIZE.
  const int GRID_SIZE = min(40, (size + BLOCK_SIZE - 1) / BLOCK_SIZE);
  float *target_weight = target_weight_.get_ptr();
  hipLaunchKernelGGL(( MultiCrossEntropy_Kernel), dim3(GRID_SIZE), dim3(BLOCK_SIZE), 0, stream,
      input, label, target_weight, loss, batch_size, Loss<T>::get_total_gpu_count(),
      labels_per_sample, scaler, rterm, is_train);
}
// Multi-label CE constructor: validates (batch, labels) shapes, checks that
// target_weight has one entry per label column, and copies the weights into
// a device-side tensor owned by this object.
// BUGFIX: `&regularizer` had been mangled to the mojibake `®ularizer`, and
// the second error message referenced get_dims()[0] while the check is
// against dimension [1].
template <typename T>
MultiCrossEntropyLoss<T>::MultiCrossEntropyLoss(const Tensor2<float> &label_tensor,
                                                const Tensor2<T> &input_tensor,
                                                const Tensor2<float> &loss_tensor,
                                                const std::shared_ptr<Regularizer<T>> &regularizer,
                                                const std::vector<float> &target_weight,
                                                const std::shared_ptr<GPUResource> &gpu_resource,
                                                int total_gpu_count, float scaler)
    : Loss<T>(label_tensor, input_tensor, loss_tensor, regularizer, gpu_resource, total_gpu_count,
              scaler) {
  if (label_tensor.get_dimensions().size() != 2 || input_tensor.get_dimensions().size() != 2 ||
      label_tensor.get_dimensions()[0] != input_tensor.get_dimensions()[0] ||
      label_tensor.get_dimensions()[1] != input_tensor.get_dimensions()[1]) {
    CK_THROW_(Error_t::WrongInput, "Format of input tensor and label tensor don't match");
  }
  // verify the length of target_weight: one weight per label column
  if (target_weight.size() != input_tensor.get_dimensions()[1]) {
    CK_THROW_(Error_t::WrongInput, "target_weight.size() != input_tensor.get_dims()[1]");
  }
  // load target_weight to internal Tensor
  std::shared_ptr<GeneralBuffer2<CudaAllocator>> internal_buff =
      GeneralBuffer2<CudaAllocator>::create();
  std::vector<size_t> twdim = {1, label_tensor.get_dimensions()[1]};
  internal_buff->reserve(twdim, &target_weight_);
  CudaDeviceContext context(Loss<T>::get_device_id());
  internal_buff->allocate();
  CK_CUDA_THROW_(hipMemcpy(target_weight_.get_ptr(), target_weight.data(),
                           target_weight_.get_size_in_bytes(), hipMemcpyHostToDevice));
}
template class Loss<__half>;
template class Loss<float>;
template class MultiCrossEntropyLoss<__half>;
template class MultiCrossEntropyLoss<float>;
template class CrossEntropyLoss<__half>;
template class CrossEntropyLoss<float>;
template class BinaryCrossEntropyLoss<__half>;
template class BinaryCrossEntropyLoss<float>;
} // namespace HugeCTR
|
61ffec7a26ef5c2c89b23c048798a2d186a63c4c.cu
|
/*
* Copyright (c) 2020, NVIDIA CORPORATION.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
#include <loss.hpp>
#include <utils.cuh>
#include <vector>
namespace HugeCTR {
namespace {
// Reduces val across the calling warp (warpReduceSum from utils.cuh), then
// lane 0 of each warp atomically adds val/div into *acc -- one atomic per
// warp instead of one per thread, cutting contention 32x.
// NOTE(review): presumably warpReduceSum assumes all 32 lanes participate;
// confirm at call sites (MultiCrossEntropy_Kernel launches full warps).
template <typename T>
__forceinline__ __device__ void atomic_global_sum_div(T val, T *acc, float div) {
  val = warpReduceSum(val);
  if (threadIdx.x % warpSize == 0) {
    atomicAdd(acc, (T)(val / div));
  }
  return;
}
} // namespace
// Base-class constructor: stores the regularizer / GPU resource / scaling
// configuration and registers the single label, input and loss tensor this
// loss instance operates on.
// BUGFIX: the parameter `&regularizer` had been mangled to the mojibake
// `®ularizer` ("&reg" swallowed as an HTML entity), which does not compile.
template <typename T>
Loss<T>::Loss(const Tensor2<float> &label_tensor, const Tensor2<T> &input_tensor,
              const Tensor2<float> &loss_tensor, const std::shared_ptr<Regularizer<T>> &regularizer,
              const std::shared_ptr<GPUResource> &gpu_resource, int total_gpu_count, float scaler)
    : regularizer_(regularizer),
      gpu_resource_(gpu_resource),
      total_gpu_count_(total_gpu_count),
      scaler_(scaler) {
  label_tensors_.push_back(label_tensor);
  input_tensors_.push_back(input_tensor);
  loss_tensors_.push_back(loss_tensor);
}
// Convenience overload: run the loss over the full batch, whose size is the
// leading dimension of the (train or evaluate) input tensor.
template <typename T>
void Loss<T>::compute(bool is_train) {
  const auto &dims = get_input_tensors(is_train)[0].get_dimensions();
  int batch_size = dims[0];
  compute(is_train, batch_size);
}
// Note: current_batchsize here is the batchsize on this device
// Computes the loss (and, in training, the input gradient in place) for the
// first current_batchsize samples on this device.
template <typename T>
void Loss<T>::compute(bool is_train, long long current_batchsize) {
  CudaDeviceContext context(get_device_id());
  PROFILE_RECORD("compute.start", get_gpu().get_stream(), false);
  Tensor2<T> &input_tensor = get_input_tensors(is_train)[0];
  const Tensor2<float> &label_tensor = get_label_tensors(is_train)[0];
  Tensor2<float> &loss_tensor = loss_tensors_[0];
  const auto &input_dim = input_tensor.get_dimensions();
  int batch_size = input_dim[0];
  int feature_dim = input_dim[1];
  T *input = input_tensor.get_ptr();
  const float *label = label_tensor.get_ptr();
  float *loss = loss_tensor.get_ptr();
  // Regularization term is folded into the loss value by do_compute().
  float rterm = 0.0f;
  if (regularizer_) {
    regularizer_->compute_rterm();
    rterm = regularizer_->get_rterm();
  }
  // BUGFIX: was `&&`, which is unsatisfiable (a value cannot be both larger
  // than batch_size and negative), so invalid batch sizes slipped through.
  if (current_batchsize > batch_size || current_batchsize < 0) {
    CK_THROW_(Error_t::WrongInput, "current_batchsize > batch_size || current_batchsize < 0");
  }
  do_compute(input, label, loss, current_batchsize, feature_dim, scaler_, rterm, is_train,
             get_gpu().get_stream());
  if (is_train) {
    // Short (last) batch: zero the gradient rows of the padding samples so
    // they do not contribute to the weight gradients.
    if (current_batchsize < batch_size) {
      cudaMemsetAsync(input + current_batchsize * feature_dim, 0,
                      (batch_size - current_batchsize) * feature_dim * sizeof(T),
                      get_gpu().get_stream());
    }
  }
  if (is_train && regularizer_) {
    regularizer_->initialize_wgrad();
  }
  PROFILE_RECORD("compute.stop", get_gpu().get_stream(), false);
#ifndef NDEBUG
  cudaDeviceSynchronize();
  CK_CUDA_THROW_(cudaGetLastError());
#endif
}
// CE loss constructor: validates that the input carries exactly two logits
// per sample (click / no-click) and that input and label batch sizes match.
// BUGFIX: `&regularizer` had been mangled to the mojibake `®ularizer`.
template <typename T>
CrossEntropyLoss<T>::CrossEntropyLoss(const Tensor2<float> &label_tensor,
                                      const Tensor2<T> &input_tensor,
                                      const Tensor2<float> &loss_tensor,
                                      const std::shared_ptr<Regularizer<T>> &regularizer,
                                      const std::shared_ptr<GPUResource> &gpu_resource,
                                      int total_gpu_count, float scaler)
    : Loss<T>(label_tensor, input_tensor, loss_tensor, regularizer, gpu_resource, total_gpu_count,
              scaler) {
  const auto &input_dim = input_tensor.get_dimensions();
  const auto &label_dim = label_tensor.get_dimensions();
  int feature_dim = input_dim[1];
  if (feature_dim != 2)
    CK_THROW_(Error_t::WrongInput, "The feature dimension of CE loss input should be 2");
  if (input_dim[0] != label_dim[0])
    CK_THROW_(Error_t::WrongInput, "The batch sizes of input tensor and label tensor are not same");
}
// Suppose we use one thread to calculate one sample
// Two-class softmax cross-entropy over the batch. Designed for a
// SINGLE-BLOCK launch (tid is threadIdx.x only); each thread strides over
// samples. Dynamic shared memory: blockDim.x floats for per-thread partial
// losses. In training mode also overwrites input with the gradient, scaled
// by scaler and divided by batch_size * total_gpu_count.
template <typename T>
__global__ void CrossEntropy_Kernel(T *input, const float *label, float *cel_loss, int batch_size,
                                    int total_gpu_count, int feature_dim, float scaler, float rterm,
                                    bool is_train) {
  int tid = threadIdx.x;
  extern __shared__ float loss_s[];
  loss_s[tid] = 0.0f;
  float z0_exp, z1_exp, a0, a1;
  int id1, id2;
  for (int i = tid; i < batch_size; i += blockDim.x) {
    // Two logits per sample; softmax computed in double for stability.
    id1 = i * feature_dim;
    id2 = i * feature_dim + 1;
    z0_exp = exp((double)input[id1]);
    z1_exp = exp((double)input[id2]);
    a0 = z0_exp / (z0_exp + z1_exp);
    a1 = z1_exp / (z0_exp + z1_exp);
    bool no_click = label[i] < 0.5f;
    if (is_train) {
      // calculate the grad: softmax probability minus one-hot label
      input[id1] = (a0 - (no_click ? 1.0f : 0.0f)) / batch_size * scaler / total_gpu_count;
      input[id2] = (a1 - (!no_click ? 1.0f : 0.0f)) / batch_size * scaler / total_gpu_count;
    }
    loss_s[tid] += -1 * log(no_click ? a0 : a1);
  }
  __syncthreads();
  // Serial reduction by thread 0; writes mean loss plus regularization term.
  float loss_tmp = 0.0f;
  if (tid == 0) {
    for (int i = 0; i < blockDim.x; ++i) loss_tmp += loss_s[i];
    cel_loss[0] = loss_tmp / batch_size + rterm;
  }
}
// Launches CrossEntropy_Kernel on ONE block of min(batch_size, 1024)
// threads, with one float of dynamic shared memory per thread.
// NOTE(review): the single-block launch caps parallelism at 1024 threads
// regardless of batch size.
template <typename T>
void CrossEntropyLoss<T>::do_compute(T *input, const float *label, float *loss, int batch_size,
                                     int feature_dim, float scaler, float rterm, bool is_train,
                                     cudaStream_t stream) {
  int block_size = min(batch_size, 1024);
  size_t smem_size = block_size * sizeof(float);
  if (block_size > 0) {  // batch_size == 0 would be an invalid launch
    CrossEntropy_Kernel<<<1, block_size, smem_size, stream>>>(input, label, loss, batch_size,
                                                              Loss<T>::get_total_gpu_count(),
                                                              feature_dim, scaler, rterm, is_train);
  }
}
// BCE loss constructor: validates that the input carries exactly one logit
// per sample.
// BUGFIX: `&regularizer` had been mangled to the mojibake `®ularizer`.
template <typename T>
BinaryCrossEntropyLoss<T>::BinaryCrossEntropyLoss(
    const Tensor2<float> &label_tensor, const Tensor2<T> &input_tensor,
    const Tensor2<float> &loss_tensor, const std::shared_ptr<Regularizer<T>> &regularizer,
    const std::shared_ptr<GPUResource> &gpu_resource, int total_gpu_count, float scaler)
    : Loss<T>(label_tensor, input_tensor, loss_tensor, regularizer, gpu_resource, total_gpu_count,
              scaler) {
  const auto &input_dim = input_tensor.get_dimensions();
  int feature_dim = input_dim[1];
  if (feature_dim != 1)
    CK_THROW_(Error_t::WrongInput, "The feature dimension of BCE loss input should be 1");
}
// Suppose we use one thread to calculate one sample
// Sigmoid binary cross-entropy over the batch, single-block launch (tid is
// threadIdx.x only), one dynamic-shared float per thread. The two branches
// are the standard overflow-safe formulation using exp(-|x|).
// In training mode input[i] is overwritten with the scaled gradient
// (sigmoid(x) - y); in inference mode it is overwritten with sigmoid(x).
template <typename T>
__global__ void BinaryCrossEntropy_Kernel(T *input, const float *label, float *bce_loss,
                                          float scaler, int batch_size, int total_gpu_count,
                                          float rterm, bool is_train) {
  int tid = threadIdx.x;
  extern __shared__ float loss_s[];
  loss_s[tid] = 0.0f;
  for (int i = tid; i < batch_size; i += blockDim.x) {
    const float x = input[i];
    const float y = label[i];
    if (x >= 0) {
      float exp_neg_x = exp(-x);
      loss_s[tid] += x * (1 - y) + log(1 + exp_neg_x);
      input[i] = is_train ? ((1 - y) - exp_neg_x / (1 + exp_neg_x)) * scaler / (float)batch_size /
                                total_gpu_count
                          : 1 / (1 + exp_neg_x);
    } else {
      float exp_x = exp(x);
      loss_s[tid] += -x * y + log(1 + exp_x);
      input[i] = is_train
                     ? (-y + exp_x / (1 + exp_x)) * scaler / (float)batch_size / total_gpu_count
                     : exp_x / (exp_x + 1);
    }
  }
  __syncthreads();
  // Serial reduction by thread 0; writes mean loss plus regularization term.
  float loss_tmp = 0.0f;
  if (tid == 0) {
    for (int i = 0; i < blockDim.x; ++i) loss_tmp += loss_s[i];
    bce_loss[0] = loss_tmp / batch_size + rterm;
  }
}
// Launches BinaryCrossEntropy_Kernel on ONE block of min(batch_size, 1024)
// threads, with one float of dynamic shared memory per thread.
// NOTE(review): single-block launch caps parallelism at 1024 threads.
template <typename T>
void BinaryCrossEntropyLoss<T>::do_compute(T *input, const float *label, float *loss,
                                           int batch_size, int feature_dim, float scaler,
                                           float rterm, bool is_train, cudaStream_t stream) {
  int block_size = min(batch_size, 1024);
  size_t smem_size = block_size * sizeof(float);
  if (block_size > 0) {  // batch_size == 0 would be an invalid launch
    BinaryCrossEntropy_Kernel<<<1, block_size, smem_size, stream>>>(
        input, label, loss, scaler, batch_size, Loss<T>::get_total_gpu_count(), rterm, is_train);
  }
}
// Numerically stable binary cross-entropy of logit x against label y,
// returned NEGATED (callers negate again when accumulating -- see
// MultiCrossEntropy_Kernel). Uses the log(1 + exp(-|x|)) form so exp()
// never overflows.
__forceinline__ __device__ __host__ float cross_entropy_loss(float x, float y) {
  float loss;
  if (x >= 0) {
    float e = exp(-x);
    loss = x * (1 - y) + log(1 + e);
  } else {
    float e = exp(x);
    loss = -x * y + log(1 + e);
  }
  return -loss;
}
// Gradient of binary cross-entropy w.r.t. the logit: sigmoid(x) - y,
// computed with the overflow-safe branch matching cross_entropy_loss.
__forceinline__ __device__ __host__ float cross_entropy_loss_backward(float x, float y) {
  if (x >= 0) {
    float e = exp(-x);
    return (1 - y) - e / (1 + e);
  }
  float e = exp(x);
  return e / (1 + e) - y;
}
// Weighted multi-label BCE: grid-stride loop over batchsize *
// labels_per_sample entries; labels < -0.5 mark missing entries and are
// masked out of both loss and gradient. cross_entropy_loss() returns the
// NEGATED loss, hence the -loss_s when accumulating. bce_loss must be
// zeroed before launch (do_compute does this); partial sums are added with
// one atomic per warp via atomic_global_sum_div.
template <typename T>
__global__ void MultiCrossEntropy_Kernel(T *input, const float *label, const float *target_weight,
                                         float *bce_loss, int batchsize, int total_gpu_count,
                                         int labels_per_sample, float scaler, float rterm,
                                         bool is_train) {
  int tid = threadIdx.x + blockDim.x * blockIdx.x;
  int num_threads = blockDim.x * gridDim.x;
  float loss_s = 0.f;
  const int size = batchsize * labels_per_sample;
  for (int i = tid; i < size; i += num_threads) {
    // One weight per label column.
    int target_weight_idx = i % labels_per_sample;
    const float x = input[i];
    const float y = label[i];
    float loss =
        (label[i] < -0.5) ? 0.f : (target_weight[target_weight_idx] * cross_entropy_loss(x, y));
    loss_s += loss;
    if (is_train) {
      // Overwrite input with the scaled, masked gradient.
      input[i] = (label[i] < -0.5)
                     ? 0.f
                     : (target_weight[target_weight_idx] * cross_entropy_loss_backward(x, y) /
                        size * scaler / total_gpu_count);
    }
  }
  atomic_global_sum_div(-loss_s, bce_loss, size);
  // Add the regularization term exactly once.
  if (tid == 0) {
    atomicAdd(bce_loss, rterm);
  }
  return;
}
// Launches MultiCrossEntropy_Kernel on `stream`.
// input:  (batch_size x feature_dim) logits; overwritten with gradients
//         when is_train is true.
// label:  matching label matrix (entries < -0.5 are ignored targets).
// loss:   single-element device buffer receiving the scalar loss.
template <typename T>
void MultiCrossEntropyLoss<T>::do_compute(T *input, const float *label, float *loss, int batch_size,
                                          int feature_dim, float scaler, float rterm, bool is_train,
                                          cudaStream_t stream) {
  // One label (and one target weight) per feature dimension.
  int labels_per_sample = feature_dim;
  // The kernel accumulates into `loss` with atomics, so it must start zeroed.
  cudaMemsetAsync(loss, 0, Loss<T>::get_loss_tensors()[0].get_size_in_bytes(), stream);

  const int BLOCK_SIZE = 256;
  const int size = batch_size * labels_per_sample;
  // BUGFIX: ceiling division. The previous (size - 1) / BLOCK_SIZE produced
  // a grid of 0 blocks whenever size <= BLOCK_SIZE -- an invalid launch
  // configuration that silently skipped the kernel (loss stayed 0 and the
  // regularization term was never added). The kernel uses a grid-stride
  // loop, so capping at 40 blocks remains correct for large sizes.
  int grid_size = (size + BLOCK_SIZE - 1) / BLOCK_SIZE;
  if (grid_size > 40) grid_size = 40;
  float *target_weight = target_weight_.get_ptr();

  if (grid_size > 0) {
    MultiCrossEntropy_Kernel<<<grid_size, BLOCK_SIZE, 0, stream>>>(
        input, label, target_weight, loss, batch_size, Loss<T>::get_total_gpu_count(),
        labels_per_sample, scaler, rterm, is_train);
  }
}
// Constructs the loss layer, validates that label and input tensors agree
// in shape, and uploads the per-label target weights to device memory.
template <typename T>
MultiCrossEntropyLoss<T>::MultiCrossEntropyLoss(const Tensor2<float> &label_tensor,
                                                const Tensor2<T> &input_tensor,
                                                const Tensor2<float> &loss_tensor,
                                                const std::shared_ptr<Regularizer<T>> &regularizer,
                                                const std::vector<float> &target_weight,
                                                const std::shared_ptr<GPUResource> &gpu_resource,
                                                int total_gpu_count, float scaler)
    : Loss<T>(label_tensor, input_tensor, loss_tensor, regularizer, gpu_resource, total_gpu_count,
              scaler) {
  // Both tensors must be 2D with identical (batch, label-count) dimensions.
  if (label_tensor.get_dimensions().size() != 2 || input_tensor.get_dimensions().size() != 2 ||
      label_tensor.get_dimensions()[0] != input_tensor.get_dimensions()[0] ||
      label_tensor.get_dimensions()[1] != input_tensor.get_dimensions()[1]) {
    CK_THROW_(Error_t::WrongInput, "Format of input tensor and label tensor don't match");
  }
  // verify the length of target_weight: one weight per label column.
  // NOTE(review): the message below says get_dims()[0] but the check is on
  // dimension [1] -- confirm which is intended.
  if (target_weight.size() != input_tensor.get_dimensions()[1]) {
    CK_THROW_(Error_t::WrongInput, "target_weight.size() != input_tensor.get_dims()[0]");
  }
  // load target_weight to internal Tensor
  std::shared_ptr<GeneralBuffer2<CudaAllocator>> internal_buff =
      GeneralBuffer2<CudaAllocator>::create();
  std::vector<size_t> twdim = {1, label_tensor.get_dimensions()[1]};
  internal_buff->reserve(twdim, &target_weight_);

  // Allocate and copy on this loss layer's own device.
  CudaDeviceContext context(Loss<T>::get_device_id());
  internal_buff->allocate();
  CK_CUDA_THROW_(cudaMemcpy(target_weight_.get_ptr(), target_weight.data(),
                            target_weight_.get_size_in_bytes(), cudaMemcpyHostToDevice));

  return;
}
// Explicit template instantiations for the two supported compute types.
template class Loss<__half>;
template class Loss<float>;
template class MultiCrossEntropyLoss<__half>;
template class MultiCrossEntropyLoss<float>;
template class CrossEntropyLoss<__half>;
template class CrossEntropyLoss<float>;
template class BinaryCrossEntropyLoss<__half>;
template class BinaryCrossEntropyLoss<float>;
} // namespace HugeCTR
|
5b400582127268d4b96200cd9e83d8a7ac15350b.hip
|
// !!! This is a file automatically generated by hipify!!!
#include <stdio.h>
#include <stdlib.h>
#include <unistd.h>
#include <math.h>
#include <complex.h>
#include <string.h>
#include <errno.h>
#include <sys/types.h>
#include <sys/stat.h>
#include <fcntl.h>
#include <getopt.h>
#include <time.h>
#include "cuda_error.h"
#include "init.h"
#include "struct.h"
#include "settings.h"
#include "auxi.h"
#include "kernels.h"
#include "spline_z.h"
#include <hip/hip_runtime_api.h>
#include <hipfft.h>
/* Command line options handling: search
*/
// Parse command-line options for the search program into sett/opts.
// Uses getopt_long_only; the long flag options write directly into the
// function-local static flags declared below, which are copied into opts
// after parsing. Exits the process when --help is given.
void handle_opts( Search_settings *sett,
Command_line_opts *opts,
int argc,
char* argv[]) {
opts->hemi=0;
opts->wd=NULL;
// Default F-statistic threshold
opts->trl=20;
strcpy (opts->prefix, TOSTR(PREFIX));
strcpy (opts->dtaprefix, TOSTR(DTAPREFIX));
opts->label[0] = '\0';
opts->range[0] = '\0';
opts->getrange[0] = '\0';
opts->usedet[0] = '\0';
opts->addsig[0] = '\0';
// Initial value of starting frequency set to a negative quantity.
// If this is not changed by the command line value, fpo is calculated
// from the band number b (fpo = fpo = fstart + 0.96875*b/(2dt))
sett->fpo = -1;
// Default initial value of the data sampling time
sett->dt = 0.5;
opts->help_flag=0;
opts->white_flag=0;
opts->s0_flag=0;
opts->checkp_flag=0;
// static: getopt_long stores into these through the long_options table.
static int help_flag=0, white_flag=0, s0_flag=0, checkp_flag=1;
// Reading arguments
while (1) {
static struct option long_options[] = {
{"help", no_argument, &help_flag, 1},
{"whitenoise", no_argument, &white_flag, 1},
{"nospindown", no_argument, &s0_flag, 1},
{"nocheckpoint", no_argument, &checkp_flag, 0},
// frame number
{"ident", required_argument, 0, 'i'},
// frequency band number
{"band", required_argument, 0, 'b'},
// output directory
{"output", required_argument, 0, 'o'},
// input data directory
{"data", required_argument, 0, 'd'},
// non-standard label for naming files
{"label", required_argument, 0, 'l'},
// narrower grid range parameter file
{"range", required_argument, 0, 'r'},
// write full grid range to file
{"getrange", required_argument, 0, 'g'},
// change directory parameter
{"cwd", required_argument, 0, 'c'},
// interpolation method
{"threshold", required_argument, 0, 't'},
// hemisphere
{"hemisphere", required_argument, 0, 'h'},
// fpo value
{"fpo", required_argument, 0, 'p'},
// add signal parameters
{"addsig", required_argument, 0, 'x'},
// which detectors to use
{"usedet", required_argument, 0, 'u'},
// data sampling time
{"dt", required_argument, 0, 's'},
{0, 0, 0, 0}
};
// --help was seen on a previous iteration: print usage and quit.
if (help_flag) {
printf("polgraw-allsky periodic GWs: search for candidate signals with the F-statistic\n");
printf("Usage: ./search -[switch1] <value1> -[switch2] <value2> ...\n") ;
printf("Switches are:\n\n");
printf("-d, -data Data directory (default is .)\n");
printf("-o, -output Output directory (default is ./candidates)\n");
printf("-i, -ident Frame number\n");
printf("-b, -band Band number\n");
printf("-l, -label Custom label for the input and output files\n");
printf("-r, -range Use file with grid range or pulsar position\n");
printf("-g, -getrange Write grid ranges & exit (ignore -r)\n");
printf("-c, -cwd Change to directory <dir>\n");
printf("-t, -threshold Threshold for the F-statistic (default is 20)\n");
printf("-h, -hemisphere Hemisphere (default is 0 - does both)\n");
printf("-p, -fpo Reference band frequency fpo value\n");
printf("-s, -dt data sampling time dt (default value: 0.5)\n");
printf("-u, -usedet Use only detectors from string (default is use all available)\n");
printf("-x, -addsig Add signal with parameters from <file>\n\n");
printf("Also:\n\n");
printf("--whitenoise White Gaussian noise assumed\n");
printf("--nospindown Spindowns neglected\n");
printf("--nocheckpoint State file won't be created (no checkpointing)\n");
printf("--help This help\n");
exit(EXIT_SUCCESS);
}
int option_index = 0;
int c = getopt_long_only(argc, argv, "i:b:o:d:l:r:g:c:t:h:p:x:s:u:",
long_options, &option_index);
if (c == -1)
break;
switch (c) {
case 'i':
opts->ident = atoi (optarg);
break;
case 't':
opts->trl = atof(optarg);
break;
case 'h':
// NOTE(review): hemi is assigned from atof (double) and implicitly
// truncated to the integer field; atoi would express the intent.
opts->hemi = atof(optarg);
break;
case 'b':
opts->band = atoi(optarg);
break;
case 'o':
strcpy(opts->prefix, optarg);
break;
case 'd':
strcpy(opts->dtaprefix, optarg);
break;
case 'l':
// Labels are stored with a leading underscore for filename building.
opts->label[0] = '_';
strcpy(1+opts->label, optarg);
break;
case 'r':
strcpy(opts->range, optarg);
break;
case 'g':
strcpy(opts->getrange, optarg);
break;
case 'c':
opts->wd = (char *) malloc (1+strlen(optarg));
strcpy(opts->wd, optarg);
break;
case 'p':
sett->fpo = atof(optarg);
break;
case 'x':
strcpy(opts->addsig, optarg);
break;
case 's':
sett->dt = atof(optarg);
break;
case 'u':
strcpy(opts->usedet, optarg);
break;
case '?':
break;
default:
break ;
} /* switch c */
} /* while 1 */
// Copy the getopt-written static flags into the options struct.
opts->white_flag = white_flag;
opts->s0_flag = s0_flag;
opts->checkp_flag = checkp_flag;
printf("Input data directory is %s\n", opts->dtaprefix);
printf("Output directory is %s\n", opts->prefix);
printf("Frame and band numbers are %d and %d\n", opts->ident, opts->band);
// Starting band frequency:
// fpo_val is optionally read from the command line
// Its initial value is set to -1
if(!(sett->fpo >= 0))
// The usual definition (multiplying the offset by B=1/(2dt))
// !!! in RDC_O1 the fstart equals 10, not 100 like in VSR1 !!!
//
sett->fpo = 10. + 0.96875*opts->band*(0.5/sett->dt);
printf("The reference frequency fpo is %f\n", sett->fpo);
printf("The data sampling time dt is %f\n", sett->dt);
if (opts->white_flag)
printf ("Assuming white Gaussian noise\n");
// For legacy: FFT is now the only option
printf ("Using fftinterp=FFT (FFT interpolation by zero-padding)\n");
if(opts->trl!=20)
printf ("Threshold for the F-statistic is %lf\n", opts->trl);
if(opts->hemi)
printf ("Search for hemisphere %d\n", opts->hemi);
if (opts->s0_flag)
printf ("Assuming s_1 = 0.\n");
if (strlen(opts->label))
printf ("Using '%s' as data label\n", opts->label);
// -getrange overrides -r: the range file is ignored when writing ranges.
if(strlen(opts->getrange)){
printf ("Writing full grid ranges to '%s'\n", opts->getrange);
if(strlen(opts->range)) {
opts->range[0] = '\0';
printf (" WARNING! -r option will be ignored...\n");
}
}
if (strlen(opts->range))
printf ("Obtaining grid range from '%s'\n", opts->range);
if (strlen(opts->addsig))
printf ("Adding signal from '%s'\n", opts->addsig);
if (opts->wd) {
printf ("Changing working directory to %s\n", opts->wd);
if (chdir(opts->wd)) { perror (opts->wd); abort (); }
}
} // end of command line options handling
/* Generate grid from the M matrix (grid.bin)
*/
/* Read fftpad and the 4x4 grid-generating matrix M from grid.bin.
 * With -usedet for a single detector (2-char name, e.g. H1 or V1) the file
 * is taken from that detector's subdirectory. Exits on any failure so the
 * caller never sees an uninitialized M.
 */
void read_grid( Search_settings *sett,
Command_line_opts *opts) {
sett->M = (double *) calloc (16, sizeof (double));
FILE *data;
char filename[512];
// In case when -usedet option is used for one detector
// i.e. opts->usedet has a length of 2 (e.g. H1 or V1),
// read grid.bin from this detector subdirectory
// (see detectors_settings() in settings.c for details)
if(strlen(opts->usedet)==2)
sprintf (filename, "%s/%03d/%s/grid.bin", opts->dtaprefix, opts->ident, opts->usedet);
else
sprintf (filename, "%s/%03d/grid.bin", opts->dtaprefix, opts->ident);
if ((data=fopen (filename, "r")) != NULL) {
printf("Using grid file from %s\n", filename);
// BUGFIX: the fread return values were previously ignored; a truncated
// grid.bin would silently leave fftpad and M with garbage/zero content.
if (fread ((void *)&sett->fftpad, sizeof(int), 1, data) != 1) {
fprintf(stderr, "Error reading fftpad from %s\n", filename);
fclose (data);
exit(EXIT_FAILURE);
}
printf("Using fftpad from the grid file: %d\n", sett->fftpad);
// M: vector of 16 components consisting of 4 rows
// of 4x4 grid-generating matrix
if (fread ((void *)sett->M, sizeof(double), 16, data) != 16) {
fprintf(stderr, "Error reading the grid matrix from %s\n", filename);
fclose (data);
exit(EXIT_FAILURE);
}
fclose (data);
} else {
perror (filename);
exit(EXIT_FAILURE);
}
} // end of read grid
/* Array initialization */
// Allocate and initialize per-detector data, ephemeris and F-statistic
// arrays. Host buffers xDat and DetSSB are allocated as mapped pinned
// memory so the device pointers (xDat_d, DetSSB_d) alias the same storage;
// the remaining per-detector arrays are plain device allocations.
void init_arrays( Search_settings *sett,
Command_line_opts *opts,
Aux_arrays *aux_arr,
double **F_d) {
int i, status;
// Allocates and initializes to zero the data, detector ephemeris
// and the F-statistic arrays
FILE *data;
sett->Ninterp = sett->interpftpad*sett->nfft;
sett->nfftf = sett->fftpad*sett->nfft;
for(i=0; i<sett->nifo; i++) {
/// ifo[i].sig.xDat = (double *) calloc(sett->N, sizeof(double));
/// mapped memory works for CUDART_VERSION >= 2020
/// we should test if it's available, if not copy data explicitly to device
CudaSafeCall( hipHostMalloc((void **)&(ifo[i].sig.xDat), sett->N*sizeof(double),
hipHostMallocMapped) );
CudaSafeCall( hipHostGetDevicePointer((void **)&(ifo[i].sig.xDat_d),
(void *)ifo[i].sig.xDat, 0) );
// Input time-domain data handling
//
// The file name ifo[i].xdatname is constructed
// in settings.c, while looking for the detector
// subdirectories
if((data = fopen(ifo[i].xdatname, "r")) != NULL) {
// NOTE(review): status is assigned but never checked here.
status = fread((void *)(ifo[i].sig.xDat),
sizeof(double), sett->N, data);
fclose (data);
} else {
perror (ifo[i].xdatname);
exit(EXIT_FAILURE);
}
int j, Nzeros=0;
// Checking for null values in the data
for(j=0; j < sett->N; j++)
if(!ifo[i].sig.xDat[j]) Nzeros++;
ifo[i].sig.Nzeros = Nzeros;
// factor N/(N - Nzeros) to account for null values in the data
ifo[i].sig.crf0 = (double)sett->N/(sett->N - ifo[i].sig.Nzeros);
// Estimation of the variance for each detector
ifo[i].sig.sig2 = (ifo[i].sig.crf0)*var(ifo[i].sig.xDat, sett->N);
// DetSSB also uses mapped pinned memory: 3 coordinates per datapoint.
CudaSafeCall( hipHostMalloc((void **)&(ifo[i].sig.DetSSB), 3*sett->N*sizeof(double),
hipHostMallocMapped) );
CudaSafeCall( hipHostGetDevicePointer((void **)&(ifo[i].sig.DetSSB_d),
(void *)ifo[i].sig.DetSSB, 0) );
// Ephemeris file handling
char filename[512];
sprintf (filename, "%s/%03d/%s/DetSSB.bin",
opts->dtaprefix, opts->ident, ifo[i].name);
if((data = fopen(filename, "r")) != NULL) {
// Detector position w.r.t Solar System Baricenter
// for every datapoint
status = fread((void *)(ifo[i].sig.DetSSB),
sizeof(double), 3*sett->N, data);
// Deterministic phase defining the position of the Earth
// in its diurnal motion at t=0
status = fread((void *)(&ifo[i].sig.phir),
sizeof(double), 1, data);
// Earth's axis inclination to the ecliptic at t=0
status = fread((void *)(&ifo[i].sig.epsm),
sizeof(double), 1, data);
fclose (data);
printf("Using %s as detector %s ephemerids...\n", filename, ifo[i].name);
} else {
perror (filename);
// NOTE(review): a missing ephemeris only returns (leaving the setup
// half-initialized) while a missing data file exits -- confirm
// whether this should also be exit(EXIT_FAILURE).
return ;
}
// sincos
ifo[i].sig.sphir = sin(ifo[i].sig.phir);
ifo[i].sig.cphir = cos(ifo[i].sig.phir);
ifo[i].sig.sepsm = sin(ifo[i].sig.epsm);
ifo[i].sig.cepsm = cos(ifo[i].sig.epsm);
sett->sepsm = ifo[i].sig.sepsm;
sett->cepsm = ifo[i].sig.cepsm;
// Device-only work arrays for this detector.
CudaSafeCall( hipMalloc((void**)&ifo[i].sig.xDatma_d,
sizeof(hipfftDoubleComplex)*sett->N) );
CudaSafeCall( hipMalloc((void**)&ifo[i].sig.xDatmb_d,
sizeof(hipfftDoubleComplex)*sett->N) );
CudaSafeCall( hipMalloc((void**)&(ifo[i].sig.aa_d),
sizeof(double)*sett->N) );
CudaSafeCall( hipMalloc((void**)&(ifo[i].sig.bb_d),
sizeof(double)*sett->N) );
CudaSafeCall( hipMalloc((void**)&(ifo[i].sig.shft_d),
sizeof(double)*sett->N) );
CudaSafeCall( hipMalloc((void**)&(ifo[i].sig.shftf_d),
sizeof(double)*sett->N) );
} // end loop for detectors
// Check if the ephemerids have the same epsm parameter
for(i=1; i<sett->nifo; i++) {
if(!(ifo[i-1].sig.sepsm == ifo[i].sig.sepsm)) {
printf("The parameter epsm (DetSSB.bin) differs for detectors %s and %s. Aborting...\n", ifo[i-1].name, ifo[i].name);
exit(EXIT_FAILURE);
}
}
// if all is well with epsm, take the first value
sett->sepsm = ifo[0].sig.sepsm;
sett->cepsm = ifo[0].sig.cepsm;
// *F = (double *) calloc(2*sett->nfft, sizeof(double));
CudaSafeCall ( hipMalloc((void **)F_d, 2*sett->nfft*sizeof(double)));
// Auxiliary arrays, Earth's rotation
CudaSafeCall( hipMalloc((void**)&(aux_arr->t2_d),
sizeof(double)*sett->N) );
CudaSafeCall( hipMalloc((void**)&(aux_arr->cosmodf_d),
sizeof(double)*sett->N) );
CudaSafeCall( hipMalloc((void**)&(aux_arr->sinmodf_d),
sizeof(double)*sett->N) );
CudaSafeCall( hipMalloc((void**)&(aux_arr->tshift_d),
sizeof(double)*sett->N) );
init_spline_matrices(&aux_arr->diag_d, &aux_arr->ldiag_d, &aux_arr->udiag_d,
&aux_arr->B_d, sett->Ninterp);
// Precompute sin/cos of the Earth rotation phase for every datapoint.
hipLaunchKernelGGL(( compute_sincosmodf), dim3(sett->N/256+1),dim3(256), 0, 0, aux_arr->sinmodf_d, aux_arr->cosmodf_d,
sett->omr, sett->N);
} // end of init arrays
/* Search range */
// Establish the grid search range: hemisphere selection, then either a
// narrowed range read from the -r file or the full range derived from the
// grid matrix M (optionally written out with -g).
void set_search_range( Search_settings *sett,
Command_line_opts *opts,
Search_range *s_range) {
// Hemispheres (with respect to the ecliptic)
if(opts->hemi) {
s_range->pmr[0] = opts->hemi;
s_range->pmr[1] = opts->hemi;
} else {
s_range->pmr[0] = 1;
s_range->pmr[1] = 2;
}
// If the parameter range is invoked, the search is performed
// within the range of grid parameters from an ascii file
// ("-r range_file" from the command line)
FILE *data;
if (strlen (opts->range)) {
if ((data=fopen (opts->range, "r")) != NULL) {
// Eight integers: [min,max] pairs for spndr, nr, mr, pmr.
int aqq = fscanf (data, "%d %d %d %d %d %d %d %d",
s_range->spndr, 1+s_range->spndr, s_range->nr,
1+s_range->nr, s_range->mr, 1+s_range->mr,
s_range->pmr, 1+s_range->pmr);
if (aqq != 8) {
printf("Error when reading range file!\n");
exit(EXIT_FAILURE);
}
fclose (data);
} else {
perror (opts->range);
exit(EXIT_FAILURE);
}
} else {
// Establish the grid range in which the search will be performed
// with the use of the M matrix from grid.bin
gridr(
sett->M,
s_range->spndr,
s_range->nr,
s_range->mr,
sett->oms,
sett->Smax);
// -g: dump the computed full ranges to a file for later use.
if (strlen(opts->getrange)) {
FILE *data;
if ((data=fopen (opts->getrange, "w")) != NULL) {
fprintf(data, "%d %d\n%d %d\n%d %d\n%d %d\n",
s_range->spndr[0], s_range->spndr[1],
s_range->nr[0], s_range->nr[1],
s_range->mr[0], s_range->mr[1],
s_range->pmr[0], s_range->pmr[1] );
printf("Wrote input data grid ranges to %s\n", opts->getrange);
fclose (data);
// exit(EXIT_SUCCESS);
} else {
printf("Can't open %s file for writing\n", opts->getrange);
exit(EXIT_FAILURE);
}
}
}
printf("set_search_range() - the grid ranges are maximally this:\n");
printf("(spndr, nr, mr, pmr pairs): %d %d %d %d %d %d %d %d\n", \
s_range->spndr[0], s_range->spndr[1], s_range->nr[0], s_range->nr[1],
s_range->mr[0], s_range->mr[1], s_range->pmr[0], s_range->pmr[1]);
printf("Smin: %le, -Smax: %le\n", sett->Smin, sett->Smax);
} // end of set search range
/* FFT Plans
*/
/* Create the FFT plans and the shared xa/xb device buffer.
 * xa_d and xb_d are two halves of one allocation of length 2*arr_len,
 * sized to the larger of the padded-FFT and interpolation lengths.
 */
void plan_fft (Search_settings *sett,
// Command_line_opts *opts,
FFT_plans *plans,
FFT_arrays *fft_arr
// Aux_arrays *aux_arr
) {
// sett->Ninterp = sett->interpftpad*sett->nfft; //moved to init_arrays
fft_arr->arr_len = (sett->fftpad*sett->nfft > sett->Ninterp
? sett->fftpad*sett->nfft : sett->Ninterp);
CudaSafeCall ( hipMalloc((void **)&fft_arr->xa_d, 2*fft_arr->arr_len*sizeof(hipfftDoubleComplex)) );
fft_arr->xb_d = fft_arr->xa_d + fft_arr->arr_len;
// sett->nfftf = sett->fftpad*sett->nfft; // moved to init_arrays
// no need for plans '2' - dimaensions are the same
hipfftPlan1d( &(plans->plan), sett->nfftf, HIPFFT_Z2Z, 1);
hipfftPlan1d( &(plans->pl_int), sett->nfft, HIPFFT_Z2Z, 1);
hipfftPlan1d( &(plans->pl_inv), sett->Ninterp, HIPFFT_Z2Z, 1);
// BUGFIX: a second, duplicate hipMalloc of xa_d followed the plan creation.
// It leaked the first allocation and left xb_d pointing into the orphaned
// buffer while xa_d pointed into the new one; it has been removed.
}
/* Checkpointing */
// Resume from a checkpoint state file when checkpointing is enabled;
// otherwise (or when no state file exists) start from the beginning of
// the search ranges. An empty state file means the run already finished.
void read_checkpoints(Command_line_opts *opts,
Search_range *s_range,
int *FNum) {
if(opts->checkp_flag) {
// filename of checkpoint state file, depending on the hemisphere
if(opts->hemi)
sprintf(opts->qname, "state_%03d_%04d%s_%d.dat",
opts->ident, opts->band, opts->label, opts->hemi);
else
sprintf(opts->qname, "state_%03d_%04d%s.dat",
opts->ident, opts->band, opts->label);
FILE *state;
if((state = fopen(opts->qname, "r")) != NULL) {
// Scan the state file to get last recorded parameters
if((fscanf(state, "%d %d %d %d %d", &s_range->pst, &s_range->mst,
&s_range->nst, &s_range->sst, FNum)) == EOF) {
// This means that state file is empty (=end of the calculations)
// NOTE(review): s_range/FNum are left unset on this early return.
fprintf (stderr, "State file empty: nothing to do...\n");
fclose (state);
return;
}
fclose (state);
// No state file - start from the beginning
} else {
s_range->pst = s_range->pmr[0];
s_range->mst = s_range->mr[0];
s_range->nst = s_range->nr[0];
s_range->sst = s_range->spndr[0];
*FNum = 0;
} // if state
} else {
// Checkpointing disabled: always start from the range minima.
s_range->pst = s_range->pmr[0];
s_range->mst = s_range->mr[0];
s_range->nst = s_range->nr[0];
s_range->sst = s_range->spndr[0];
*FNum = 0;
} // if checkp_flag
} // end reading checkpoints
/* Cleanup & memory free */
/* Cleanup & memory free: releases everything allocated in init_arrays()
 * and plan_fft(). xb_d is not freed separately because it aliases the
 * second half of the xa_d allocation.
 */
void cleanup(
Search_settings *sett,
Command_line_opts *opts,
Search_range *s_range,
FFT_plans *plans,
FFT_arrays *fft_arr,
Aux_arrays *aux,
double *F_d) {
int i;
for(i=0; i<sett->nifo; i++) {
CudaSafeCall( hipHostFree(ifo[i].sig.xDat) );
CudaSafeCall( hipHostFree(ifo[i].sig.DetSSB) );
CudaSafeCall( hipFree(ifo[i].sig.xDatma_d) );
CudaSafeCall( hipFree(ifo[i].sig.xDatmb_d) );
CudaSafeCall( hipFree(ifo[i].sig.aa_d) );
CudaSafeCall( hipFree(ifo[i].sig.bb_d) );
CudaSafeCall( hipFree(ifo[i].sig.shft_d) );
CudaSafeCall( hipFree(ifo[i].sig.shftf_d) );
}
CudaSafeCall( hipFree(aux->cosmodf_d) );
CudaSafeCall( hipFree(aux->sinmodf_d) );
CudaSafeCall( hipFree(aux->t2_d) );
// BUGFIX: tshift_d is allocated in init_arrays() but was never released.
CudaSafeCall( hipFree(aux->tshift_d) );
// NOTE(review): the spline work arrays (diag_d, ldiag_d, udiag_d, B_d)
// filled by init_spline_matrices() are still not freed here -- confirm
// whether spline_z owns them or they leak as well.
CudaSafeCall( hipFree(F_d) );
CudaSafeCall( hipFree(fft_arr->xa_d) );
free(sett->M);
hipfftDestroy(plans->plan);
hipfftDestroy(plans->pl_int);
hipfftDestroy(plans->pl_inv);
} // end of cleanup & memory free
/* Command line options handling: coincidences */
// Parse command-line options for the coincidences program into sett/opts.
// Same getopt_long_only pattern as handle_opts(); exits on --help.
void handle_opts_coinc( Search_settings *sett,
Command_line_opts_coinc *opts,
int argc,
char* argv[]) {
opts->wd=NULL;
strcpy (opts->prefix, TOSTR(PREFIX));
strcpy (opts->dtaprefix, TOSTR(DTAPREFIX));
// Default initial value of the data sampling time
sett->dt = 0.5;
opts->help_flag=0;
static int help_flag=0;
// Default value of the minimal number of coincidences
opts->mincoin=3;
// Default value of the narrow-down parameter
opts->narrowdown=0.5;
// Default value of the cell shift: 0000 (no shifts)
opts->shift=0;
// Default value of the cell scaling: 1111 (no scaling)
opts->scale=1111;
// Default signal-to-noise threshold cutoff
opts->snrcutoff=6;
// Reading arguments
while (1) {
static struct option long_options[] = {
{"help", no_argument, &help_flag, 1},
// Cell shifts
{"shift", required_argument, 0, 's'},
// Cell scaling
{"scale", required_argument, 0, 'z'},
// Reference frame number
{"refr", required_argument, 0, 'r'},
// output directory
{"output", required_argument, 0, 'o'},
// input data directory
{"data", required_argument, 0, 'd'},
// fpo value
{"fpo", required_argument, 0, 'p'},
// data sampling time
{"dt", required_argument, 0, 't'},
// triggers' name prefactor
{"trigname", required_argument, 0, 'e'},
// Location of the reference grid.bin and starting_date files
{"refloc", required_argument, 0, 'g'},
// Minimal number of coincidences recorded in the output
{"mincoin", required_argument, 0, 'm'},
// Narrow down the frequency band (+- the center of band)
{"narrowdown", required_argument, 0, 'n'},
// Signal-to-noise threshold cutoff
{"snrcutoff", required_argument, 0, 'c'},
{0, 0, 0, 0}
};
// --help: print usage and quit.
if (help_flag) {
printf("polgraw-allsky periodic GWs: search for concidences among candidates\n");
printf("Usage: ./coincidences -[switch1] <value1> -[switch2] <value2> ...\n") ;
printf("Switches are:\n\n");
printf("-data Data directory (default is ./candidates)\n");
printf("-output Output directory (default is ./coinc-results)\n");
printf("-shift Cell shifts in fsda directions (4 digit number, e.g. 0101, default 0000)\n");
printf("-scale Cell scaling in fsda directions (4 digit number, e.g. 4824, default 1111)\n");
printf("-refr Reference frame number\n");
printf("-fpo Reference band frequency fpo value\n");
printf("-dt Data sampling time dt (default value: 0.5)\n");
printf("-trigname Part of triggers' name (for identifying files)\n");
printf("-refloc Location of the reference grid.bin and starting_date files\n");
printf("-mincoin Minimal number of coincidences recorded\n");
printf("-narrowdown Narrow-down the frequency band (range [0, 0.5] +- around center)\n");
printf("-snrcutoff Signal-to-noise threshold cutoff (default value: 6)\n\n");
printf("Also:\n\n");
printf("--help This help\n");
exit (0);
}
int option_index = 0;
int c = getopt_long_only (argc, argv, "p:o:d:s:z:r:t:e:g:m:n:c:", long_options, &option_index);
if (c == -1)
break;
switch (c) {
case 'p':
sett->fpo = atof(optarg);
break;
case 's': // Cell shifts
// NOTE(review): shift is read with atof while scale uses atoi --
// confirm whether shift is really a floating-point field.
opts->shift = atof(optarg);
break;
case 'z': // Cell scaling
opts->scale = atoi(optarg);
break;
case 'r':
opts->refr = atoi(optarg);
break;
case 'o':
strcpy(opts->prefix, optarg);
break;
case 'd':
strcpy(opts->dtaprefix, optarg);
break;
case 't':
sett->dt = atof(optarg);
break;
case 'e':
strcpy(opts->trigname, optarg);
break;
case 'g':
strcpy(opts->refloc, optarg);
break;
case 'm':
opts->mincoin = atoi(optarg);
break;
case 'n':
opts->narrowdown = atof(optarg);
break;
case 'c':
opts->snrcutoff = atof(optarg);
break;
case '?':
break;
default:
break ;
} /* switch c */
} /* while 1 */
// Putting the parameter in triggers' frequency range [0, pi]
opts->narrowdown *= M_PI;
printf("#mb add info at the beginning...\n");
printf("The SNR threshold cutoff is %.12f, ", opts->snrcutoff);
printf("corresponding to F-statistic value of %.12f\n",
pow(opts->snrcutoff, 2)/2. + 2);
} // end of command line options handling: coincidences
#if 0
/* Manage grid matrix (read from grid.bin, find eigenvalues
* and eigenvectors) and reference GPS time from starting_time
* (expected to be in the same directory)
*/
void manage_grid_matrix(
Search_settings *sett,
Command_line_opts_coinc *opts) {
sett->M = (double *)calloc(16, sizeof (double));
FILE *data;
char filename[512];
sprintf (filename, "%s/grid.bin", opts->refloc);
if ((data=fopen (filename, "r")) != NULL) {
printf("Reading the reference grid.bin at %s\n", opts->refloc);
fread ((void *)&sett->fftpad, sizeof (int), 1, data);
printf("fftpad from the grid file: %d\n", sett->fftpad);
fread ((void *)sett->M, sizeof(double), 16, data);
// We actually need the second (Fisher) matrix from grid.bin,
// hence the second fread:
fread ((void *)sett->M, sizeof(double), 16, data);
fclose (data);
} else {
perror (filename);
exit(EXIT_FAILURE);
}
/* //#mb seems not needed at the moment
sprintf (filename, "%s/starting_date", opts->refloc);
if ((data=fopen (filename, "r")) != NULL) {
fscanf(data, "%le", &opts->refgps);
printf("Reading the reference starting_date file at %s The GPS time is %12f\n", opts->refloc, opts->refgps);
fclose (data);
} else {
perror (filename);
exit(EXIT_FAILURE);
}
*/
// Calculating the eigenvectors and eigenvalues
gsl_matrix_view m = gsl_matrix_view_array(sett->M, 4, 4);
gsl_vector *eval = gsl_vector_alloc(4);
gsl_matrix *evec = gsl_matrix_alloc(4, 4);
gsl_eigen_symmv_workspace *w = gsl_eigen_symmv_alloc(4);
gsl_eigen_symmv(&m.matrix, eval, evec, w);
gsl_eigen_symmv_free(w);
double eigval[4], eigvec[4][4];
// Saving the results to the settings struct sett->vedva[][]
{ int i, j;
for(i=0; i<4; i++) {
eigval[i] = gsl_vector_get(eval, i);
gsl_vector_view evec_i = gsl_matrix_column(evec, i);
for(j=0; j<4; j++)
eigvec[j][i] = gsl_vector_get(&evec_i.vector, j);
}
// This is an auxiliary matrix composed of the eigenvector
// columns multiplied by a matrix with sqrt(eigenvalues) on diagonal
for(i=0; i<4; i++) {
for(j=0; j<4; j++) {
sett->vedva[i][j] = eigvec[i][j]*sqrt(eigval[j]);
// printf("%.12le ", sett->vedva[i][j]);
}
// printf("\n");
}
}
/*
//#mb matrix generated in matlab, for tests
double _tmp[4][4] = {
{-2.8622034614137332e-001, -3.7566564762376159e-002, -4.4001551065376701e-012, -3.4516253934827171e-012},
{-2.9591999145463371e-001, 3.6335210834374479e-002, 8.1252443441098394e-014, -6.8170555119669981e-014},
{1.5497867603229576e-005, 1.9167007413107127e-006, 1.0599051611325639e-008, -5.0379548388381567e-008},
{2.4410008440913992e-005, 3.2886518554938671e-006, -5.7338464150027107e-008, -9.3126913365595100e-009},
};
{ int i,j;
for(i=0; i<4; i++)
for(j=0; j<4; j++)
sett->vedva[i][j] = _tmp[i][j];
}
printf("\n");
{ int i, j;
for(i=0; i<4; i++) {
for(j=0; j<4; j++) {
printf("%.12le ", sett->vedva[i][j]);
}
printf("\n");
}
}
*/
gsl_vector_free (eval);
gsl_matrix_free (evec);
} // end of manage grid matrix
#endif
/*---------------------------------------------------------------------------*/
/*
Initialize CUDA: cuinit
- sets cuda device to (in priority order): cdev, 0
- returns: device id or -1 on error
*/
/* Initialize the GPU runtime: cuinit
 * - sets the device to (in priority order): cdev, 0
 * - prints a table of all visible devices
 * - returns: the selected device id, or -1 on error
 */
int cuinit(int cdev)
{
int dev, deviceCount = 0;
hipDeviceProp_t deviceProp;
if (hipGetDeviceCount(&deviceCount) != hipSuccess) {
printf("ERROR: hipGetDeviceCount FAILED CUDA Driver and Runtime version may be mismatched.\n");
return(-1);
}
if (deviceCount == 0) {
printf("ERROR: There is no device supporting CUDA\n");
return(-1);
}
// BUGFIX: the original test used && (cdev < 0 && cdev >= deviceCount),
// which can never be true, so an out-of-range device id was never
// corrected. Either a negative id or one >= deviceCount must fall
// back to device 0.
if (cdev < 0 || cdev >= deviceCount) {
printf("\nWARNING: Device %d is not available! Trying device 0\n", cdev);
cdev = 0;
}
printf("__________________________________CUDA devices___________________________________\n");
printf("Set | ID | Name | Gmem(B) | Smem(B) | Cmem(B) | C.Cap. | Thr/bl |\n");
for (dev = 0; dev < deviceCount; ++dev) {
hipGetDeviceProperties(&deviceProp, dev);
// 9999.9999 is the sentinel compute capability of an emulated device.
if (deviceProp.major == 9999 && deviceProp.minor == 9999) {
printf("- | %1d | %16s | Error | Error | Error | Error | Error |\n", dev, deviceProp.name );
if ( dev==cdev ) {
printf("ERROR: Can't set device %d\n", cdev);
return(-1);
}
}
if (dev==cdev) {
printf(" * |");
hipSetDevice(cdev);
} else {
printf(" |");
}
printf(" %1d | %18.18s | %11Zu | %7Zu | %7Zu | %d.%d | %6d |\n",
dev, deviceProp.name, deviceProp.totalGlobalMem, deviceProp.sharedMemPerBlock,
deviceProp.totalConstMem, deviceProp.major, deviceProp.minor, deviceProp.maxThreadsPerBlock );
}
printf("---------------------------------------------------------------------------------\n");
/* enable mapped memory so hipHostMallocMapped buffers get device pointers */
hipSetDeviceFlags(hipDeviceMapHost);
/* force initialization */
hipDeviceSynchronize();
return(cdev);
}
|
5b400582127268d4b96200cd9e83d8a7ac15350b.cu
|
#include <stdio.h>
#include <stdlib.h>
#include <unistd.h>
#include <math.h>
#include <complex.h>
#include <string.h>
#include <errno.h>
#include <sys/types.h>
#include <sys/stat.h>
#include <fcntl.h>
#include <getopt.h>
#include <time.h>
#include "cuda_error.h"
#include "init.h"
#include "struct.h"
#include "settings.h"
#include "auxi.h"
#include "kernels.h"
#include "spline_z.h"
#include <cuda_runtime_api.h>
#include <cufft.h>
/* Command line options handling: search
*/
void handle_opts( Search_settings *sett,
Command_line_opts *opts,
int argc,
char* argv[]) {
opts->hemi=0;
opts->wd=NULL;
// Default F-statistic threshold
opts->trl=20;
strcpy (opts->prefix, TOSTR(PREFIX));
strcpy (opts->dtaprefix, TOSTR(DTAPREFIX));
opts->label[0] = '\0';
opts->range[0] = '\0';
opts->getrange[0] = '\0';
opts->usedet[0] = '\0';
opts->addsig[0] = '\0';
// Initial value of starting frequency set to a negative quantity.
// If this is not changed by the command line value, fpo is calculated
// from the band number b (fpo = fpo = fstart + 0.96875*b/(2dt))
sett->fpo = -1;
// Default initial value of the data sampling time
sett->dt = 0.5;
opts->help_flag=0;
opts->white_flag=0;
opts->s0_flag=0;
opts->checkp_flag=0;
static int help_flag=0, white_flag=0, s0_flag=0, checkp_flag=1;
// Reading arguments
while (1) {
static struct option long_options[] = {
{"help", no_argument, &help_flag, 1},
{"whitenoise", no_argument, &white_flag, 1},
{"nospindown", no_argument, &s0_flag, 1},
{"nocheckpoint", no_argument, &checkp_flag, 0},
// frame number
{"ident", required_argument, 0, 'i'},
// frequency band number
{"band", required_argument, 0, 'b'},
// output directory
{"output", required_argument, 0, 'o'},
// input data directory
{"data", required_argument, 0, 'd'},
// non-standard label for naming files
{"label", required_argument, 0, 'l'},
// narrower grid range parameter file
{"range", required_argument, 0, 'r'},
// write full grid range to file
{"getrange", required_argument, 0, 'g'},
// change directory parameter
{"cwd", required_argument, 0, 'c'},
// interpolation method
{"threshold", required_argument, 0, 't'},
// hemisphere
{"hemisphere", required_argument, 0, 'h'},
// fpo value
{"fpo", required_argument, 0, 'p'},
// add signal parameters
{"addsig", required_argument, 0, 'x'},
// which detectors to use
{"usedet", required_argument, 0, 'u'},
// data sampling time
{"dt", required_argument, 0, 's'},
{0, 0, 0, 0}
};
if (help_flag) {
printf("polgraw-allsky periodic GWs: search for candidate signals with the F-statistic\n");
printf("Usage: ./search -[switch1] <value1> -[switch2] <value2> ...\n") ;
printf("Switches are:\n\n");
printf("-d, -data Data directory (default is .)\n");
printf("-o, -output Output directory (default is ./candidates)\n");
printf("-i, -ident Frame number\n");
printf("-b, -band Band number\n");
printf("-l, -label Custom label for the input and output files\n");
printf("-r, -range Use file with grid range or pulsar position\n");
printf("-g, -getrange Write grid ranges & exit (ignore -r)\n");
printf("-c, -cwd Change to directory <dir>\n");
printf("-t, -threshold Threshold for the F-statistic (default is 20)\n");
printf("-h, -hemisphere Hemisphere (default is 0 - does both)\n");
printf("-p, -fpo Reference band frequency fpo value\n");
printf("-s, -dt data sampling time dt (default value: 0.5)\n");
printf("-u, -usedet Use only detectors from string (default is use all available)\n");
printf("-x, -addsig Add signal with parameters from <file>\n\n");
printf("Also:\n\n");
printf("--whitenoise White Gaussian noise assumed\n");
printf("--nospindown Spindowns neglected\n");
printf("--nocheckpoint State file won't be created (no checkpointing)\n");
printf("--help This help\n");
exit(EXIT_SUCCESS);
}
int option_index = 0;
int c = getopt_long_only(argc, argv, "i:b:o:d:l:r:g:c:t:h:p:x:s:u:",
long_options, &option_index);
if (c == -1)
break;
switch (c) {
case 'i':
opts->ident = atoi (optarg);
break;
case 't':
opts->trl = atof(optarg);
break;
case 'h':
opts->hemi = atof(optarg);
break;
case 'b':
opts->band = atoi(optarg);
break;
case 'o':
strcpy(opts->prefix, optarg);
break;
case 'd':
strcpy(opts->dtaprefix, optarg);
break;
case 'l':
opts->label[0] = '_';
strcpy(1+opts->label, optarg);
break;
case 'r':
strcpy(opts->range, optarg);
break;
case 'g':
strcpy(opts->getrange, optarg);
break;
case 'c':
opts->wd = (char *) malloc (1+strlen(optarg));
strcpy(opts->wd, optarg);
break;
case 'p':
sett->fpo = atof(optarg);
break;
case 'x':
strcpy(opts->addsig, optarg);
break;
case 's':
sett->dt = atof(optarg);
break;
case 'u':
strcpy(opts->usedet, optarg);
break;
case '?':
break;
default:
break ;
} /* switch c */
} /* while 1 */
opts->white_flag = white_flag;
opts->s0_flag = s0_flag;
opts->checkp_flag = checkp_flag;
printf("Input data directory is %s\n", opts->dtaprefix);
printf("Output directory is %s\n", opts->prefix);
printf("Frame and band numbers are %d and %d\n", opts->ident, opts->band);
// Starting band frequency:
// fpo_val is optionally read from the command line
// Its initial value is set to -1
if(!(sett->fpo >= 0))
// The usual definition (multiplying the offset by B=1/(2dt))
// !!! in RDC_O1 the fstart equals 10, not 100 like in VSR1 !!!
//
sett->fpo = 10. + 0.96875*opts->band*(0.5/sett->dt);
printf("The reference frequency fpo is %f\n", sett->fpo);
printf("The data sampling time dt is %f\n", sett->dt);
if (opts->white_flag)
printf ("Assuming white Gaussian noise\n");
// For legacy: FFT is now the only option
printf ("Using fftinterp=FFT (FFT interpolation by zero-padding)\n");
if(opts->trl!=20)
printf ("Threshold for the F-statistic is %lf\n", opts->trl);
if(opts->hemi)
printf ("Search for hemisphere %d\n", opts->hemi);
if (opts->s0_flag)
printf ("Assuming s_1 = 0.\n");
if (strlen(opts->label))
printf ("Using '%s' as data label\n", opts->label);
if(strlen(opts->getrange)){
printf ("Writing full grid ranges to '%s'\n", opts->getrange);
if(strlen(opts->range)) {
opts->range[0] = '\0';
printf (" WARNING! -r option will be ignored...\n");
}
}
if (strlen(opts->range))
printf ("Obtaining grid range from '%s'\n", opts->range);
if (strlen(opts->addsig))
printf ("Adding signal from '%s'\n", opts->addsig);
if (opts->wd) {
printf ("Changing working directory to %s\n", opts->wd);
if (chdir(opts->wd)) { perror (opts->wd); abort (); }
}
} // end of command line options handling
/* Generate grid from the M matrix (grid.bin)
 *
 * Reads the fftpad value and the 16-element (4x4) grid-generating
 * matrix M from grid.bin, allocating sett->M on the way.
 * Exits on any I/O failure (including a truncated file).
 */
void read_grid( Search_settings *sett,
                Command_line_opts *opts) {

  sett->M = (double *) calloc (16, sizeof (double));

  FILE *data;
  char filename[512];

  // In case when -usedet option is used for one detector
  // i.e. opts->usedet has a length of 2 (e.g. H1 or V1),
  // read grid.bin from this detector subdirectory
  // (see detectors_settings() in settings.c for details)
  if(strlen(opts->usedet)==2)
    sprintf (filename, "%s/%03d/%s/grid.bin", opts->dtaprefix, opts->ident, opts->usedet);
  else
    sprintf (filename, "%s/%03d/grid.bin", opts->dtaprefix, opts->ident);

  // "rb": grid.bin is a binary file; the 'b' matters on non-POSIX platforms
  if ((data=fopen (filename, "rb")) != NULL) {
    printf("Using grid file from %s\n", filename);
    // Check fread return values so a truncated grid.bin is not
    // silently accepted (previously the results were ignored)
    if (fread ((void *)&sett->fftpad, sizeof(int), 1, data) != 1) {
      fprintf(stderr, "Error reading fftpad from %s\n", filename);
      fclose (data);
      exit(EXIT_FAILURE);
    }
    printf("Using fftpad from the grid file: %d\n", sett->fftpad);
    // M: vector of 16 components consisting of 4 rows
    // of 4x4 grid-generating matrix
    if (fread ((void *)sett->M, sizeof(double), 16, data) != 16) {
      fprintf(stderr, "Error reading the grid matrix from %s\n", filename);
      fclose (data);
      exit(EXIT_FAILURE);
    }
    fclose (data);
  } else {
    perror (filename);
    exit(EXIT_FAILURE);
  }

} // end of read grid
/* Array initialization */

// Allocates per-detector host (pinned/mapped) and device arrays, reads the
// time-domain data and ephemerides from disk, precomputes per-detector
// trigonometric constants, and launches the kernel filling the sin/cos
// modulation arrays. Exits on a missing data file; returns early (without
// cleanup) on a missing ephemeris file.
void init_arrays( Search_settings *sett,
                  Command_line_opts *opts,
                  Aux_arrays *aux_arr,
                  double **F_d) {

  int i, status;

  // Allocates and initializes to zero the data, detector ephemeris
  // and the F-statistic arrays
  FILE *data;

  // Derived transform lengths (consumed later by plan_fft)
  sett->Ninterp = sett->interpftpad*sett->nfft;
  sett->nfftf = sett->fftpad*sett->nfft;

  for(i=0; i<sett->nifo; i++) {

    /// ifo[i].sig.xDat = (double *) calloc(sett->N, sizeof(double));
    /// mapped memory works for CUDART_VERSION >= 2020
    /// we should test if it's available, if not copy data explicitly to device
    // Pinned, mapped host buffer for the input data; xDat_d is the
    // device-side alias of the same memory (zero-copy access)
    CudaSafeCall( cudaHostAlloc((void **)&(ifo[i].sig.xDat), sett->N*sizeof(double),
                                cudaHostAllocMapped) );
    CudaSafeCall( cudaHostGetDevicePointer((void **)&(ifo[i].sig.xDat_d),
                                           (void *)ifo[i].sig.xDat, 0) );

    // Input time-domain data handling
    //
    // The file name ifo[i].xdatname is constructed
    // in settings.c, while looking for the detector
    // subdirectories
    if((data = fopen(ifo[i].xdatname, "r")) != NULL) {
      // NOTE(review): fread's return value is stored but never checked
      status = fread((void *)(ifo[i].sig.xDat),
                     sizeof(double), sett->N, data);
      fclose (data);
    } else {
      perror (ifo[i].xdatname);
      exit(EXIT_FAILURE);
    }

    int j, Nzeros=0;
    // Checking for null values in the data
    for(j=0; j < sett->N; j++)
      if(!ifo[i].sig.xDat[j]) Nzeros++;
    ifo[i].sig.Nzeros = Nzeros;

    // factor N/(N - Nzeros) to account for null values in the data
    ifo[i].sig.crf0 = (double)sett->N/(sett->N - ifo[i].sig.Nzeros);

    // Estimation of the variance for each detector
    ifo[i].sig.sig2 = (ifo[i].sig.crf0)*var(ifo[i].sig.xDat, sett->N);

    // Pinned, mapped buffer for the ephemeris (3 coordinates per sample)
    CudaSafeCall( cudaHostAlloc((void **)&(ifo[i].sig.DetSSB), 3*sett->N*sizeof(double),
                                cudaHostAllocMapped) );
    CudaSafeCall( cudaHostGetDevicePointer((void **)&(ifo[i].sig.DetSSB_d),
                                           (void *)ifo[i].sig.DetSSB, 0) );

    // Ephemeris file handling
    char filename[512];
    sprintf (filename, "%s/%03d/%s/DetSSB.bin",
             opts->dtaprefix, opts->ident, ifo[i].name);

    if((data = fopen(filename, "r")) != NULL) {
      // Detector position w.r.t Solar System Baricenter
      // for every datapoint
      status = fread((void *)(ifo[i].sig.DetSSB),
                     sizeof(double), 3*sett->N, data);
      // Deterministic phase defining the position of the Earth
      // in its diurnal motion at t=0
      status = fread((void *)(&ifo[i].sig.phir),
                     sizeof(double), 1, data);
      // Earth's axis inclination to the ecliptic at t=0
      status = fread((void *)(&ifo[i].sig.epsm),
                     sizeof(double), 1, data);
      fclose (data);

      printf("Using %s as detector %s ephemerids...\n", filename, ifo[i].name);
    } else {
      // NOTE(review): early return leaves previously allocated buffers
      // un-freed and the remaining detectors uninitialized — confirm intent
      perror (filename);
      return ;
    }

    // sincos
    ifo[i].sig.sphir = sin(ifo[i].sig.phir);
    ifo[i].sig.cphir = cos(ifo[i].sig.phir);
    ifo[i].sig.sepsm = sin(ifo[i].sig.epsm);
    ifo[i].sig.cepsm = cos(ifo[i].sig.epsm);

    // NOTE(review): overwritten each iteration; the values that survive are
    // re-assigned from detector 0 after the loop below anyway
    sett->sepsm = ifo[i].sig.sepsm;
    sett->cepsm = ifo[i].sig.cepsm;

    // Device-only work arrays for this detector
    CudaSafeCall( cudaMalloc((void**)&ifo[i].sig.xDatma_d,
                             sizeof(cufftDoubleComplex)*sett->N) );
    CudaSafeCall( cudaMalloc((void**)&ifo[i].sig.xDatmb_d,
                             sizeof(cufftDoubleComplex)*sett->N) );
    CudaSafeCall( cudaMalloc((void**)&(ifo[i].sig.aa_d),
                             sizeof(double)*sett->N) );
    CudaSafeCall( cudaMalloc((void**)&(ifo[i].sig.bb_d),
                             sizeof(double)*sett->N) );
    CudaSafeCall( cudaMalloc((void**)&(ifo[i].sig.shft_d),
                             sizeof(double)*sett->N) );
    CudaSafeCall( cudaMalloc((void**)&(ifo[i].sig.shftf_d),
                             sizeof(double)*sett->N) );

  } // end loop for detectors

  // Check if the ephemerids have the same epsm parameter
  for(i=1; i<sett->nifo; i++) {
    if(!(ifo[i-1].sig.sepsm == ifo[i].sig.sepsm)) {
      printf("The parameter epsm (DetSSB.bin) differs for detectors %s and %s. Aborting...\n", ifo[i-1].name, ifo[i].name);
      exit(EXIT_FAILURE);
    }
  }

  // if all is well with epsm, take the first value
  sett->sepsm = ifo[0].sig.sepsm;
  sett->cepsm = ifo[0].sig.cepsm;

  // *F = (double *) calloc(2*sett->nfft, sizeof(double));
  // Device array for the F-statistic
  CudaSafeCall ( cudaMalloc((void **)F_d, 2*sett->nfft*sizeof(double)));

  // Auxiliary arrays, Earth's rotation
  CudaSafeCall( cudaMalloc((void**)&(aux_arr->t2_d),
                           sizeof(double)*sett->N) );
  CudaSafeCall( cudaMalloc((void**)&(aux_arr->cosmodf_d),
                           sizeof(double)*sett->N) );
  CudaSafeCall( cudaMalloc((void**)&(aux_arr->sinmodf_d),
                           sizeof(double)*sett->N) );
  CudaSafeCall( cudaMalloc((void**)&(aux_arr->tshift_d),
                           sizeof(double)*sett->N) );

  init_spline_matrices(&aux_arr->diag_d, &aux_arr->ldiag_d, &aux_arr->udiag_d,
                       &aux_arr->B_d, sett->Ninterp);

  // One thread per sample, 256 threads per block
  compute_sincosmodf<<<sett->N/256+1,256>>>(aux_arr->sinmodf_d, aux_arr->cosmodf_d,
                                            sett->omr, sett->N);

} // end of init arrays
/* Search range */

// Establishes the grid ranges the search sweeps: spindown (spndr), sky
// positions (nr, mr) and hemisphere (pmr). Ranges come either from an
// ascii range file (-r) or from the grid matrix via gridr(); with -g the
// full ranges are additionally written to a file.
void set_search_range( Search_settings *sett,
                       Command_line_opts *opts,
                       Search_range *s_range) {

  // Hemispheres (with respect to the ecliptic)
  if(opts->hemi) {
    // A single requested hemisphere
    s_range->pmr[0] = opts->hemi;
    s_range->pmr[1] = opts->hemi;

  } else {
    // Both hemispheres
    s_range->pmr[0] = 1;
    s_range->pmr[1] = 2;
  }

  // If the parameter range is invoked, the search is performed
  // within the range of grid parameters from an ascii file
  // ("-r range_file" from the command line)
  FILE *data;
  if (strlen (opts->range)) {

    if ((data=fopen (opts->range, "r")) != NULL) {

      // Four [min, max] pairs: spindown, sky n, sky m, hemisphere
      int aqq = fscanf (data, "%d %d %d %d %d %d %d %d",
                        s_range->spndr, 1+s_range->spndr, s_range->nr,
                        1+s_range->nr, s_range->mr, 1+s_range->mr,
                        s_range->pmr, 1+s_range->pmr);

      if (aqq != 8) {
        printf("Error when reading range file!\n");
        exit(EXIT_FAILURE);
      }

      fclose (data);

    } else {
      perror (opts->range);
      exit(EXIT_FAILURE);
    }

  } else {

    // Establish the grid range in which the search will be performed
    // with the use of the M matrix from grid.bin
    gridr(
        sett->M,
        s_range->spndr,
        s_range->nr,
        s_range->mr,
        sett->oms,
        sett->Smax);

    // -g option: dump the full ranges to a file and continue
    if (strlen(opts->getrange)) {

      FILE *data;
      if ((data=fopen (opts->getrange, "w")) != NULL) {

        fprintf(data, "%d %d\n%d %d\n%d %d\n%d %d\n",
                s_range->spndr[0], s_range->spndr[1],
                s_range->nr[0], s_range->nr[1],
                s_range->mr[0], s_range->mr[1],
                s_range->pmr[0], s_range->pmr[1] );

        printf("Wrote input data grid ranges to %s\n", opts->getrange);
        fclose (data);
        // exit(EXIT_SUCCESS);

      } else {

        printf("Can't open %s file for writing\n", opts->getrange);
        exit(EXIT_FAILURE);

      }
    }
  }

  printf("set_search_range() - the grid ranges are maximally this:\n");
  printf("(spndr, nr, mr, pmr pairs): %d %d %d %d %d %d %d %d\n", \
         s_range->spndr[0], s_range->spndr[1], s_range->nr[0], s_range->nr[1],
         s_range->mr[0], s_range->mr[1], s_range->pmr[0], s_range->pmr[1]);

  // NOTE(review): sett->Smin is printed here but not set in this function;
  // presumably initialized elsewhere in settings — confirm
  printf("Smin: %le, -Smax: %le\n", sett->Smin, sett->Smax);

} // end of set search range
/* FFT Plans
 *
 * Allocates one contiguous complex buffer shared by xa_d (first half)
 * and xb_d (second half), and creates the three cuFFT Z2Z plans.
 */
void plan_fft (Search_settings *sett,
               // Command_line_opts *opts,
               FFT_plans *plans,
               FFT_arrays *fft_arr
               // Aux_arrays *aux_arr
               ) {

  // sett->Ninterp = sett->interpftpad*sett->nfft; //moved to init_arrays

  // Working length: max of the zero-padded FFT length and the
  // interpolation length, so a single buffer serves both transforms
  fft_arr->arr_len = (sett->fftpad*sett->nfft > sett->Ninterp
                      ? sett->fftpad*sett->nfft : sett->Ninterp);

  // Single allocation; xb_d aliases the second half, so xa_d must NOT be
  // reallocated afterwards. (A duplicate cudaMalloc that used to follow
  // the plan creation leaked this buffer and broke the
  // xb_d == xa_d + arr_len layout — removed.)
  CudaSafeCall ( cudaMalloc((void **)&fft_arr->xa_d, 2*fft_arr->arr_len*sizeof(cufftDoubleComplex)) );
  fft_arr->xb_d = fft_arr->xa_d + fft_arr->arr_len;

  // sett->nfftf = sett->fftpad*sett->nfft; // moved to init_arrays

  // no need for plans '2' - dimensions are the same
  cufftPlan1d( &(plans->plan), sett->nfftf, CUFFT_Z2Z, 1);
  cufftPlan1d( &(plans->pl_int), sett->nfft, CUFFT_Z2Z, 1);
  cufftPlan1d( &(plans->pl_inv), sett->Ninterp, CUFFT_Z2Z, 1);

}
/* Checkpointing
 *
 * Restores the search position (pst, mst, nst, sst, FNum) from the state
 * file when checkpointing is enabled, otherwise starts from the beginning
 * of the range. An empty/unreadable state file means "nothing to do".
 */
void read_checkpoints(Command_line_opts *opts,
                      Search_range *s_range,
                      int *FNum) {

  // Default starting point: the beginning of the whole search range.
  // Used when checkpointing is off, when no state file exists, or when
  // the state file cannot be fully parsed.
  s_range->pst = s_range->pmr[0];
  s_range->mst = s_range->mr[0];
  s_range->nst = s_range->nr[0];
  s_range->sst = s_range->spndr[0];
  *FNum = 0;

  if(opts->checkp_flag) {

    // filename of checkpoint state file, depending on the hemisphere
    if(opts->hemi)
      sprintf(opts->qname, "state_%03d_%04d%s_%d.dat",
              opts->ident, opts->band, opts->label, opts->hemi);
    else
      sprintf(opts->qname, "state_%03d_%04d%s.dat",
              opts->ident, opts->band, opts->label);

    FILE *state;
    if((state = fopen(opts->qname, "r")) != NULL) {

      // Scan the state file to get last recorded parameters.
      // Require all 5 values: the previous '== EOF' test missed short
      // reads, which left some of the fields uninitialized.
      if((fscanf(state, "%d %d %d %d %d", &s_range->pst, &s_range->mst,
                 &s_range->nst, &s_range->sst, FNum)) != 5) {

        // This means that state file is empty (=end of the calculations)
        fprintf (stderr, "State file empty: nothing to do...\n");
        fclose (state);
        return;
      }
      fclose (state);
    }
    // No state file - start from the beginning (defaults set above)
  }

} // end reading checkpoints
/* Cleanup & memory free
 *
 * Releases everything allocated in init_arrays()/plan_fft(): per-detector
 * pinned host buffers and device arrays, the auxiliary device arrays, the
 * F-statistic array, the shared FFT buffer, the host-side grid matrix and
 * the cuFFT plans.
 */
void cleanup(
             Search_settings *sett,
             Command_line_opts *opts,
             Search_range *s_range,
             FFT_plans *plans,
             FFT_arrays *fft_arr,
             Aux_arrays *aux,
             double *F_d) {

  // Per-detector host (pinned) and device buffers
  int n;
  for(n=0; n<sett->nifo; n++) {
    CudaSafeCall( cudaFreeHost(ifo[n].sig.xDat) );
    CudaSafeCall( cudaFreeHost(ifo[n].sig.DetSSB) );
    CudaSafeCall( cudaFree(ifo[n].sig.xDatma_d) );
    CudaSafeCall( cudaFree(ifo[n].sig.xDatmb_d) );
    CudaSafeCall( cudaFree(ifo[n].sig.aa_d) );
    CudaSafeCall( cudaFree(ifo[n].sig.bb_d) );
    CudaSafeCall( cudaFree(ifo[n].sig.shft_d) );
    CudaSafeCall( cudaFree(ifo[n].sig.shftf_d) );
  }

  // Auxiliary device arrays, F-statistic array and the shared FFT buffer
  CudaSafeCall( cudaFree(aux->cosmodf_d) );
  CudaSafeCall( cudaFree(aux->sinmodf_d) );
  CudaSafeCall( cudaFree(aux->t2_d) );
  CudaSafeCall( cudaFree(F_d) );
  CudaSafeCall( cudaFree(fft_arr->xa_d) );

  // Host-side grid matrix and the cuFFT plans
  free(sett->M);
  cufftDestroy(plans->plan);
  cufftDestroy(plans->pl_int);
  cufftDestroy(plans->pl_inv);

} // end of cleanup & memory free
/* Command line options handling: coincidences */

// Parses the coincidences binary's command line into opts/sett.
// Defaults are assigned first; long options map one-to-one onto the
// short switch letters handed to getopt_long_only(). Prints a short
// summary of the effective settings at the end.
void handle_opts_coinc( Search_settings *sett,
                        Command_line_opts_coinc *opts,
                        int argc,
                        char* argv[]) {

  opts->wd=NULL;

  // Compile-time default output/input directories
  strcpy (opts->prefix, TOSTR(PREFIX));
  strcpy (opts->dtaprefix, TOSTR(DTAPREFIX));

  // Default initial value of the data sampling time
  sett->dt = 0.5;

  opts->help_flag=0;
  static int help_flag=0;

  // Default value of the minimal number of coincidences
  opts->mincoin=3;

  // Default value of the narrow-down parameter
  opts->narrowdown=0.5;

  // Default value of the cell shift: 0000 (no shifts)
  opts->shift=0;

  // Default value of the cell scaling: 1111 (no scaling)
  opts->scale=1111;

  // Default signal-to-noise threshold cutoff
  opts->snrcutoff=6;

  // Reading arguments
  while (1) {
    static struct option long_options[] = {
      {"help", no_argument, &help_flag, 1},
      // Cell shifts
      {"shift", required_argument, 0, 's'},
      // Cell scaling
      {"scale", required_argument, 0, 'z'},
      // Reference frame number
      {"refr", required_argument, 0, 'r'},
      // output directory
      {"output", required_argument, 0, 'o'},
      // input data directory
      {"data", required_argument, 0, 'd'},
      // fpo value
      {"fpo", required_argument, 0, 'p'},
      // data sampling time
      {"dt", required_argument, 0, 't'},
      // triggers' name prefactor
      {"trigname", required_argument, 0, 'e'},
      // Location of the reference grid.bin and starting_date files
      {"refloc", required_argument, 0, 'g'},
      // Minimal number of coincidences recorded in the output
      {"mincoin", required_argument, 0, 'm'},
      // Narrow down the frequency band (+- the center of band)
      {"narrowdown", required_argument, 0, 'n'},
      // Signal-to-noise threshold cutoff
      {"snrcutoff", required_argument, 0, 'c'},
      {0, 0, 0, 0}
    };

    // --help prints usage and exits successfully
    if (help_flag) {

      printf("polgraw-allsky periodic GWs: search for concidences among candidates\n");
      printf("Usage: ./coincidences -[switch1] <value1> -[switch2] <value2> ...\n") ;
      printf("Switches are:\n\n");
      printf("-data         Data directory (default is ./candidates)\n");
      printf("-output       Output directory (default is ./coinc-results)\n");
      printf("-shift        Cell shifts in fsda directions (4 digit number, e.g. 0101, default 0000)\n");
      printf("-scale        Cell scaling in fsda directions (4 digit number, e.g. 4824, default 1111)\n");
      printf("-refr         Reference frame number\n");
      printf("-fpo          Reference band frequency fpo value\n");
      printf("-dt           Data sampling time dt (default value: 0.5)\n");
      printf("-trigname     Part of triggers' name (for identifying files)\n");
      printf("-refloc       Location of the reference grid.bin and starting_date files\n");
      printf("-mincoin      Minimal number of coincidences recorded\n");
      printf("-narrowdown   Narrow-down the frequency band (range [0, 0.5] +- around center)\n");
      printf("-snrcutoff    Signal-to-noise threshold cutoff (default value: 6)\n\n");
      printf("Also:\n\n");
      printf("--help            This help\n");

      exit (0);
    }

    int option_index = 0;
    int c = getopt_long_only (argc, argv, "p:o:d:s:z:r:t:e:g:m:n:c:", long_options, &option_index);
    if (c == -1)
      break;

    switch (c) {
    case 'p':
      sett->fpo = atof(optarg);
      break;
    case 's': // Cell shifts
      // NOTE(review): parsed with atof although shift encodes 4 digits —
      // leading zeros are lost (e.g. "0101" -> 101); confirm downstream use
      opts->shift = atof(optarg);
      break;
    case 'z': // Cell scaling
      opts->scale = atoi(optarg);
      break;
    case 'r':
      opts->refr = atoi(optarg);
      break;
    case 'o':
      strcpy(opts->prefix, optarg);
      break;
    case 'd':
      strcpy(opts->dtaprefix, optarg);
      break;
    case 't':
      sett->dt = atof(optarg);
      break;
    case 'e':
      strcpy(opts->trigname, optarg);
      break;
    case 'g':
      strcpy(opts->refloc, optarg);
      break;
    case 'm':
      opts->mincoin = atoi(optarg);
      break;
    case 'n':
      opts->narrowdown = atof(optarg);
      break;
    case 'c':
      opts->snrcutoff = atof(optarg);
      break;
    case '?':
      break;
    default:
      break ;
    } /* switch c */
  } /* while 1 */

  // Putting the parameter in triggers' frequency range [0, pi]
  opts->narrowdown *= M_PI;

  printf("#mb add info at the beginning...\n");
  printf("The SNR threshold cutoff is %.12f, ", opts->snrcutoff);
  printf("corresponding to F-statistic value of %.12f\n",
         pow(opts->snrcutoff, 2)/2. + 2);

} // end of command line options handling: coincidences
#if 0
// NOTE(review): this whole function is compiled out via '#if 0'; kept for
// reference only. Comments below document the intended behavior.
/* Manage grid matrix (read from grid.bin, find eigenvalues
 * and eigenvectors) and reference GPS time from starting_time
 * (expected to be in the same directory)
 */
void manage_grid_matrix(
                        Search_settings *sett,
                        Command_line_opts_coinc *opts) {

  sett->M = (double *)calloc(16, sizeof (double));

  FILE *data;
  char filename[512];
  sprintf (filename, "%s/grid.bin", opts->refloc);

  if ((data=fopen (filename, "r")) != NULL) {

    printf("Reading the reference grid.bin at %s\n", opts->refloc);

    fread ((void *)&sett->fftpad, sizeof (int), 1, data);

    printf("fftpad from the grid file: %d\n", sett->fftpad);

    fread ((void *)sett->M, sizeof(double), 16, data);
    // We actually need the second (Fisher) matrix from grid.bin,
    // hence the second fread:
    fread ((void *)sett->M, sizeof(double), 16, data);

    fclose (data);
  } else {
    perror (filename);
    exit(EXIT_FAILURE);
  }

  /* //#mb seems not needed at the moment
     sprintf (filename, "%s/starting_date", opts->refloc);

     if ((data=fopen (filename, "r")) != NULL) {

     fscanf(data, "%le", &opts->refgps);

     printf("Reading the reference starting_date file at %s The GPS time is %12f\n", opts->refloc, opts->refgps);
     fclose (data);
     } else {
     perror (filename);
     exit(EXIT_FAILURE);
     }
  */

  // Calculating the eigenvectors and eigenvalues (GSL symmetric solver)
  gsl_matrix_view m = gsl_matrix_view_array(sett->M, 4, 4);

  gsl_vector *eval = gsl_vector_alloc(4);
  gsl_matrix *evec = gsl_matrix_alloc(4, 4);

  gsl_eigen_symmv_workspace *w = gsl_eigen_symmv_alloc(4);
  gsl_eigen_symmv(&m.matrix, eval, evec, w);
  gsl_eigen_symmv_free(w);

  double eigval[4], eigvec[4][4];
  // Saving the results to the settings struct sett->vedva[][]
  { int i, j;
    for(i=0; i<4; i++) {
      eigval[i] = gsl_vector_get(eval, i);
      gsl_vector_view evec_i = gsl_matrix_column(evec, i);

      for(j=0; j<4; j++)
        eigvec[j][i] = gsl_vector_get(&evec_i.vector, j);
    }

    // This is an auxiliary matrix composed of the eigenvector
    // columns multiplied by a matrix with sqrt(eigenvalues) on diagonal
    for(i=0; i<4; i++) {
      for(j=0; j<4; j++) {
        sett->vedva[i][j]  = eigvec[i][j]*sqrt(eigval[j]);
        //    printf("%.12le ", sett->vedva[i][j]);
      }
      //  printf("\n");
    }

  }

  /*
  //#mb matrix generated in matlab, for tests
  double _tmp[4][4] = {
  {-2.8622034614137332e-001, -3.7566564762376159e-002, -4.4001551065376701e-012, -3.4516253934827171e-012},
  {-2.9591999145463371e-001, 3.6335210834374479e-002, 8.1252443441098394e-014, -6.8170555119669981e-014},
  {1.5497867603229576e-005, 1.9167007413107127e-006, 1.0599051611325639e-008, -5.0379548388381567e-008},
  {2.4410008440913992e-005, 3.2886518554938671e-006, -5.7338464150027107e-008, -9.3126913365595100e-009},
  };

  { int i,j;
  for(i=0; i<4; i++)
  for(j=0; j<4; j++)
  sett->vedva[i][j]  = _tmp[i][j];
  }

  printf("\n");

  { int i, j;
  for(i=0; i<4; i++) {
  for(j=0; j<4; j++) {
  printf("%.12le ", sett->vedva[i][j]);
  }
  printf("\n");
  }
  }
  */

  gsl_vector_free (eval);
  gsl_matrix_free (evec);

} // end of manage grid matrix
#endif
/*---------------------------------------------------------------------------*/

/*
  Initialize CUDA: cuinit
  - sets cuda device to (in priority order): cdev, 0
  - returns: device id or -1 on error
*/
int cuinit(int cdev)
{
  int dev, deviceCount = 0;
  cudaDeviceProp deviceProp;

  if (cudaGetDeviceCount(&deviceCount) != cudaSuccess) {
    printf("ERROR: cudaGetDeviceCount FAILED CUDA Driver and Runtime version may be mismatched.\n");
    return(-1);
  }
  if (deviceCount == 0) {
    printf("ERROR: There is no device supporting CUDA\n");
    return(-1);
  }
  // Fall back to device 0 when the requested id is out of range.
  // (Bug fix: was 'cdev < 0 && cdev >= deviceCount', which can never be
  // true, so an invalid id was passed straight to cudaSetDevice.)
  if (cdev < 0 || cdev >= deviceCount) {
    printf("\nWARNING: Device %d is not available! Trying device 0\n", cdev);
    cdev = 0;
  }

  printf("__________________________________CUDA devices___________________________________\n");
  printf("Set | ID |        Name      |   Gmem(B)   | Smem(B) | Cmem(B) | C.Cap. | Thr/bl |\n");

  for (dev = 0; dev < deviceCount; ++dev) {
    cudaGetDeviceProperties(&deviceProp, dev);
    // 9999.9999 is the sentinel reported when no real device is present
    if (deviceProp.major == 9999 && deviceProp.minor == 9999) {
      printf("-   | %1d  | %16s | Error | Error | Error | Error | Error |\n", dev, deviceProp.name );
      if ( dev==cdev ) {
        printf("ERROR: Can't set device %d\n", cdev);
        return(-1);
      }
    }
    if (dev==cdev) {
      printf(" *  |");
      cudaSetDevice(cdev);
    } else {
      printf("    |");
    }
    // %zu is the standard size_t length modifier (was the GNU-only %Zu)
    printf(" %1d  | %18.18s | %11zu | %7zu | %7zu |   %d.%d  | %6d |\n",
           dev, deviceProp.name, deviceProp.totalGlobalMem, deviceProp.sharedMemPerBlock,
           deviceProp.totalConstMem, deviceProp.major, deviceProp.minor, deviceProp.maxThreadsPerBlock );
  }
  printf("---------------------------------------------------------------------------------\n");

  /* enable mapped memory */
  cudaSetDeviceFlags(cudaDeviceMapHost);

  /* force initialization */
  // NOTE: cudaThreadSynchronize() is deprecated in favor of
  // cudaDeviceSynchronize(); kept for compatibility with old toolkits
  cudaThreadSynchronize();
  return(cdev);
}
|
b61d2bc0859806c19c193668afd3efff89f792b3.hip
|
// !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
#include "common.h"
// Reports (and clears) the most recent HIP runtime error.
// On failure prints "CUDA error (<file>:<line>): <msg>: <error string>"
// to stderr and terminates the process; on success does nothing.
void checkCUDAErrorFn(const char *msg, const char *file, int line) {
    hipError_t err = hipGetLastError();
    if (err != hipSuccess) {
        fprintf(stderr, "CUDA error");
        if (file != NULL) {
            fprintf(stderr, " (%s:%d)", file, line);
        }
        fprintf(stderr, ": %s: %s\n", msg, hipGetErrorString(err));
        exit(EXIT_FAILURE);
    }
}
namespace StreamCompaction {
    namespace Common {

        /**
         * Maps an array to an array of 0s and 1s for stream compaction.
         * Elements which map to 0 will be removed, and elements which map
         * to 1 will be kept. One thread per element; out-of-range threads
         * exit immediately.
         */
        __global__ void kernMapToBoolean(int n, int *bools, const int *idata) {
            int tid = blockIdx.x * blockDim.x + threadIdx.x;
            if (tid < n) {
                // nonzero input -> keep (1); zero input -> drop (0)
                bools[tid] = (idata[tid] != 0) ? 1 : 0;
            }
        }

        /**
         * Performs scatter on an array. That is, for each element in idata,
         * if bools[idx] == 1, it copies idata[idx] to odata[indices[idx]].
         */
        __global__ void kernScatter(int n, int *odata,
                const int *idata, const int *bools, const int *indices) {
            int tid = blockIdx.x * blockDim.x + threadIdx.x;
            if (tid < n && bools[tid]) {
                odata[indices[tid]] = idata[tid];
            }
        }

    }
}
|
b61d2bc0859806c19c193668afd3efff89f792b3.cu
|
#include "common.h"
// Reports (and clears) the most recent CUDA runtime error.
// On failure prints "CUDA error (<file>:<line>): <msg>: <error string>"
// to stderr and terminates the process; on success does nothing.
void checkCUDAErrorFn(const char *msg, const char *file, int line) {
    cudaError_t err = cudaGetLastError();
    if (err != cudaSuccess) {
        fprintf(stderr, "CUDA error");
        if (file != NULL) {
            fprintf(stderr, " (%s:%d)", file, line);
        }
        fprintf(stderr, ": %s: %s\n", msg, cudaGetErrorString(err));
        exit(EXIT_FAILURE);
    }
}
namespace StreamCompaction {
    namespace Common {

        /**
         * Maps an array to an array of 0s and 1s for stream compaction.
         * Elements which map to 0 will be removed, and elements which map
         * to 1 will be kept. One thread per element; out-of-range threads
         * exit immediately.
         */
        __global__ void kernMapToBoolean(int n, int *bools, const int *idata) {
            int tid = blockIdx.x * blockDim.x + threadIdx.x;
            if (tid < n) {
                // nonzero input -> keep (1); zero input -> drop (0)
                bools[tid] = (idata[tid] != 0) ? 1 : 0;
            }
        }

        /**
         * Performs scatter on an array. That is, for each element in idata,
         * if bools[idx] == 1, it copies idata[idx] to odata[indices[idx]].
         */
        __global__ void kernScatter(int n, int *odata,
                const int *idata, const int *bools, const int *indices) {
            int tid = blockIdx.x * blockDim.x + threadIdx.x;
            if (tid < n && bools[tid]) {
                odata[indices[tid]] = idata[tid];
            }
        }

    }
}
|
cd496884bd35d18b7e95df19e0ee7563b5734691.hip
|
// !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
/* Copyright (c) 2019 PaddlePaddle Authors. All Rights Reserved.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License. */
#include "paddle/fluid/operators/lookup_table_v2_op.h"
#include "paddle/fluid/framework/eigen.h"
#include "paddle/fluid/framework/op_registry.h"
#include "paddle/fluid/platform/device/gpu/gpu_primitives.h"
#include "paddle/fluid/platform/float16.h"
namespace paddle {
namespace operators {
// Forward embedding lookup: output[row] = table[ids[row]] for K rows of
// dimension D. Thread layout: threadIdx.x strides the embedding dimension,
// while (blockIdx.x, threadIdx.y) enumerate rows, each grid pass covering
// blockDim.y * gridDim.x rows. When PaddingFlag is set, rows whose id
// equals padding_idx are written as zeros instead of copied.
template <typename T, typename IdT, bool PaddingFlag>
__global__ void LookupTableV2(T *output,
                              const T *table,
                              const IdT *ids,
                              const int64_t N,
                              const int64_t K,
                              const int64_t D,
                              const int64_t padding_idx) {
  const int lane = threadIdx.x;
  const int row_step = blockDim.y * gridDim.x;

  for (int row = blockIdx.x + threadIdx.y * gridDim.x; row < K;
       row += row_step) {
    const int64_t id = static_cast<int64_t>(ids[row]);
    T *dst = output + row * D;
    const T *src = table + id * D;
    for (int col = lane; col < D; col += blockDim.x) {
      // Only the taken ternary arm is evaluated, so padded rows never
      // read from the table
      dst[col] = (PaddingFlag && id == padding_idx) ? static_cast<T>(0)
                                                    : src[col];
    }
  }
}
// Backward pass: accumulate output gradients into the embedding table,
// table[ids[row]] += output[row] for each of the K looked-up rows.
// Thread layout mirrors the forward kernel: threadIdx.x strides the
// embedding dimension D, (blockIdx.x, threadIdx.y) enumerate rows.
// Accumulation is atomic, so duplicate ids sum correctly (order is
// nondeterministic).
template <typename T, typename IdT>
__global__ void LookupTableV2Grad(T *table,
                                  const T *output,
                                  const IdT *ids,
                                  const int64_t N,
                                  const int64_t K,
                                  const int64_t D) {
  int idx = threadIdx.x;
  int idy = blockIdx.x + threadIdx.y * gridDim.x;

  while (idy < K) {
    auto id = static_cast<int64_t>(ids[idy]);
    const T *out = output + idy * D;
    T *tab = table + id * D;
#ifdef PADDLE_WITH_CUDA
    // CUDA-only fast path: vectorized block-wide atomic accumulate
    paddle::platform::VectorizedAtomicAddPerBlock(D, idx, blockDim.x, out, tab);
#else
    // Portable fallback: one atomic add per element
    for (int i = idx; i < D; i += blockDim.x) {
      paddle::platform::CudaAtomicAdd(&tab[i], out[i]);
    }
#endif
    idy += blockDim.y * gridDim.x;
  }
}
// Forward functor, dispatched over the ids' integer dtype (IdT) by
// VisitIntDataType. Reads the "W" table and "Ids" inputs from the op
// context and launches the lookup kernel writing "Out".
template <typename T>
struct LookupTableV2CUDAFunctor {
  LookupTableV2CUDAFunctor(const framework::ExecutionContext &context,
                           const phi::DenseTensor *ids_t)
      : context_(context), ids_t_(ids_t) {}

  template <typename IdT>
  void apply() {
    auto *table_t = context_.Input<phi::DenseTensor>("W");
    auto *output_t = context_.Output<phi::DenseTensor>("Out");
    int64_t padding_idx = context_.Attr<int64_t>("padding_idx");

    // Table has N rows of dimension D; K ids to look up
    size_t N = table_t->dims()[0];
    size_t D = table_t->dims()[1];
    size_t K = ids_t_->numel();

    // 2 blocks per SM; each block covers 4 rows with 256 lanes over D
    const int gridx = 2 * context_.cuda_device_context().GetSMCount();
    dim3 threads(256, 4);
    dim3 grids(gridx, 1);

    const auto *table = table_t->template data<T>();
    const auto *ids = ids_t_->template data<IdT>();
    auto *output = output_t->template mutable_data<T>(context_.GetPlace());
    auto stream = context_.cuda_device_context().stream();

    // padding_idx == -1 means "no padding row"; pick the template
    // specialization so the per-element padding test is compiled out
    // when it is not needed
    if (padding_idx == -1) {
      hipLaunchKernelGGL(( LookupTableV2<T, IdT, false>), dim3(grids), dim3(threads), 0, stream,
          output, table, ids, N, K, D, padding_idx);
    } else {
      hipLaunchKernelGGL(( LookupTableV2<T, IdT, true>), dim3(grids), dim3(threads), 0, stream,
          output, table, ids, N, K, D, padding_idx);
    }
  }

 private:
  const framework::ExecutionContext &context_;
  const phi::DenseTensor *ids_t_;
};
// Forward op kernel: dispatch on the ids' integer dtype, then run the
// lookup functor for this output element type T.
template <typename T>
class LookupTableV2CUDAKernel : public framework::OpKernel<T> {
 public:
  void Compute(const framework::ExecutionContext &context) const override {
    const auto *ids = context.Input<phi::DenseTensor>("Ids");
    LookupTableV2CUDAFunctor<T> fn(context, ids);
    framework::VisitIntDataType(framework::TransToProtoVarType(ids->dtype()),
                                fn);
  }
};
// Casts K ids from InT to OutT on the device.
// Each thread now converts a strided subset of the elements.
// (Previously every thread of the whole grid serially converted ALL K
// elements, so each output slot was written redundantly by every thread;
// the final memory contents are unchanged by this parallelization.)
template <typename InT, typename OutT>
__global__ void InputTypeConvert(const InT *in_ids,
                                 const int64_t K,
                                 OutT *out_ids) {
  // Flatten the (possibly 2-D) thread block and the 1-D grid
  const int64_t nthreads =
      static_cast<int64_t>(blockDim.x) * blockDim.y * gridDim.x;
  int64_t tid = threadIdx.x +
                blockDim.x * (threadIdx.y +
                              static_cast<int64_t>(blockDim.y) * blockIdx.x);
  for (int64_t i = tid; i < K; i += nthreads) {
    out_ids[i] = static_cast<OutT>(in_ids[i]);
  }
}
// Backward functor, dispatched over the ids' integer dtype (IdT).
// Two paths depending on the "is_sparse" attribute:
//  - sparse: emit a SelectedRows gradient whose rows are the looked-up ids
//    and whose values are a straight copy of d(Out);
//  - dense: zero a full table-shaped gradient, then scatter-add d(Out)
//    into it with the LookupTableV2Grad kernel.
template <typename T>
struct LookupTableV2GradCUDAFunctor {
  LookupTableV2GradCUDAFunctor(const framework::ExecutionContext &context,
                               const phi::DenseTensor *ids_t)
      : context_(context), ids_t_(ids_t) {}

  template <typename IdT>
  void apply() {
    auto &dev_ctx = context_.template device_context<phi::GPUContext>();
    bool is_sparse = context_.Attr<bool>("is_sparse");

    // Since paddings are not trainable and fixed in forward, the gradient of
    // paddings makes no sense and we don't deal with it in backward.
    if (is_sparse) {
      auto *table = context_.Input<phi::DenseTensor>("W");
      auto *d_output =
          context_.Input<phi::DenseTensor>(framework::GradVarName("Out"));
      auto *d_table =
          context_.Output<phi::SelectedRows>(framework::GradVarName("W"));

      const auto *ids_data = ids_t_->template data<IdT>();
      int64_t ids_num = ids_t_->numel();

      dim3 threads(128, 8);
      dim3 grids(8, 1);
      auto stream = dev_ctx.stream();

      // Row indices of the sparse gradient are the ids themselves
      framework::Vector<int64_t> new_rows;
      new_rows.resize(ids_num);
      auto gpu_place = context_.GetPlace();

      paddle::framework::MixVector<int64_t> mixv_new_rows(&new_rows);
      if (!std::is_same<IdT, int64_t>::value) {
        // Convert/widen the ids to int64 on the device
        hipLaunchKernelGGL(( InputTypeConvert), dim3(grids), dim3(threads), 0, stream,
            ids_data, ids_num, mixv_new_rows.MutableData(gpu_place));
      } else {
        // Already int64: plain device-to-device copy
        memory::Copy(gpu_place,
                     mixv_new_rows.CUDAMutableData(gpu_place),
                     gpu_place,
                     ids_data,
                     ids_num * sizeof(int64_t),
                     stream);
      }
      mixv_new_rows.CopyToCPU();
      d_table->set_rows(new_rows);

      // Values of the sparse gradient: one row of d(Out) per id
      auto *d_table_value = d_table->mutable_value();
      d_table_value->Resize({ids_num, table->dims()[1]});
      d_table_value->template mutable_data<T>(gpu_place);

      auto *d_table_data = d_table_value->template data<T>();
      auto *d_output_data = d_output->template data<T>();
      auto d_output_dims = d_output->dims();
      auto d_output_dims_2d =
          phi::flatten_to_2d(d_output_dims, d_output_dims.size() - 1);
      PADDLE_ENFORCE_EQ(d_table_value->dims(),
                        d_output_dims_2d,
                        platform::errors::InvalidArgument(
                            "ShapeError: The shape of lookup_table@Grad and "
                            "output@Grad should be same. "
                            "But received lookup_table@Grad's shape = [%s], "
                            "output@Grad's shape = [%s].",
                            d_table_value->dims(),
                            d_output_dims_2d));
      memory::Copy(gpu_place,
                   d_table_data,
                   gpu_place,
                   d_output_data,
                   d_output->numel() * sizeof(T),
                   stream);
    } else {
      auto d_output_t =
          context_.Input<phi::DenseTensor>(framework::GradVarName("Out"));
      auto d_table_t =
          context_.Output<phi::DenseTensor>(framework::GradVarName("W"));

      int N = d_table_t->dims()[0];
      int D = d_table_t->dims()[1];
      int K = ids_t_->numel();

      const T *d_output = d_output_t->template data<T>();
      const auto *ids = ids_t_->template data<IdT>();
      T *d_table = d_table_t->mutable_data<T>(context_.GetPlace());

      // Zero the dense gradient before the atomic scatter-accumulation.
      // NOTE(review): after hipification both preprocessor branches call
      // hipMemsetAsync (the #else branch held cudaMemsetAsync upstream)
#ifdef PADDLE_WITH_HIP
      PADDLE_ENFORCE_GPU_SUCCESS(
          hipMemsetAsync(d_table, 0, N * D * sizeof(T), dev_ctx.stream()));
#else
      PADDLE_ENFORCE_GPU_SUCCESS(
          hipMemsetAsync(d_table, 0, N * D * sizeof(T), dev_ctx.stream()));
#endif

      // 2 blocks per SM; 128 lanes over D, 8 rows per block
      const int gridx = 2 * dev_ctx.GetSMCount();
      dim3 threads(128, 8);
      dim3 grids(gridx, 1);
      hipLaunchKernelGGL(( LookupTableV2Grad<T, IdT>), dim3(grids), dim3(threads), 0, dev_ctx.stream(),
          d_table, d_output, ids, N, K, D);
    }
  }

 private:
  const framework::ExecutionContext &context_;
  const phi::DenseTensor *ids_t_;
};
// Backward op kernel: dispatch on the ids' integer dtype, then run the
// gradient functor (which chooses the sparse or dense path internally).
template <typename T>
class LookupTableV2GradCUDAKernel : public framework::OpKernel<T> {
 public:
  void Compute(const framework::ExecutionContext &context) const override {
    const auto *ids = context.Input<phi::DenseTensor>("Ids");
    LookupTableV2GradCUDAFunctor<T> fn(context, ids);
    framework::VisitIntDataType(framework::TransToProtoVarType(ids->dtype()),
                                fn);
  }
};
} // namespace operators
} // namespace paddle
|
cd496884bd35d18b7e95df19e0ee7563b5734691.cu
|
/* Copyright (c) 2019 PaddlePaddle Authors. All Rights Reserved.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License. */
#include "paddle/fluid/operators/lookup_table_v2_op.h"
#include "paddle/fluid/framework/eigen.h"
#include "paddle/fluid/framework/op_registry.h"
#include "paddle/fluid/platform/device/gpu/gpu_primitives.h"
#include "paddle/fluid/platform/float16.h"
namespace paddle {
namespace operators {
template <typename T, typename IdT, bool PaddingFlag>
__global__ void LookupTableV2(T *output,
const T *table,
const IdT *ids,
const int64_t N,
const int64_t K,
const int64_t D,
const int64_t padding_idx) {
int idx = threadIdx.x;
int idy = blockIdx.x + threadIdx.y * gridDim.x;
while (idy < K) {
auto id = static_cast<int64_t>(ids[idy]);
T *out = output + idy * D;
const T *tab = table + id * D;
for (int i = idx; i < D; i += blockDim.x) {
if (PaddingFlag) {
if (id == padding_idx)
out[i] = static_cast<T>(0);
else
out[i] = tab[i];
} else {
out[i] = tab[i];
}
}
idy += blockDim.y * gridDim.x;
}
}
template <typename T, typename IdT>
__global__ void LookupTableV2Grad(T *table,
const T *output,
const IdT *ids,
const int64_t N,
const int64_t K,
const int64_t D) {
int idx = threadIdx.x;
int idy = blockIdx.x + threadIdx.y * gridDim.x;
while (idy < K) {
auto id = static_cast<int64_t>(ids[idy]);
const T *out = output + idy * D;
T *tab = table + id * D;
#ifdef PADDLE_WITH_CUDA
paddle::platform::VectorizedAtomicAddPerBlock(D, idx, blockDim.x, out, tab);
#else
for (int i = idx; i < D; i += blockDim.x) {
paddle::platform::CudaAtomicAdd(&tab[i], out[i]);
}
#endif
idy += blockDim.y * gridDim.x;
}
}
template <typename T>
struct LookupTableV2CUDAFunctor {
LookupTableV2CUDAFunctor(const framework::ExecutionContext &context,
const phi::DenseTensor *ids_t)
: context_(context), ids_t_(ids_t) {}
template <typename IdT>
void apply() {
auto *table_t = context_.Input<phi::DenseTensor>("W");
auto *output_t = context_.Output<phi::DenseTensor>("Out");
int64_t padding_idx = context_.Attr<int64_t>("padding_idx");
size_t N = table_t->dims()[0];
size_t D = table_t->dims()[1];
size_t K = ids_t_->numel();
const int gridx = 2 * context_.cuda_device_context().GetSMCount();
dim3 threads(256, 4);
dim3 grids(gridx, 1);
const auto *table = table_t->template data<T>();
const auto *ids = ids_t_->template data<IdT>();
auto *output = output_t->template mutable_data<T>(context_.GetPlace());
auto stream = context_.cuda_device_context().stream();
if (padding_idx == -1) {
LookupTableV2<T, IdT, false><<<grids, threads, 0, stream>>>(
output, table, ids, N, K, D, padding_idx);
} else {
LookupTableV2<T, IdT, true><<<grids, threads, 0, stream>>>(
output, table, ids, N, K, D, padding_idx);
}
}
private:
const framework::ExecutionContext &context_;
const phi::DenseTensor *ids_t_;
};
template <typename T>
class LookupTableV2CUDAKernel : public framework::OpKernel<T> {
public:
void Compute(const framework::ExecutionContext &context) const override {
const auto *ids_t = context.Input<phi::DenseTensor>("Ids");
LookupTableV2CUDAFunctor<T> functor(context, ids_t);
framework::VisitIntDataType(framework::TransToProtoVarType(ids_t->dtype()),
functor);
}
};
template <typename InT, typename OutT>
__global__ void InputTypeConvert(const InT *in_ids,
const int64_t K,
OutT *out_ids) {
for (int i = 0; i < K; i++) {
out_ids[i] = static_cast<OutT>(in_ids[i]);
}
}
template <typename T>
struct LookupTableV2GradCUDAFunctor {
LookupTableV2GradCUDAFunctor(const framework::ExecutionContext &context,
const phi::DenseTensor *ids_t)
: context_(context), ids_t_(ids_t) {}
template <typename IdT>
void apply() {
auto &dev_ctx = context_.template device_context<phi::GPUContext>();
bool is_sparse = context_.Attr<bool>("is_sparse");
// Since paddings are not trainable and fixed in forward, the gradient of
// paddings makes no sense and we don't deal with it in backward.
if (is_sparse) {
auto *table = context_.Input<phi::DenseTensor>("W");
auto *d_output =
context_.Input<phi::DenseTensor>(framework::GradVarName("Out"));
auto *d_table =
context_.Output<phi::SelectedRows>(framework::GradVarName("W"));
const auto *ids_data = ids_t_->template data<IdT>();
int64_t ids_num = ids_t_->numel();
dim3 threads(128, 8);
dim3 grids(8, 1);
auto stream = dev_ctx.stream();
framework::Vector<int64_t> new_rows;
new_rows.resize(ids_num);
auto gpu_place = context_.GetPlace();
paddle::framework::MixVector<int64_t> mixv_new_rows(&new_rows);
if (!std::is_same<IdT, int64_t>::value) {
InputTypeConvert<<<grids, threads, 0, stream>>>(
ids_data, ids_num, mixv_new_rows.MutableData(gpu_place));
} else {
memory::Copy(gpu_place,
mixv_new_rows.CUDAMutableData(gpu_place),
gpu_place,
ids_data,
ids_num * sizeof(int64_t),
stream);
}
mixv_new_rows.CopyToCPU();
d_table->set_rows(new_rows);
auto *d_table_value = d_table->mutable_value();
d_table_value->Resize({ids_num, table->dims()[1]});
d_table_value->template mutable_data<T>(gpu_place);
auto *d_table_data = d_table_value->template data<T>();
auto *d_output_data = d_output->template data<T>();
auto d_output_dims = d_output->dims();
auto d_output_dims_2d =
phi::flatten_to_2d(d_output_dims, d_output_dims.size() - 1);
PADDLE_ENFORCE_EQ(d_table_value->dims(),
d_output_dims_2d,
platform::errors::InvalidArgument(
"ShapeError: The shape of lookup_table@Grad and "
"output@Grad should be same. "
"But received lookup_table@Grad's shape = [%s], "
"output@Grad's shape = [%s].",
d_table_value->dims(),
d_output_dims_2d));
memory::Copy(gpu_place,
d_table_data,
gpu_place,
d_output_data,
d_output->numel() * sizeof(T),
stream);
} else {
auto d_output_t =
context_.Input<phi::DenseTensor>(framework::GradVarName("Out"));
auto d_table_t =
context_.Output<phi::DenseTensor>(framework::GradVarName("W"));
int N = d_table_t->dims()[0];
int D = d_table_t->dims()[1];
int K = ids_t_->numel();
const T *d_output = d_output_t->template data<T>();
const auto *ids = ids_t_->template data<IdT>();
T *d_table = d_table_t->mutable_data<T>(context_.GetPlace());
#ifdef PADDLE_WITH_HIP
PADDLE_ENFORCE_GPU_SUCCESS(
hipMemsetAsync(d_table, 0, N * D * sizeof(T), dev_ctx.stream()));
#else
PADDLE_ENFORCE_GPU_SUCCESS(
cudaMemsetAsync(d_table, 0, N * D * sizeof(T), dev_ctx.stream()));
#endif
const int gridx = 2 * dev_ctx.GetSMCount();
dim3 threads(128, 8);
dim3 grids(gridx, 1);
LookupTableV2Grad<T, IdT><<<grids, threads, 0, dev_ctx.stream()>>>(
d_table, d_output, ids, N, K, D);
}
}
private:
const framework::ExecutionContext &context_;
const phi::DenseTensor *ids_t_;
};
template <typename T>
class LookupTableV2GradCUDAKernel : public framework::OpKernel<T> {
public:
void Compute(const framework::ExecutionContext &context) const override {
const auto *ids_t = context.Input<phi::DenseTensor>("Ids");
LookupTableV2GradCUDAFunctor<T> functor(context, ids_t);
framework::VisitIntDataType(framework::TransToProtoVarType(ids_t->dtype()),
functor);
}
};
} // namespace operators
} // namespace paddle
|
4dafd64ff5eb10e5c29a2b13ec5f725e9026976d.hip
|
// !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
#include "device_launch_parameters.h"
#include<iostream>
#include<iomanip>
#include<stdlib.h>
#include<stdio.h>
#include<assert.h>
#include<math.h>
#include <cusolverDn.h>
#include <hip/hip_runtime_api.h>
#include "Utilities.cuh"
/********/
/* MAIN */
/********/
int main(){
// --- gesvd only supports Nrows >= Ncols
// --- column major memory ordering
const int Nrows = 7;
const int Ncols = 5;
// --- cuSOLVE input/output parameters/arrays
int work_size = 0;
int *devInfo; gpuErrchk(hipMalloc(&devInfo, sizeof(int)));
// --- CUDA solver initialization
hipsolverDnHandle_t solver_handle;
hipsolverDnCreate(&solver_handle);
// --- Setting the host, Nrows x Ncols matrix
double *h_A = (double *)malloc(Nrows * Ncols * sizeof(double));
for(int j = 0; j < Nrows; j++)
for(int i = 0; i < Ncols; i++)
h_A[j + i*Nrows] = (i + j*j) * sqrt((double)(i + j));
// --- Setting the device matrix and moving the host matrix to the device
double *d_A; gpuErrchk(hipMalloc(&d_A, Nrows * Ncols * sizeof(double)));
gpuErrchk(hipMemcpy(d_A, h_A, Nrows * Ncols * sizeof(double), hipMemcpyHostToDevice));
// --- host side SVD results space
double *h_U = (double *)malloc(Nrows * Nrows * sizeof(double));
double *h_V = (double *)malloc(Ncols * Ncols * sizeof(double));
double *h_S = (double *)malloc(min(Nrows, Ncols) * sizeof(double));
// --- device side SVD workspace and matrices
double *d_U; gpuErrchk(hipMalloc(&d_U, Nrows * Nrows * sizeof(double)));
double *d_V; gpuErrchk(hipMalloc(&d_V, Ncols * Ncols * sizeof(double)));
double *d_S; gpuErrchk(hipMalloc(&d_S, min(Nrows, Ncols) * sizeof(double)));
// --- CUDA SVD initialization
cusolveSafeCall(hipsolverDnDgesvd_bufferSize(solver_handle, Nrows, Ncols, &work_size));
double *work; gpuErrchk(hipMalloc(&work, work_size * sizeof(double)));
// --- CUDA SVD execution
cusolveSafeCall(hipsolverDnDgesvd(solver_handle, 'A', 'A', Nrows, Ncols, d_A, Nrows, d_S, d_U, Nrows, d_V, Ncols, work, work_size, NULL, devInfo));
int devInfo_h = 0; gpuErrchk(hipMemcpy(&devInfo_h, devInfo, sizeof(int), hipMemcpyDeviceToHost));
if (devInfo_h != 0) std::cout << "Unsuccessful SVD execution\n\n";
// --- Moving the results from device to host
gpuErrchk(hipMemcpy(h_S, d_S, min(Nrows, Ncols) * sizeof(double), hipMemcpyDeviceToHost));
gpuErrchk(hipMemcpy(h_U, d_U, Nrows * Nrows * sizeof(double), hipMemcpyDeviceToHost));
gpuErrchk(hipMemcpy(h_V, d_V, Ncols * Ncols * sizeof(double), hipMemcpyDeviceToHost));
std::cout << "Singular values\n";
for(int i = 0; i < min(Nrows, Ncols); i++)
std::cout << "d_S["<<i<<"] = " << std::setprecision(15) << h_S[i] << std::endl;
std::cout << "\nLeft singular vectors - For y = A * x, the columns of U span the space of y\n";
for(int j = 0; j < Nrows; j++) {
printf("\n");
for(int i = 0; i < Nrows; i++)
printf("U[%i,%i]=%f\n",i,j,h_U[j*Nrows + i]);
}
std::cout << "\nRight singular vectors - For y = A * x, the columns of V span the space of x\n";
for(int i = 0; i < Ncols; i++) {
printf("\n");
for(int j = 0; j < Ncols; j++)
printf("V[%i,%i]=%f\n",i,j,h_V[j*Ncols + i]);
}
hipsolverDnDestroy(solver_handle);
return 0;
}
|
4dafd64ff5eb10e5c29a2b13ec5f725e9026976d.cu
|
#include "cuda_runtime.h"
#include "device_launch_parameters.h"
#include<iostream>
#include<iomanip>
#include<stdlib.h>
#include<stdio.h>
#include<assert.h>
#include<math.h>
#include <cusolverDn.h>
#include <cuda_runtime_api.h>
#include "Utilities.cuh"
/********/
/* MAIN */
/********/
int main(){
// --- gesvd only supports Nrows >= Ncols
// --- column major memory ordering
const int Nrows = 7;
const int Ncols = 5;
// --- cuSOLVE input/output parameters/arrays
int work_size = 0;
int *devInfo; gpuErrchk(cudaMalloc(&devInfo, sizeof(int)));
// --- CUDA solver initialization
cusolverDnHandle_t solver_handle;
cusolverDnCreate(&solver_handle);
// --- Setting the host, Nrows x Ncols matrix
double *h_A = (double *)malloc(Nrows * Ncols * sizeof(double));
for(int j = 0; j < Nrows; j++)
for(int i = 0; i < Ncols; i++)
h_A[j + i*Nrows] = (i + j*j) * sqrt((double)(i + j));
// --- Setting the device matrix and moving the host matrix to the device
double *d_A; gpuErrchk(cudaMalloc(&d_A, Nrows * Ncols * sizeof(double)));
gpuErrchk(cudaMemcpy(d_A, h_A, Nrows * Ncols * sizeof(double), cudaMemcpyHostToDevice));
// --- host side SVD results space
double *h_U = (double *)malloc(Nrows * Nrows * sizeof(double));
double *h_V = (double *)malloc(Ncols * Ncols * sizeof(double));
double *h_S = (double *)malloc(min(Nrows, Ncols) * sizeof(double));
// --- device side SVD workspace and matrices
double *d_U; gpuErrchk(cudaMalloc(&d_U, Nrows * Nrows * sizeof(double)));
double *d_V; gpuErrchk(cudaMalloc(&d_V, Ncols * Ncols * sizeof(double)));
double *d_S; gpuErrchk(cudaMalloc(&d_S, min(Nrows, Ncols) * sizeof(double)));
// --- CUDA SVD initialization
cusolveSafeCall(cusolverDnDgesvd_bufferSize(solver_handle, Nrows, Ncols, &work_size));
double *work; gpuErrchk(cudaMalloc(&work, work_size * sizeof(double)));
// --- CUDA SVD execution
cusolveSafeCall(cusolverDnDgesvd(solver_handle, 'A', 'A', Nrows, Ncols, d_A, Nrows, d_S, d_U, Nrows, d_V, Ncols, work, work_size, NULL, devInfo));
int devInfo_h = 0; gpuErrchk(cudaMemcpy(&devInfo_h, devInfo, sizeof(int), cudaMemcpyDeviceToHost));
if (devInfo_h != 0) std::cout << "Unsuccessful SVD execution\n\n";
// --- Moving the results from device to host
gpuErrchk(cudaMemcpy(h_S, d_S, min(Nrows, Ncols) * sizeof(double), cudaMemcpyDeviceToHost));
gpuErrchk(cudaMemcpy(h_U, d_U, Nrows * Nrows * sizeof(double), cudaMemcpyDeviceToHost));
gpuErrchk(cudaMemcpy(h_V, d_V, Ncols * Ncols * sizeof(double), cudaMemcpyDeviceToHost));
std::cout << "Singular values\n";
for(int i = 0; i < min(Nrows, Ncols); i++)
std::cout << "d_S["<<i<<"] = " << std::setprecision(15) << h_S[i] << std::endl;
std::cout << "\nLeft singular vectors - For y = A * x, the columns of U span the space of y\n";
for(int j = 0; j < Nrows; j++) {
printf("\n");
for(int i = 0; i < Nrows; i++)
printf("U[%i,%i]=%f\n",i,j,h_U[j*Nrows + i]);
}
std::cout << "\nRight singular vectors - For y = A * x, the columns of V span the space of x\n";
for(int i = 0; i < Ncols; i++) {
printf("\n");
for(int j = 0; j < Ncols; j++)
printf("V[%i,%i]=%f\n",i,j,h_V[j*Ncols + i]);
}
cusolverDnDestroy(solver_handle);
return 0;
}
|
11543b5d6ece756501180bc00db228bc505e9c40.hip
|
// !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
// #define DEBUG
__device__ void evaluate_2b_maple(
double * a, double * x, double * g, double * energy_buffer
)
{
#include "2b_maple_polynomial.cu"
energy_buffer[0] = energy;
}
__global__ void evaluate_2b_maple_many(double * a, double * x, double * g, double * energy_buffer, int n) {
for (int i=0; i<n; i++) {
evaluate_2b_maple(a, x, g, energy_buffer);
}
}
void launch_evaluate_2b_maple(double * a, double * x, double * g, double * e) {
hipLaunchKernelGGL(( evaluate_2b_maple_many), dim3(1),dim3(1), 0, 0, a, x, g, e, 10000);
hipDeviceSynchronize();
}
|
11543b5d6ece756501180bc00db228bc505e9c40.cu
|
// #define DEBUG
__device__ void evaluate_2b_maple(
double * a, double * x, double * g, double * energy_buffer
)
{
#include "2b_maple_polynomial.cu"
energy_buffer[0] = energy;
}
__global__ void evaluate_2b_maple_many(double * a, double * x, double * g, double * energy_buffer, int n) {
for (int i=0; i<n; i++) {
evaluate_2b_maple(a, x, g, energy_buffer);
}
}
void launch_evaluate_2b_maple(double * a, double * x, double * g, double * e) {
evaluate_2b_maple_many<<<1,1>>>(a, x, g, e, 10000);
cudaDeviceSynchronize();
}
|
add_with_n*n*1_threads.hip
|
// !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
// Kernel definition
__global__ void MatAdd(float *A, float *B,
float *C)
{
int i = threadIdx.x;
int j = threadIdx.y;
C[i][j] = A[i][j] + B[i][j];
}
int main()
{
int N = 10;
float *A, *B, *C, *d_A, *d_B, *d_C;
A = (float*)malloc(N*sizeof(float));
B = (float*)malloc(N*sizeof(float));
C = (float*)malloc(N*sizeof(float));
hipMalloc(&d_A, N*sizeof(float));
hipMalloc(&d_B, N*sizeof(float));
hipMalloc(&d_C, N*sizeof(float));
for (int i = 0; i < N; i++) {
for (int j = 0; j < N; j++){
A[i][j] = 1.0f;
B[i][j] = 2.0f;
C[i][j] = 0.0f;
}
}
hipMemcpy(d_A, A, N*sizeof(float), hipMemcpyHostToDevice);
hipMemcpy(d_B, B, N*sizeof(float), hipMemcpyHostToDevice);
hipMemcpy(d_C, C, N*sizeof(float), hipMemcpyHostToDevice);
// Kernel invocation with one block of N * N * 1 threads
int numBlocks = 1;
dim3 threadsPerBlock(N, N);
hipLaunchKernelGGL(( MatAdd), dim3(numBlocks), dim3(threadsPerBlock), 0, 0, d_A, d_B, d_C);
}
|
add_with_n*n*1_threads.cu
|
// Kernel definition
__global__ void MatAdd(float *A, float *B,
float *C)
{
int i = threadIdx.x;
int j = threadIdx.y;
C[i][j] = A[i][j] + B[i][j];
}
int main()
{
int N = 10;
float *A, *B, *C, *d_A, *d_B, *d_C;
A = (float*)malloc(N*sizeof(float));
B = (float*)malloc(N*sizeof(float));
C = (float*)malloc(N*sizeof(float));
cudaMalloc(&d_A, N*sizeof(float));
cudaMalloc(&d_B, N*sizeof(float));
cudaMalloc(&d_C, N*sizeof(float));
for (int i = 0; i < N; i++) {
for (int j = 0; j < N; j++){
A[i][j] = 1.0f;
B[i][j] = 2.0f;
C[i][j] = 0.0f;
}
}
cudaMemcpy(d_A, A, N*sizeof(float), cudaMemcpyHostToDevice);
cudaMemcpy(d_B, B, N*sizeof(float), cudaMemcpyHostToDevice);
cudaMemcpy(d_C, C, N*sizeof(float), cudaMemcpyHostToDevice);
// Kernel invocation with one block of N * N * 1 threads
int numBlocks = 1;
dim3 threadsPerBlock(N, N);
MatAdd<<<numBlocks, threadsPerBlock>>>(d_A, d_B, d_C);
}
|
6b7ab151ceac4030cac3e587f74de1a1eb4570ce.hip
|
// !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
//
// auto-generated by ops.py
//
__constant__ int xdim0_update_halo_kernel5_plus_4_left;
int xdim0_update_halo_kernel5_plus_4_left_h = -1;
__constant__ int ydim0_update_halo_kernel5_plus_4_left;
int ydim0_update_halo_kernel5_plus_4_left_h = -1;
__constant__ int xdim1_update_halo_kernel5_plus_4_left;
int xdim1_update_halo_kernel5_plus_4_left_h = -1;
__constant__ int ydim1_update_halo_kernel5_plus_4_left;
int ydim1_update_halo_kernel5_plus_4_left_h = -1;
#undef OPS_ACC0
#undef OPS_ACC1
#define OPS_ACC0(x, y, z) \
(x + xdim0_update_halo_kernel5_plus_4_left * (y) + \
xdim0_update_halo_kernel5_plus_4_left * \
ydim0_update_halo_kernel5_plus_4_left * (z))
#define OPS_ACC1(x, y, z) \
(x + xdim1_update_halo_kernel5_plus_4_left * (y) + \
xdim1_update_halo_kernel5_plus_4_left * \
ydim1_update_halo_kernel5_plus_4_left * (z))
// user function
__device__
inline void
update_halo_kernel5_plus_4_left_gpu(double *vol_flux_z, double *mass_flux_z,
const int *fields) {
if (fields[FIELD_VOL_FLUX_Z] == 1)
vol_flux_z[OPS_ACC0(0, 0, 0)] = (vol_flux_z[OPS_ACC0(4, 0, 0)]);
if (fields[FIELD_MASS_FLUX_Z] == 1)
mass_flux_z[OPS_ACC1(0, 0, 0)] = (mass_flux_z[OPS_ACC1(4, 0, 0)]);
}
#undef OPS_ACC0
#undef OPS_ACC1
__global__ void ops_update_halo_kernel5_plus_4_left(double *__restrict arg0,
double *__restrict arg1,
const int *__restrict arg2,
int size0, int size1,
int size2) {
int idx_z = blockDim.z * blockIdx.z + threadIdx.z;
int idx_y = blockDim.y * blockIdx.y + threadIdx.y;
int idx_x = blockDim.x * blockIdx.x + threadIdx.x;
arg0 += idx_x * 1 * 1 +
idx_y * 1 * 1 * xdim0_update_halo_kernel5_plus_4_left +
idx_z * 1 * 1 * xdim0_update_halo_kernel5_plus_4_left *
ydim0_update_halo_kernel5_plus_4_left;
arg1 += idx_x * 1 * 1 +
idx_y * 1 * 1 * xdim1_update_halo_kernel5_plus_4_left +
idx_z * 1 * 1 * xdim1_update_halo_kernel5_plus_4_left *
ydim1_update_halo_kernel5_plus_4_left;
if (idx_x < size0 && idx_y < size1 && idx_z < size2) {
update_halo_kernel5_plus_4_left_gpu(arg0, arg1, arg2);
}
}
// host stub function
void ops_par_loop_update_halo_kernel5_plus_4_left(char const *name,
ops_block block, int dim,
int *range, ops_arg arg0,
ops_arg arg1, ops_arg arg2) {
// Timing
double t1, t2, c1, c2;
ops_arg args[3] = {arg0, arg1, arg2};
#ifdef CHECKPOINTING
if (!ops_checkpointing_before(args, 3, range, 132))
return;
#endif
if (OPS_diags > 1) {
ops_timing_realloc(132, "update_halo_kernel5_plus_4_left");
OPS_kernels[132].count++;
ops_timers_core(&c1, &t1);
}
// compute locally allocated range for the sub-block
int start[3];
int end[3];
#ifdef OPS_MPI
sub_block_list sb = OPS_sub_block_list[block->index];
if (!sb->owned)
return;
for (int n = 0; n < 3; n++) {
start[n] = sb->decomp_disp[n];
end[n] = sb->decomp_disp[n] + sb->decomp_size[n];
if (start[n] >= range[2 * n]) {
start[n] = 0;
} else {
start[n] = range[2 * n] - start[n];
}
if (sb->id_m[n] == MPI_PROC_NULL && range[2 * n] < 0)
start[n] = range[2 * n];
if (end[n] >= range[2 * n + 1]) {
end[n] = range[2 * n + 1] - sb->decomp_disp[n];
} else {
end[n] = sb->decomp_size[n];
}
if (sb->id_p[n] == MPI_PROC_NULL &&
(range[2 * n + 1] > sb->decomp_disp[n] + sb->decomp_size[n]))
end[n] += (range[2 * n + 1] - sb->decomp_disp[n] - sb->decomp_size[n]);
}
#else
for (int n = 0; n < 3; n++) {
start[n] = range[2 * n];
end[n] = range[2 * n + 1];
}
#endif
int x_size = MAX(0, end[0] - start[0]);
int y_size = MAX(0, end[1] - start[1]);
int z_size = MAX(0, end[2] - start[2]);
int xdim0 = args[0].dat->size[0];
int ydim0 = args[0].dat->size[1];
int xdim1 = args[1].dat->size[0];
int ydim1 = args[1].dat->size[1];
if (xdim0 != xdim0_update_halo_kernel5_plus_4_left_h ||
ydim0 != ydim0_update_halo_kernel5_plus_4_left_h ||
xdim1 != xdim1_update_halo_kernel5_plus_4_left_h ||
ydim1 != ydim1_update_halo_kernel5_plus_4_left_h) {
hipMemcpyToSymbol(xdim0_update_halo_kernel5_plus_4_left, &xdim0,
sizeof(int));
xdim0_update_halo_kernel5_plus_4_left_h = xdim0;
hipMemcpyToSymbol(ydim0_update_halo_kernel5_plus_4_left, &ydim0,
sizeof(int));
ydim0_update_halo_kernel5_plus_4_left_h = ydim0;
hipMemcpyToSymbol(xdim1_update_halo_kernel5_plus_4_left, &xdim1,
sizeof(int));
xdim1_update_halo_kernel5_plus_4_left_h = xdim1;
hipMemcpyToSymbol(ydim1_update_halo_kernel5_plus_4_left, &ydim1,
sizeof(int));
ydim1_update_halo_kernel5_plus_4_left_h = ydim1;
}
int *arg2h = (int *)arg2.data;
dim3 grid((x_size - 1) / OPS_block_size_x + 1,
(y_size - 1) / OPS_block_size_y + 1, z_size);
dim3 tblock(OPS_block_size_x, OPS_block_size_y, 1);
int consts_bytes = 0;
consts_bytes += ROUND_UP(NUM_FIELDS * sizeof(int));
reallocConstArrays(consts_bytes);
consts_bytes = 0;
arg2.data = OPS_consts_h + consts_bytes;
arg2.data_d = OPS_consts_d + consts_bytes;
for (int d = 0; d < NUM_FIELDS; d++)
((int *)arg2.data)[d] = arg2h[d];
consts_bytes += ROUND_UP(NUM_FIELDS * sizeof(int));
mvConstArraysToDevice(consts_bytes);
int dat0 = args[0].dat->elem_size;
int dat1 = args[1].dat->elem_size;
char *p_a[3];
// set up initial pointers
int d_m[OPS_MAX_DIM];
#ifdef OPS_MPI
for (int d = 0; d < dim; d++)
d_m[d] =
args[0].dat->d_m[d] + OPS_sub_dat_list[args[0].dat->index]->d_im[d];
#else
for (int d = 0; d < dim; d++)
d_m[d] = args[0].dat->d_m[d];
#endif
int base0 = dat0 * 1 * (start[0] * args[0].stencil->stride[0] -
args[0].dat->base[0] - d_m[0]);
base0 = base0 +
dat0 * args[0].dat->size[0] * (start[1] * args[0].stencil->stride[1] -
args[0].dat->base[1] - d_m[1]);
base0 = base0 +
dat0 * args[0].dat->size[0] * args[0].dat->size[1] *
(start[2] * args[0].stencil->stride[2] - args[0].dat->base[2] -
d_m[2]);
p_a[0] = (char *)args[0].data_d + base0;
#ifdef OPS_MPI
for (int d = 0; d < dim; d++)
d_m[d] =
args[1].dat->d_m[d] + OPS_sub_dat_list[args[1].dat->index]->d_im[d];
#else
for (int d = 0; d < dim; d++)
d_m[d] = args[1].dat->d_m[d];
#endif
int base1 = dat1 * 1 * (start[0] * args[1].stencil->stride[0] -
args[1].dat->base[0] - d_m[0]);
base1 = base1 +
dat1 * args[1].dat->size[0] * (start[1] * args[1].stencil->stride[1] -
args[1].dat->base[1] - d_m[1]);
base1 = base1 +
dat1 * args[1].dat->size[0] * args[1].dat->size[1] *
(start[2] * args[1].stencil->stride[2] - args[1].dat->base[2] -
d_m[2]);
p_a[1] = (char *)args[1].data_d + base1;
ops_H_D_exchanges_device(args, 3);
ops_halo_exchanges(args, 3, range);
if (OPS_diags > 1) {
ops_timers_core(&c2, &t2);
OPS_kernels[132].mpi_time += t2 - t1;
}
// call kernel wrapper function, passing in pointers to data
hipLaunchKernelGGL(( ops_update_halo_kernel5_plus_4_left), dim3(grid), dim3(tblock), 0, 0,
(double *)p_a[0], (double *)p_a[1], (int *)arg2.data_d, x_size, y_size,
z_size);
if (OPS_diags > 1) {
cutilSafeCall(hipDeviceSynchronize());
ops_timers_core(&c1, &t1);
OPS_kernels[132].time += t1 - t2;
}
ops_set_dirtybit_device(args, 3);
ops_set_halo_dirtybit3(&args[0], range);
ops_set_halo_dirtybit3(&args[1], range);
if (OPS_diags > 1) {
// Update kernel record
ops_timers_core(&c2, &t2);
OPS_kernels[132].mpi_time += t2 - t1;
OPS_kernels[132].transfer += ops_compute_transfer(dim, start, end, &arg0);
OPS_kernels[132].transfer += ops_compute_transfer(dim, start, end, &arg1);
}
}
|
6b7ab151ceac4030cac3e587f74de1a1eb4570ce.cu
|
//
// auto-generated by ops.py
//
__constant__ int xdim0_update_halo_kernel5_plus_4_left;
int xdim0_update_halo_kernel5_plus_4_left_h = -1;
__constant__ int ydim0_update_halo_kernel5_plus_4_left;
int ydim0_update_halo_kernel5_plus_4_left_h = -1;
__constant__ int xdim1_update_halo_kernel5_plus_4_left;
int xdim1_update_halo_kernel5_plus_4_left_h = -1;
__constant__ int ydim1_update_halo_kernel5_plus_4_left;
int ydim1_update_halo_kernel5_plus_4_left_h = -1;
#undef OPS_ACC0
#undef OPS_ACC1
#define OPS_ACC0(x, y, z) \
(x + xdim0_update_halo_kernel5_plus_4_left * (y) + \
xdim0_update_halo_kernel5_plus_4_left * \
ydim0_update_halo_kernel5_plus_4_left * (z))
#define OPS_ACC1(x, y, z) \
(x + xdim1_update_halo_kernel5_plus_4_left * (y) + \
xdim1_update_halo_kernel5_plus_4_left * \
ydim1_update_halo_kernel5_plus_4_left * (z))
// user function
__device__
inline void
update_halo_kernel5_plus_4_left_gpu(double *vol_flux_z, double *mass_flux_z,
const int *fields) {
if (fields[FIELD_VOL_FLUX_Z] == 1)
vol_flux_z[OPS_ACC0(0, 0, 0)] = (vol_flux_z[OPS_ACC0(4, 0, 0)]);
if (fields[FIELD_MASS_FLUX_Z] == 1)
mass_flux_z[OPS_ACC1(0, 0, 0)] = (mass_flux_z[OPS_ACC1(4, 0, 0)]);
}
#undef OPS_ACC0
#undef OPS_ACC1
__global__ void ops_update_halo_kernel5_plus_4_left(double *__restrict arg0,
double *__restrict arg1,
const int *__restrict arg2,
int size0, int size1,
int size2) {
int idx_z = blockDim.z * blockIdx.z + threadIdx.z;
int idx_y = blockDim.y * blockIdx.y + threadIdx.y;
int idx_x = blockDim.x * blockIdx.x + threadIdx.x;
arg0 += idx_x * 1 * 1 +
idx_y * 1 * 1 * xdim0_update_halo_kernel5_plus_4_left +
idx_z * 1 * 1 * xdim0_update_halo_kernel5_plus_4_left *
ydim0_update_halo_kernel5_plus_4_left;
arg1 += idx_x * 1 * 1 +
idx_y * 1 * 1 * xdim1_update_halo_kernel5_plus_4_left +
idx_z * 1 * 1 * xdim1_update_halo_kernel5_plus_4_left *
ydim1_update_halo_kernel5_plus_4_left;
if (idx_x < size0 && idx_y < size1 && idx_z < size2) {
update_halo_kernel5_plus_4_left_gpu(arg0, arg1, arg2);
}
}
// host stub function
void ops_par_loop_update_halo_kernel5_plus_4_left(char const *name,
ops_block block, int dim,
int *range, ops_arg arg0,
ops_arg arg1, ops_arg arg2) {
// Timing
double t1, t2, c1, c2;
ops_arg args[3] = {arg0, arg1, arg2};
#ifdef CHECKPOINTING
if (!ops_checkpointing_before(args, 3, range, 132))
return;
#endif
if (OPS_diags > 1) {
ops_timing_realloc(132, "update_halo_kernel5_plus_4_left");
OPS_kernels[132].count++;
ops_timers_core(&c1, &t1);
}
// compute locally allocated range for the sub-block
int start[3];
int end[3];
#ifdef OPS_MPI
sub_block_list sb = OPS_sub_block_list[block->index];
if (!sb->owned)
return;
for (int n = 0; n < 3; n++) {
start[n] = sb->decomp_disp[n];
end[n] = sb->decomp_disp[n] + sb->decomp_size[n];
if (start[n] >= range[2 * n]) {
start[n] = 0;
} else {
start[n] = range[2 * n] - start[n];
}
if (sb->id_m[n] == MPI_PROC_NULL && range[2 * n] < 0)
start[n] = range[2 * n];
if (end[n] >= range[2 * n + 1]) {
end[n] = range[2 * n + 1] - sb->decomp_disp[n];
} else {
end[n] = sb->decomp_size[n];
}
if (sb->id_p[n] == MPI_PROC_NULL &&
(range[2 * n + 1] > sb->decomp_disp[n] + sb->decomp_size[n]))
end[n] += (range[2 * n + 1] - sb->decomp_disp[n] - sb->decomp_size[n]);
}
#else
for (int n = 0; n < 3; n++) {
start[n] = range[2 * n];
end[n] = range[2 * n + 1];
}
#endif
int x_size = MAX(0, end[0] - start[0]);
int y_size = MAX(0, end[1] - start[1]);
int z_size = MAX(0, end[2] - start[2]);
int xdim0 = args[0].dat->size[0];
int ydim0 = args[0].dat->size[1];
int xdim1 = args[1].dat->size[0];
int ydim1 = args[1].dat->size[1];
if (xdim0 != xdim0_update_halo_kernel5_plus_4_left_h ||
ydim0 != ydim0_update_halo_kernel5_plus_4_left_h ||
xdim1 != xdim1_update_halo_kernel5_plus_4_left_h ||
ydim1 != ydim1_update_halo_kernel5_plus_4_left_h) {
cudaMemcpyToSymbol(xdim0_update_halo_kernel5_plus_4_left, &xdim0,
sizeof(int));
xdim0_update_halo_kernel5_plus_4_left_h = xdim0;
cudaMemcpyToSymbol(ydim0_update_halo_kernel5_plus_4_left, &ydim0,
sizeof(int));
ydim0_update_halo_kernel5_plus_4_left_h = ydim0;
cudaMemcpyToSymbol(xdim1_update_halo_kernel5_plus_4_left, &xdim1,
sizeof(int));
xdim1_update_halo_kernel5_plus_4_left_h = xdim1;
cudaMemcpyToSymbol(ydim1_update_halo_kernel5_plus_4_left, &ydim1,
sizeof(int));
ydim1_update_halo_kernel5_plus_4_left_h = ydim1;
}
int *arg2h = (int *)arg2.data;
dim3 grid((x_size - 1) / OPS_block_size_x + 1,
(y_size - 1) / OPS_block_size_y + 1, z_size);
dim3 tblock(OPS_block_size_x, OPS_block_size_y, 1);
int consts_bytes = 0;
consts_bytes += ROUND_UP(NUM_FIELDS * sizeof(int));
reallocConstArrays(consts_bytes);
consts_bytes = 0;
arg2.data = OPS_consts_h + consts_bytes;
arg2.data_d = OPS_consts_d + consts_bytes;
for (int d = 0; d < NUM_FIELDS; d++)
((int *)arg2.data)[d] = arg2h[d];
consts_bytes += ROUND_UP(NUM_FIELDS * sizeof(int));
mvConstArraysToDevice(consts_bytes);
int dat0 = args[0].dat->elem_size;
int dat1 = args[1].dat->elem_size;
char *p_a[3];
// set up initial pointers
int d_m[OPS_MAX_DIM];
#ifdef OPS_MPI
for (int d = 0; d < dim; d++)
d_m[d] =
args[0].dat->d_m[d] + OPS_sub_dat_list[args[0].dat->index]->d_im[d];
#else
for (int d = 0; d < dim; d++)
d_m[d] = args[0].dat->d_m[d];
#endif
int base0 = dat0 * 1 * (start[0] * args[0].stencil->stride[0] -
args[0].dat->base[0] - d_m[0]);
base0 = base0 +
dat0 * args[0].dat->size[0] * (start[1] * args[0].stencil->stride[1] -
args[0].dat->base[1] - d_m[1]);
base0 = base0 +
dat0 * args[0].dat->size[0] * args[0].dat->size[1] *
(start[2] * args[0].stencil->stride[2] - args[0].dat->base[2] -
d_m[2]);
p_a[0] = (char *)args[0].data_d + base0;
#ifdef OPS_MPI
for (int d = 0; d < dim; d++)
d_m[d] =
args[1].dat->d_m[d] + OPS_sub_dat_list[args[1].dat->index]->d_im[d];
#else
for (int d = 0; d < dim; d++)
d_m[d] = args[1].dat->d_m[d];
#endif
int base1 = dat1 * 1 * (start[0] * args[1].stencil->stride[0] -
args[1].dat->base[0] - d_m[0]);
base1 = base1 +
dat1 * args[1].dat->size[0] * (start[1] * args[1].stencil->stride[1] -
args[1].dat->base[1] - d_m[1]);
base1 = base1 +
dat1 * args[1].dat->size[0] * args[1].dat->size[1] *
(start[2] * args[1].stencil->stride[2] - args[1].dat->base[2] -
d_m[2]);
p_a[1] = (char *)args[1].data_d + base1;
ops_H_D_exchanges_device(args, 3);
ops_halo_exchanges(args, 3, range);
if (OPS_diags > 1) {
ops_timers_core(&c2, &t2);
OPS_kernels[132].mpi_time += t2 - t1;
}
// call kernel wrapper function, passing in pointers to data
ops_update_halo_kernel5_plus_4_left<<<grid, tblock>>>(
(double *)p_a[0], (double *)p_a[1], (int *)arg2.data_d, x_size, y_size,
z_size);
if (OPS_diags > 1) {
cutilSafeCall(cudaDeviceSynchronize());
ops_timers_core(&c1, &t1);
OPS_kernels[132].time += t1 - t2;
}
ops_set_dirtybit_device(args, 3);
ops_set_halo_dirtybit3(&args[0], range);
ops_set_halo_dirtybit3(&args[1], range);
if (OPS_diags > 1) {
// Update kernel record
ops_timers_core(&c2, &t2);
OPS_kernels[132].mpi_time += t2 - t1;
OPS_kernels[132].transfer += ops_compute_transfer(dim, start, end, &arg0);
OPS_kernels[132].transfer += ops_compute_transfer(dim, start, end, &arg1);
}
}
|
51a9cd7b02f46231d6ff62d47268c55fcd9ae488.hip
|
// !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
#include "cuda_benchmark.h"
#include <iostream>
#include <ratio>
#include <chrono>
__global__ void BM_addInt(int* d_a, int* d_b, int* d_c)
{
*d_c = *d_a + *d_b;
}
static void BM_CUDABasicLatencyTest(benchmark::State& state)
{
while (state.KeepRunning())
{
// Set CUDA deivce
hipSetDevice(benchmarkingDevice);
// Setting Host Memory Variables
auto h_a = 1, h_b = 1, h_c = 0;
// Reserve pointers on Host and allocate memory on device
int *d_a, *d_b, *d_c;
hipMalloc(&d_a, sizeof(int));
hipMalloc(&d_b, sizeof(int));
hipMalloc(&d_c, sizeof(int));
// Move input values to the device
hipMemcpy(d_a, &h_a, sizeof(int), hipMemcpyHostToDevice);
hipMemcpy(d_b, &h_b, sizeof(int), hipMemcpyHostToDevice);
BM_addInt << <1, 1 >> >(d_a, d_b, d_c);
// Move output value to the host
hipMemcpy(&h_c, d_c, sizeof(int), hipMemcpyDeviceToHost);
// Free memory on the device
hipFree(d_a);
hipFree(d_b);
hipFree(d_c);
hipDeviceReset();
}
}
BENCHMARK(BM_CUDABasicLatencyTest)
->MinTime(1.0);
__global__ void BM_convergedKernel(float* d_in, float* d_out)
{
int globalId = blockIdx.x * blockDim.x + threadIdx.x;
float x = float(d_in[globalId]);
float y = float(threadIdx.x);
if (x <= 10.0f)
{
for (int i = 0; i < (gridDim.x * blockDim.x - 1); i++)
{
y = y + d_in[i];
}
}
else
{
for (int i = 0; i < (gridDim.x * blockDim.x - 1); i++)
{
y = y - d_in[i];
}
}
}
// Benchmarks warp-converged control flow (see BM_convergedKernel).
// Data size is supplied per-case via ->Args({...}).
static void BM_CUDAConvergedExecution(benchmark::State& state)
{
    int dataSize = state.range(0);
    // Set CUDA device
    hipSetDevice(benchmarkingDevice);
    // Use the device's maximum block size and derive the grid from it.
    hipDeviceProp_t prop;
    hipGetDeviceProperties(&prop, benchmarkingDevice);
    int threadsPerBlock = prop.maxThreadsPerBlock;
    int gridSize = int(dataSize / threadsPerBlock);
    // Generate uniformly random input in [0, 1]; every value satisfies the
    // kernel's x <= 10.0f branch, so all warps stay converged.
    std::vector<float> inputVector(dataSize, 0);
    for (int i = 0; i < dataSize; i++)
    {
        inputVector[i] = static_cast<float>(rand()) / static_cast<float>(RAND_MAX);
    }
    float* h_input = &inputVector[0];
    // Allocate device buffers.
    float *d_input, *d_output;
    hipMalloc(&d_input, dataSize * sizeof(float));
    hipMalloc(&d_output, dataSize * sizeof(float));
    // Copy data to device once, outside the timed loop.
    hipMemcpy(d_input, h_input, dataSize * sizeof(float), hipMemcpyHostToDevice);
    while (state.KeepRunning())
    {
        auto start = std::chrono::high_resolution_clock::now();
        BM_convergedKernel << <gridSize, threadsPerBlock >> >(d_input, d_output);
        // Fix: kernel launches are asynchronous -- without this barrier the
        // manual timer measured only launch overhead, not kernel execution.
        hipDeviceSynchronize();
        auto end = std::chrono::high_resolution_clock::now();
        auto elapsed_seconds =
            std::chrono::duration_cast<std::chrono::duration<double>>(
                end - start);
        state.SetIterationTime(elapsed_seconds.count());
    }
    // Copy results back to the host.
    std::vector<float> outputVector(dataSize, 0);
    float* h_output = &outputVector[0];
    hipMemcpy(h_output, d_output, dataSize * sizeof(float), hipMemcpyDeviceToHost);
    // Free memory and reset device
    hipFree(d_input);
    hipFree(d_output);
    hipDeviceReset();
    // Report elements processed across all iterations.
    state.SetBytesProcessed(state.iterations() * dataSize);
}
BENCHMARK(BM_CUDAConvergedExecution)
->MinTime(1.0)
->UseManualTime()
->Args({ 2048 })
->Args({ 8192 })
->Args({ 65536 })
->Args({ 524288 })
->Args({ 8388608 })
->Args({ 16777216 });
// Divergent-control-flow benchmark kernel: inputs are uniform in [0, 1] and
// split at 0.5f, so the threads of a warp disagree on the branch and the
// warp executes both loop bodies under masking.
// Fix: the original never stored y, so the compiler could eliminate the
// loops entirely and the host-side copy of d_out read uninitialized memory;
// writing the result keeps the measured work alive.
__global__ void BM_divergedKernel(float* d_in, float* d_out)
{
    int globalId = blockIdx.x * blockDim.x + threadIdx.x;
    float x = float(d_in[globalId]);
    float y = float(threadIdx.x);
    if (x < 0.5f)
    {
        for (int i = 0; i < (gridDim.x * blockDim.x - 1); i++)
        {
            y = y + d_in[i];
        }
    }
    else
    {
        for (int i = 0; i < (gridDim.x * blockDim.x - 1); i++)
        {
            y = y - d_in[i];
        }
    }
    // Publish the per-thread result so the computation is observable.
    d_out[globalId] = y;
}
// Benchmarks warp-divergent control flow (see BM_divergedKernel).
// Data size is supplied per-case via ->Args({...}).
static void BM_CUDADivergedExecution(benchmark::State& state)
{
    int dataSize = state.range(0);
    // Set CUDA device
    hipSetDevice(benchmarkingDevice);
    // Use the device's maximum block size and derive the grid from it.
    hipDeviceProp_t prop;
    hipGetDeviceProperties(&prop, benchmarkingDevice);
    int threadsPerBlock = prop.maxThreadsPerBlock;
    int gridSize = int(dataSize / threadsPerBlock);
    // Generate uniformly random input in [0, 1]; the kernel's split at 0.5f
    // sends roughly half the threads of each warp down each branch.
    std::vector<float> inputVector(dataSize, 0);
    for (int i = 0; i < dataSize; i++)
    {
        inputVector[i] = static_cast<float>(rand()) / static_cast<float>(RAND_MAX);
    }
    float* h_input = &inputVector[0];
    // Allocate device buffers.
    float *d_input, *d_output;
    hipMalloc(&d_input, dataSize * sizeof(float));
    hipMalloc(&d_output, dataSize * sizeof(float));
    // Copy data to device once, outside the timed loop.
    hipMemcpy(d_input, h_input, dataSize * sizeof(float), hipMemcpyHostToDevice);
    while (state.KeepRunning())
    {
        auto start = std::chrono::high_resolution_clock::now();
        BM_divergedKernel << <gridSize, threadsPerBlock >> >(d_input, d_output);
        // Fix: kernel launches are asynchronous -- without this barrier the
        // manual timer measured only launch overhead, not kernel execution.
        hipDeviceSynchronize();
        auto end = std::chrono::high_resolution_clock::now();
        auto elapsed_seconds =
            std::chrono::duration_cast<std::chrono::duration<double>>(
                end - start);
        state.SetIterationTime(elapsed_seconds.count());
    }
    // Copy results back to the host.
    std::vector<float> outputVector(dataSize, 0);
    float* h_output = &outputVector[0];
    hipMemcpy(h_output, d_output, dataSize * sizeof(float), hipMemcpyDeviceToHost);
    // Free memory and reset device
    hipFree(d_input);
    hipFree(d_output);
    hipDeviceReset();
    // Report elements processed across all iterations.
    state.SetBytesProcessed(state.iterations() * dataSize);
}
BENCHMARK(BM_CUDADivergedExecution)
->MinTime(1.0)
->UseManualTime()
->Args({2048})
->Args({8192})
->Args({65536})
->Args({524288})
->Args({8388608})
->Args({16777216});
// Many-way divergence benchmark kernel: threads 1..10 of each block each hit
// a distinct switch arm (y = 32..41) and all other threads take the default
// (y = 42). The switch is the point of the benchmark -- do not simplify it
// to branchless arithmetic. Result is scaled by the input and stored.
__global__ void BM_multiDivergedKernel(float* d_in, float* d_out)
{
int globalId = blockIdx.x * blockDim.x + threadIdx.x;
float x = float(d_in[globalId]);
// Initial value is always overwritten: every switch path assigns y.
float y = float(threadIdx.x);
int threadId = threadIdx.x;
switch (threadId)
{
case 1: y = 32;
break;
case 2: y = 33;
break;
case 3: y = 34;
break;
case 4: y = 35;
break;
case 5: y = 36;
break;
case 6: y = 37;
break;
case 7: y = 38;
break;
case 8: y = 39;
break;
case 9: y = 40;
break;
case 10: y = 41;
break;
default: y = 42;
break;
}
y = y * x;
d_out[globalId] = y;
}
// Benchmarks many-way (switch-based) warp divergence.
// Data size is supplied per-case via ->Args({...}).
static void BM_CUDAMultiDivergedExecution(benchmark::State& state)
{
    int dataSize = state.range(0);
    // Set CUDA device
    hipSetDevice(benchmarkingDevice);
    // Use the device's maximum block size and derive the grid from it.
    hipDeviceProp_t prop;
    hipGetDeviceProperties(&prop, benchmarkingDevice);
    int threadsPerBlock = prop.maxThreadsPerBlock;
    int gridSize = int(dataSize / threadsPerBlock);
    // Generate uniformly random input in [0, 1].
    std::vector<float> inputVector(dataSize, 0);
    for (int i = 0; i < dataSize; i++)
    {
        inputVector[i] = static_cast<float>(rand()) / static_cast<float>(RAND_MAX);
    }
    float* h_input = &inputVector[0];
    // Allocate device buffers.
    float *d_input, *d_output;
    hipMalloc(&d_input, dataSize * sizeof(float));
    hipMalloc(&d_output, dataSize * sizeof(float));
    // Copy data to device once, outside the timed loop.
    hipMemcpy(d_input, h_input, dataSize * sizeof(float), hipMemcpyHostToDevice);
    while (state.KeepRunning())
    {
        auto start = std::chrono::high_resolution_clock::now();
        // Fix: the original launched BM_divergedKernel here, so this
        // benchmark silently duplicated BM_CUDADivergedExecution instead of
        // measuring the multi-way switch kernel.
        BM_multiDivergedKernel << <gridSize, threadsPerBlock >> >(d_input, d_output);
        // Fix: synchronize so the manual timer covers kernel execution, not
        // just the asynchronous launch.
        hipDeviceSynchronize();
        auto end = std::chrono::high_resolution_clock::now();
        auto elapsed_seconds =
            std::chrono::duration_cast<std::chrono::duration<double>>(
                end - start);
        state.SetIterationTime(elapsed_seconds.count());
    }
    // Copy results back to the host.
    std::vector<float> outputVector(dataSize, 0);
    float* h_output = &outputVector[0];
    hipMemcpy(h_output, d_output, dataSize * sizeof(float), hipMemcpyDeviceToHost);
    // Free memory and reset device
    hipFree(d_input);
    hipFree(d_output);
    hipDeviceReset();
    // Report elements processed across all iterations.
    state.SetBytesProcessed(state.iterations() * dataSize);
}
BENCHMARK(BM_CUDAMultiDivergedExecution)
->MinTime(1.0)
->UseManualTime()
->Args({2048})
->Args({8192})
->Args({65536})
->Args({524288})
->Args({8388608})
->Args({16777216});
// FLOPS probe kernel: one float multiply per element, scaling each input
// value by the thread's lane index within its block.
__global__ void BM_multFloat(float* d_in, float* d_out)
{
    const int idx = blockIdx.x * blockDim.x + threadIdx.x;
    const float scale = float(threadIdx.x);
    d_out[idx] = scale * d_in[idx];
}
static void BM_CUDAFLOPS_GeneratedData(benchmark::State& state)
{
// Dynamic Data Input
int dataSize = state.range(0);
// Set CUDA deivce
hipSetDevice(benchmarkingDevice);
// Get max threads per block
hipDeviceProp_t prop;
hipGetDeviceProperties(&prop, benchmarkingDevice);
int threadsPerBlock = prop.maxThreadsPerBlock;
// Calculate Grid size
int gridSize = int(dataSize / threadsPerBlock);
// Generate input data
std::vector<float> inputVector(dataSize, 0);
for (int i = 0; i < dataSize; i++)
{
inputVector[i] = static_cast<float>(rand()) / static_cast<float>(RAND_MAX);
}
float* h_input = &inputVector[0];
// Allocate memory on device
float *d_input, *d_output;
hipMalloc(&d_input, dataSize * sizeof(float));
hipMalloc(&d_output, dataSize * sizeof(float));
// Copy data to device
hipMemcpy(d_input, h_input, dataSize * sizeof(float), hipMemcpyHostToDevice);
while (state.KeepRunning())
{
auto start = std::chrono::high_resolution_clock::now();
BM_multFloat << <gridSize, threadsPerBlock >> >(d_input, d_output);
auto end = std::chrono::high_resolution_clock::now();
auto elapsed_seconds =
std::chrono::duration_cast<std::chrono::duration<double>>(
end - start);
state.SetIterationTime(elapsed_seconds.count());
}
// Copy Data from Host to Device
std::vector<float> outputVector(dataSize, 0);
float* h_output = &outputVector[0];
hipMemcpy(h_output, d_output, dataSize * sizeof(float), hipMemcpyDeviceToHost);
// Free memory and reset device
hipFree(d_input);
hipFree(d_output);
hipDeviceReset();
// Calculate Elements processed
state.SetBytesProcessed(state.iterations() * dataSize);
}
BENCHMARK(BM_CUDAFLOPS_GeneratedData)
->UseManualTime()
->MinTime(1.0)
->Args({ 2048 })
->Args({ 8192 })
->Args({ 65536 })
->Args({ 524288 })
->Args({ 8388608 })
->Args({ 16777216 })
->Args({ 33554432 });
// Integer-ops probe kernel: one int multiply per element, scaling each
// input value by the thread's lane index within its block.
__global__ void BM_multInt(int* d_in, int* d_out)
{
    const int idx = blockIdx.x * blockDim.x + threadIdx.x;
    const int scale = threadIdx.x;
    d_out[idx] = scale * d_in[idx];
}
static void BM_CUDAIntOPS_GeneratedData(benchmark::State& state)
{
// Dynamic Data Input
int dataSize = state.range(0);
// Set CUDA deivce
hipSetDevice(benchmarkingDevice);
// Get max threads per block
hipDeviceProp_t prop;
hipGetDeviceProperties(&prop, benchmarkingDevice);
int threadsPerBlock = prop.maxThreadsPerBlock;
// Calculate Grid size
int gridSize = int(dataSize / threadsPerBlock);
// Generate input data
std::vector<int> inputVector(dataSize, 0);
for (int i = 0; i < dataSize; i++)
{
inputVector[i] = static_cast<int>(rand());
}
int* h_input = &inputVector[0];
// Allocate memory on device
int *d_input, *d_output;
hipMalloc(&d_input, dataSize * sizeof(int));
hipMalloc(&d_output, dataSize * sizeof(int));
// Copy data to device
hipMemcpy(d_input, h_input, dataSize * sizeof(int), hipMemcpyHostToDevice);
while (state.KeepRunning())
{
auto start = std::chrono::high_resolution_clock::now();
BM_multInt << <gridSize, threadsPerBlock >> >(d_input, d_output);
auto end = std::chrono::high_resolution_clock::now();
auto elapsed_seconds =
std::chrono::duration_cast<std::chrono::duration<double>>(
end - start);
state.SetIterationTime(elapsed_seconds.count());
}
// Copy Data from Host to Device
std::vector<int> outputVector(dataSize, 0);
int* h_output = &outputVector[0];
hipMemcpy(h_output, d_output, dataSize * sizeof(int), hipMemcpyDeviceToHost);
// Free memory and reset device
hipFree(d_input);
hipFree(d_output);
hipDeviceReset();
// Calculate Elements processed
state.SetBytesProcessed(state.iterations() * dataSize);
}
BENCHMARK(BM_CUDAIntOPS_GeneratedData)
->UseManualTime()
->MinTime(1.0)
->Args({ 2048 })
->Args({ 8192 })
->Args({ 65536 })
->Args({ 524288 })
->Args({ 8388608 })
->Args({ 16777216 })
->Args({ 33554432 });
__global__ void BM_multFloat2(float2* d_in, float2* d_out)
{
int globalId = blockIdx.x * blockDim.x + threadIdx.x;
float2 x = d_in[globalId];
float2 y = {1.01010101f, 1.01010101f};
y.x = y.x * x.x;
y.y = y.y * x.y;
d_out[globalId] = y;
}
static void BM_CUDAFloat2OPS_GeneratedData(benchmark::State& state)
{
// Dynamic Data Input
int dataSize = state.range(0);
// Set CUDA deivce
hipSetDevice(benchmarkingDevice);
// Get max threads per block
hipDeviceProp_t prop;
hipGetDeviceProperties(&prop, benchmarkingDevice);
int threadsPerBlock = prop.maxThreadsPerBlock;
// Calculate Grid size
int gridSize = int(dataSize / threadsPerBlock);
// Generate input data
std::vector<float2> inputVector(dataSize, {0, 0});
for (int i = 0; i < dataSize; i++)
{
inputVector[i] = {static_cast<float>(rand()) * 1.010101f, static_cast<float>(rand()) * 1.010101f};
}
float2* h_input = &inputVector[0];
// Allocate memory on device
float2 *d_input, *d_output;
hipMalloc(&d_input, dataSize * sizeof(float2));
hipMalloc(&d_output, dataSize * sizeof(float2));
// Copy data to device
hipMemcpy(d_input, h_input, dataSize * sizeof(float2), hipMemcpyHostToDevice);
while (state.KeepRunning())
{
auto start = std::chrono::high_resolution_clock::now();
BM_multFloat2 << <gridSize, threadsPerBlock >> >(d_input, d_output);
auto end = std::chrono::high_resolution_clock::now();
auto elapsed_seconds =
std::chrono::duration_cast<std::chrono::duration<double>>(
end - start);
state.SetIterationTime(elapsed_seconds.count());
}
// Copy Data from Host to Device
std::vector<float2> outputVector(dataSize, {0,0});
float2* h_output = &outputVector[0];
hipMemcpy(h_output, d_output, dataSize * sizeof(float2), hipMemcpyDeviceToHost);
// Free memory and reset device
hipFree(d_input);
hipFree(d_output);
hipDeviceReset();
// Calculate Elements processed
state.SetBytesProcessed(state.iterations() * dataSize);
}
BENCHMARK(BM_CUDAFloat2OPS_GeneratedData)
->UseManualTime()
->MinTime(1.0)
->Args({ 2048 })
->Args({ 8192 })
->Args({ 65536 })
->Args({ 524288 })
->Args({ 8388608 })
->Args({ 16777216 })
->Args({ 33554432 });
static void BM_CUDABandwidthHostToDevice(benchmark::State& state)
{
// Dynamic Data Input
int dataSize = state.range(0);
// Set CUDA deivce
hipSetDevice(benchmarkingDevice);
// Get max threads per block
hipDeviceProp_t prop;
hipGetDeviceProperties(&prop, benchmarkingDevice);
int threadsPerBlock = prop.maxThreadsPerBlock;
// Generate input data
std::vector<int> inputVector(dataSize, 0);
int* h_input = &inputVector[0];
// Allocate memory on device
int* d_input;
hipMalloc(&d_input, dataSize * sizeof(int));
while (state.KeepRunning())
{
auto start = std::chrono::high_resolution_clock::now();
hipMemcpy(d_input, h_input, dataSize * sizeof(int), hipMemcpyHostToDevice);
auto end = std::chrono::high_resolution_clock::now();
auto elapsed_seconds =
std::chrono::duration_cast<std::chrono::duration<double>>(
end - start);
state.SetIterationTime(elapsed_seconds.count());
}
// Free memory and reset device
hipFree(d_input);
hipDeviceReset();
// Calculate Elements processed
state.SetBytesProcessed(state.iterations() * dataSize * sizeof(int));
}
BENCHMARK(BM_CUDABandwidthHostToDevice)
->UseManualTime()
->MinTime(1.0)
->Args({ 1 })
->Args({ 8 })
->Args({ 16 })
->Args({ 512 })
->Args({ 1024 })
->Args({ 16384 })
->Args({ 131072 })
->Args({ 1048576 })
->Args({ 8388608 })
->Args({ 16777216 })
->Args({ 33554432 })
->Args({ 67108864 })
->Args({ 134217728 });
static void BM_CUDABandwidthDeviceToHost(benchmark::State& state)
{
// Dynamic Data Input
int dataSize = state.range(0);
// Set CUDA deivce
hipSetDevice(benchmarkingDevice);
// Get max threads per block
hipDeviceProp_t prop;
hipGetDeviceProperties(&prop, benchmarkingDevice);
int threadsPerBlock = prop.maxThreadsPerBlock;
// Generate input data
std::vector<int> inoutVector(dataSize, 0);
int* h_inout = &inoutVector[0];
// Allocate memory on device
int* d_device;
hipMalloc(&d_device, dataSize * sizeof(int));
// Copy Data from Host to Device
hipMemcpy(d_device, h_inout, dataSize * sizeof(int), hipMemcpyHostToDevice);
while (state.KeepRunning())
{
auto start = std::chrono::high_resolution_clock::now();
hipMemcpy(h_inout, d_device, dataSize * sizeof(int), hipMemcpyDeviceToHost);
auto end = std::chrono::high_resolution_clock::now();
auto elapsed_seconds =
std::chrono::duration_cast<std::chrono::duration<double>>(
end - start);
state.SetIterationTime(elapsed_seconds.count());
}
// Free memory and reset device
hipFree(d_device);
hipDeviceReset();
// Calculate Elements processed
state.SetBytesProcessed(state.iterations() * dataSize * sizeof(int));
}
// Fix: this registration previously named BM_CUDABandwidthHostToDevice a
// second time, so BM_CUDABandwidthDeviceToHost was defined but never run.
BENCHMARK(BM_CUDABandwidthDeviceToHost)
->UseManualTime()
->MinTime(1.0)
->Args({ 1 })
->Args({ 8 })
->Args({ 16 })
->Args({ 512 })
->Args({ 1024 })
->Args({ 16384 })
->Args({ 131072 })
->Args({ 1048576 })
->Args({ 8388608 })
->Args({ 16777216 })
->Args({ 33554432 })
->Args({ 67108864 })
->Args({ 134217728 });
// Measures device-to-device copy bandwidth for dataSize ints; hipMemcpy
// with hipMemcpyDeviceToDevice is blocking, so manual timing needs no
// extra synchronization here.
static void BM_CUDABandwidthDeviceToDevice(benchmark::State& state)
{
    int dataSize = state.range(0);
    // Set CUDA device
    hipSetDevice(benchmarkingDevice);
    // (Removed an unused device-properties query; no launch config needed.)
    // Source data staged on the host.
    std::vector<int> inputVector(dataSize, 0);
    int* h_input = &inputVector[0];
    // Allocate source and destination buffers on the device.
    int *d_device, *d_input;
    hipMalloc(&d_device, dataSize * sizeof(int));
    hipMalloc(&d_input, dataSize * sizeof(int));
    // Copy Data from Host to Device
    hipMemcpy(d_input, h_input, dataSize * sizeof(int), hipMemcpyHostToDevice);
    while (state.KeepRunning())
    {
        auto start = std::chrono::high_resolution_clock::now();
        hipMemcpy(d_device, d_input, dataSize * sizeof(int), hipMemcpyDeviceToDevice);
        auto end = std::chrono::high_resolution_clock::now();
        auto elapsed_seconds =
            std::chrono::duration_cast<std::chrono::duration<double>>(
                end - start);
        state.SetIterationTime(elapsed_seconds.count());
    }
    // Free memory and reset device.
    // Fix: the original freed only d_device and leaked d_input.
    hipFree(d_input);
    hipFree(d_device);
    hipDeviceReset();
    // Report bytes copied across all iterations.
    state.SetBytesProcessed(state.iterations() * dataSize * sizeof(int));
}
BENCHMARK(BM_CUDABandwidthDeviceToDevice)
->UseManualTime()
->MinTime(1.0)
->Args({ 1 })
->Args({ 8 })
->Args({ 16 })
->Args({ 512 })
->Args({ 1024 })
->Args({ 16384 })
->Args({ 131072 })
->Args({ 1048576 })
->Args({ 8388608 })
->Args({ 16777216 })
->Args({ 33554432 })
->Args({ 67108864 })
->Args({ 134217728 });
// Grid-stride loop: thread t handles elements t, t+stride, t+2*stride, ...
// so any grid size covers all n elements and adjacent threads touch
// adjacent addresses. Each element is scaled by the owning thread's id.
__global__ void BM_gridStrideKernel(const int n, const int* d_in, int* d_out)
{
    const int tid = blockIdx.x * blockDim.x + threadIdx.x;
    const int stride = blockDim.x * gridDim.x;
    for (int i = tid; i < n; i += stride)
    {
        d_out[i] = d_in[i] * tid;
    }
}
static void BM_CUDAKernelCreation(benchmark::State& state)
{
// Dynamic Data Input
int kernels = state.range(0);
int dataSize = state.range(1);
int dataPerKernel = static_cast<int>(dataSize / kernels);
// Set CUDA deivce
hipSetDevice(benchmarkingDevice);
// Get max threads per block
hipDeviceProp_t prop;
hipGetDeviceProperties(&prop, benchmarkingDevice);
int threadsPerBlock = prop.maxThreadsPerBlock;
// Calculate Grid and Block size
int blockSize = threadsPerBlock > kernels ? kernels : threadsPerBlock;
int gridSize = int(kernels / blockSize);
// Generate input data
std::vector<int> inputVector(dataSize, 1);
int* h_input = &inputVector[0];
// Allocate memory on device
int *d_input, *d_output;
hipMalloc(&d_input, dataSize * sizeof(int));
hipMalloc(&d_output, dataSize * sizeof(int));
// Copy data to device
hipMemcpy(d_input, h_input, dataSize * sizeof(int), hipMemcpyHostToDevice);
while (state.KeepRunning())
{
auto start = std::chrono::high_resolution_clock::now();
BM_gridStrideKernel << <gridSize, blockSize >> >(dataSize, d_input, d_output);
auto end = std::chrono::high_resolution_clock::now();
auto elapsed_seconds =
std::chrono::duration_cast<std::chrono::duration<double>>(
end - start);
state.SetIterationTime(elapsed_seconds.count());
}
// Copy Data from Host to Device
std::vector<int> outputVector(dataSize, 0);
int* h_output = &outputVector[0];
hipMemcpy(h_output, d_output, dataSize * sizeof(int), hipMemcpyDeviceToHost);
// Free memory and reset device
hipFree(d_input);
hipFree(d_output);
hipDeviceReset();
// Calculate Elements processed
state.SetBytesProcessed(state.iterations() * dataSize * sizeof(int));
}
BENCHMARK(BM_CUDAKernelCreation)
->UseManualTime()
->MinTime(1.0)
->Args({ 1, 1 })
->Args({ 1, 10 })
->Args({ 8, 8 })
->Args({ 8, 80 })
->Args({ 16, 16 })
->Args({ 16, 160 })
->Args({ 512, 512 })
->Args({ 512, 5120 })
->Args({ 1024, 1024 })
->Args({ 1024, 10240 })
->Args({ 16384, 16384 })
->Args({ 16384, 163840 })
->Args({ 131072, 131072 })
->Args({ 131072, 1310720 })
->Args({ 1048576, 1048576 })
->Args({ 1048576, 10485760 })
->Args({ 1048576, 104857600 })
->Args({ 131072, 131072 })
->Args({ 65536, 131072 })
->Args({ 32768, 131072 })
->Args({ 16384, 131072 })
->Args({ 8192, 131072 })
->Args({ 4096, 131072 })
->Args({ 2048, 131072 })
->Args({ 1024, 131072 });
// Deliberately poor memory access pattern for the coalescence comparison:
// each thread walks its own contiguous chunk of objectsPerKernel elements,
// so at any instant the threads of a warp read addresses objectsPerKernel
// elements apart. Do not "fix" this into a grid-stride loop -- the bad
// pattern is what BM_CUDABadMemoryCoalescence measures.
__global__ void BM_badGridStrideKernel(const int objectsPerKernel, const int* d_in, int* d_out)
{
int id = blockIdx.x * blockDim.x + threadIdx.x;
// First element of this thread's private chunk.
int startElement = id * objectsPerKernel;
for (int i = startElement; i < (startElement + objectsPerKernel); i++)
{
d_out[i] = d_in[i] * id;
}
}
static void BM_CUDABadMemoryCoalescence(benchmark::State& state)
{
// Dynamic Data Input
int kernels = state.range(0);
int dataSize = state.range(1);
int dataPerKernel = static_cast<int>(dataSize / kernels);
// Set CUDA deivce
hipSetDevice(benchmarkingDevice);
// Get max threads per block
hipDeviceProp_t prop;
hipGetDeviceProperties(&prop, benchmarkingDevice);
int threadsPerBlock = prop.maxThreadsPerBlock;
// Calculate Grid and Block size
int blockSize = threadsPerBlock > kernels ? kernels : threadsPerBlock;
int gridSize = int(kernels / blockSize);
// Generate input data
std::vector<int> inputVector(dataSize, 1);
int* h_input = &inputVector[0];
// Allocate memory on device
int *d_input, *d_output;
hipMalloc(&d_input, dataSize * sizeof(int));
hipMalloc(&d_output, dataSize * sizeof(int));
// Copy data to device
hipMemcpy(d_input, h_input, dataSize * sizeof(int), hipMemcpyHostToDevice);
while (state.KeepRunning())
{
auto start = std::chrono::high_resolution_clock::now();
BM_badGridStrideKernel << <gridSize, blockSize >> >(dataPerKernel, d_input, d_output);
auto end = std::chrono::high_resolution_clock::now();
auto elapsed_seconds =
std::chrono::duration_cast<std::chrono::duration<double>>(
end - start);
state.SetIterationTime(elapsed_seconds.count());
}
// Copy Data from Host to Device
std::vector<int> outputVector(dataSize, 0);
int* h_output = &outputVector[0];
hipMemcpy(h_output, d_output, dataSize * sizeof(int), hipMemcpyDeviceToHost);
// Free memory and reset device
hipFree(d_input);
hipFree(d_output);
hipDeviceReset();
// Calculate Elements processed
state.SetBytesProcessed(state.iterations() * dataSize * sizeof(int));
}
BENCHMARK(BM_CUDABadMemoryCoalescence)
->UseManualTime()
->MinTime(1.0)
->Args({512, 4096})
->Args({4096, 4096})
->Args({1024, 8192})
->Args({8192, 8192})
->Args({16384, 131072})
->Args({131072, 131072})
->Args({16384, 131072})
->Args({131072, 131072})
->Args({131072, 1048576})
->Args({1048576, 1048576})
->Args({131072, 1048576})
->Args({1048576, 1048576})
->Args({1048576, 8388608})
->Args({8388608, 8388608});
__global__ void BM_goodGridStrideKernel(const int n, const int* d_in, int* d_out)
{
int id = blockIdx.x * blockDim.x + threadIdx.x;
for (int i = id; i < n; i += blockDim.x * gridDim.x)
{
d_out[i] = d_in[i] * id;
}
}
static void BM_CUDAGoodMemoryCoalescence(benchmark::State& state)
{
// Dynamic Data Input
int kernels = state.range(0);
int dataSize = state.range(1);
int dataPerKernel = static_cast<int>(dataSize / kernels);
// Set CUDA deivce
hipSetDevice(benchmarkingDevice);
// Get max threads per block
hipDeviceProp_t prop;
hipGetDeviceProperties(&prop, benchmarkingDevice);
int threadsPerBlock = prop.maxThreadsPerBlock;
// Calculate Grid and Block size
int blockSize = threadsPerBlock > kernels ? kernels : threadsPerBlock;
int gridSize = int(kernels / blockSize);
// Generate input data
std::vector<int> inputVector(dataSize, 1);
int* h_input = &inputVector[0];
// Allocate memory on device
int *d_input, *d_output;
hipMalloc(&d_input, dataSize * sizeof(int));
hipMalloc(&d_output, dataSize * sizeof(int));
// Copy data to device
hipMemcpy(d_input, h_input, dataSize * sizeof(int), hipMemcpyHostToDevice);
while (state.KeepRunning())
{
auto start = std::chrono::high_resolution_clock::now();
BM_goodGridStrideKernel << <gridSize, blockSize >> >(dataSize, d_input, d_output);
auto end = std::chrono::high_resolution_clock::now();
auto elapsed_seconds =
std::chrono::duration_cast<std::chrono::duration<double>>(
end - start);
state.SetIterationTime(elapsed_seconds.count());
}
// Copy Data from Host to Device
std::vector<int> outputVector(dataSize, 0);
int* h_output = &outputVector[0];
hipMemcpy(h_output, d_output, dataSize * sizeof(int), hipMemcpyDeviceToHost);
// Free memory and reset device
hipFree(d_input);
hipFree(d_output);
hipDeviceReset();
// Calculate Elements processed
state.SetBytesProcessed(state.iterations() * dataSize * sizeof(int));
}
BENCHMARK(BM_CUDAGoodMemoryCoalescence)
->UseManualTime()
->MinTime(1.0)
->Args({512, 4096})
->Args({4096, 4096})
->Args({1024, 8192})
->Args({8192, 8192})
->Args({16384, 131072})
->Args({131072, 131072})
->Args({16384, 131072})
->Args({131072, 131072})
->Args({131072, 1048576})
->Args({1048576, 1048576})
->Args({131072, 1048576})
->Args({1048576, 1048576})
->Args({1048576, 8388608})
->Args({8388608, 8388608});
// Lists the available GPUs and stores the chosen index in the global
// benchmarkingDevice. Reads the choice from stdin unless autoSelectDevice
// (declared in cuda_benchmark.h) preselects one.
void selectBenchmarkDevice()
{
    std::cout << "Select Benchmarking Device:\n";
    int nDevices;
    hipGetDeviceCount(&nDevices);
    // (Removed an unused local vector that the original declared here.)
    for (int i = 0; i < nDevices; i++)
    {
        hipDeviceProp_t prop;
        hipGetDeviceProperties(&prop, i);
        std::cout << "[" << i << "] " << prop.name << "\n";
    }
    std::cout << "\nSelect Device: ";
    int deviceSelection;
    if (autoSelectDevice == -1)
    {
        // Interactive selection; NOTE(review): input is not range-checked.
        std::cin >> deviceSelection;
    }
    else
    {
        deviceSelection = autoSelectDevice;
    }
    benchmarkingDevice = deviceSelection;
    std::cout << "\nSelected Device: " << deviceSelection << "\n\n";
}
// Entry point: pick the benchmarking GPU, then hand argc/argv to the
// Google Benchmark driver, which runs every BENCHMARK()-registered case.
int main(int argc, char** argv)
{
    selectBenchmarkDevice();
    ::benchmark::Initialize(&argc, argv);
    ::benchmark::RunSpecifiedBenchmarks();
    return 0;
}
|
51a9cd7b02f46231d6ff62d47268c55fcd9ae488.cu
|
#include "cuda_benchmark.h"
#include <iostream>
#include <ratio>
#include <chrono>
__global__ void BM_addInt(int* d_a, int* d_b, int* d_c)
{
*d_c = *d_a + *d_b;
}
static void BM_CUDABasicLatencyTest(benchmark::State& state)
{
while (state.KeepRunning())
{
// Set CUDA deivce
cudaSetDevice(benchmarkingDevice);
// Setting Host Memory Variables
auto h_a = 1, h_b = 1, h_c = 0;
// Reserve pointers on Host and allocate memory on device
int *d_a, *d_b, *d_c;
cudaMalloc(&d_a, sizeof(int));
cudaMalloc(&d_b, sizeof(int));
cudaMalloc(&d_c, sizeof(int));
// Move input values to the device
cudaMemcpy(d_a, &h_a, sizeof(int), cudaMemcpyHostToDevice);
cudaMemcpy(d_b, &h_b, sizeof(int), cudaMemcpyHostToDevice);
BM_addInt << <1, 1 >> >(d_a, d_b, d_c);
// Move output value to the host
cudaMemcpy(&h_c, d_c, sizeof(int), cudaMemcpyDeviceToHost);
// Free memory on the device
cudaFree(d_a);
cudaFree(d_b);
cudaFree(d_c);
cudaDeviceReset();
}
}
BENCHMARK(BM_CUDABasicLatencyTest)
->MinTime(1.0);
// Converged-control-flow benchmark kernel: inputs are generated in [0, 1],
// so every thread satisfies x <= 10.0f and all warps take the same branch.
// Each thread accumulates the input array into a private sum.
// Fix: the original never stored y, so the compiler could eliminate the
// loops entirely and the host-side copy of d_out read uninitialized memory;
// writing the result keeps the measured work alive.
__global__ void BM_convergedKernel(float* d_in, float* d_out)
{
    int globalId = blockIdx.x * blockDim.x + threadIdx.x;
    float x = float(d_in[globalId]);
    float y = float(threadIdx.x);
    if (x <= 10.0f)
    {
        for (int i = 0; i < (gridDim.x * blockDim.x - 1); i++)
        {
            y = y + d_in[i];
        }
    }
    else
    {
        for (int i = 0; i < (gridDim.x * blockDim.x - 1); i++)
        {
            y = y - d_in[i];
        }
    }
    // Publish the per-thread result so the computation is observable.
    d_out[globalId] = y;
}
static void BM_CUDAConvergedExecution(benchmark::State& state)
{
// Dynamic Data Input
int dataSize = state.range(0);
// Set CUDA deivce
cudaSetDevice(benchmarkingDevice);
// Get max threads per block
cudaDeviceProp prop;
cudaGetDeviceProperties(&prop, benchmarkingDevice);
int threadsPerBlock = prop.maxThreadsPerBlock;
// Calculate Grid size
int gridSize = int(dataSize / threadsPerBlock);
// Generate input data
std::vector<float> inputVector(dataSize, 0);
for (int i = 0; i < dataSize; i++)
{
inputVector[i] = static_cast<float>(rand()) / static_cast<float>(RAND_MAX);
}
float* h_input = &inputVector[0];
// Allocate memory on device
float *d_input, *d_output;
cudaMalloc(&d_input, dataSize * sizeof(float));
cudaMalloc(&d_output, dataSize * sizeof(float));
// Copy data to device
cudaMemcpy(d_input, h_input, dataSize * sizeof(float), cudaMemcpyHostToDevice);
while (state.KeepRunning())
{
auto start = std::chrono::high_resolution_clock::now();
BM_convergedKernel << <gridSize, threadsPerBlock >> >(d_input, d_output);
auto end = std::chrono::high_resolution_clock::now();
auto elapsed_seconds =
std::chrono::duration_cast<std::chrono::duration<double>>(
end - start);
state.SetIterationTime(elapsed_seconds.count());
}
// Copy Data from Host to Device
std::vector<float> outputVector(dataSize, 0);
float* h_output = &outputVector[0];
cudaMemcpy(h_output, d_output, dataSize * sizeof(float), cudaMemcpyDeviceToHost);
// Free memory and reset device
cudaFree(d_input);
cudaFree(d_output);
cudaDeviceReset();
// Calculate Elements processed
state.SetBytesProcessed(state.iterations() * dataSize);
}
BENCHMARK(BM_CUDAConvergedExecution)
->MinTime(1.0)
->UseManualTime()
->Args({ 2048 })
->Args({ 8192 })
->Args({ 65536 })
->Args({ 524288 })
->Args({ 8388608 })
->Args({ 16777216 });
// Divergent-control-flow benchmark kernel: inputs are uniform in [0, 1] and
// split at 0.5f, so the threads of a warp disagree on the branch and the
// warp executes both loop bodies under masking.
// Fix: the original never stored y, so the compiler could eliminate the
// loops entirely and the host-side copy of d_out read uninitialized memory;
// writing the result keeps the measured work alive.
__global__ void BM_divergedKernel(float* d_in, float* d_out)
{
    int globalId = blockIdx.x * blockDim.x + threadIdx.x;
    float x = float(d_in[globalId]);
    float y = float(threadIdx.x);
    if (x < 0.5f)
    {
        for (int i = 0; i < (gridDim.x * blockDim.x - 1); i++)
        {
            y = y + d_in[i];
        }
    }
    else
    {
        for (int i = 0; i < (gridDim.x * blockDim.x - 1); i++)
        {
            y = y - d_in[i];
        }
    }
    // Publish the per-thread result so the computation is observable.
    d_out[globalId] = y;
}
static void BM_CUDADivergedExecution(benchmark::State& state)
{
// Dynamic Data Input
int dataSize = state.range(0);
// Set CUDA deivce
cudaSetDevice(benchmarkingDevice);
// Get max threads per block
cudaDeviceProp prop;
cudaGetDeviceProperties(&prop, benchmarkingDevice);
int threadsPerBlock = prop.maxThreadsPerBlock;
// Calculate Grid size
int gridSize = int(dataSize / threadsPerBlock);
// Generate input data
std::vector<float> inputVector(dataSize, 0);
for (int i = 0; i < dataSize; i++)
{
inputVector[i] = static_cast<float>(rand()) / static_cast<float>(RAND_MAX);
}
float* h_input = &inputVector[0];
// Allocate memory on device
float *d_input, *d_output;
cudaMalloc(&d_input, dataSize * sizeof(float));
cudaMalloc(&d_output, dataSize * sizeof(float));
// Copy data to device
cudaMemcpy(d_input, h_input, dataSize * sizeof(float), cudaMemcpyHostToDevice);
while (state.KeepRunning())
{
auto start = std::chrono::high_resolution_clock::now();
BM_divergedKernel << <gridSize, threadsPerBlock >> >(d_input, d_output);
auto end = std::chrono::high_resolution_clock::now();
auto elapsed_seconds =
std::chrono::duration_cast<std::chrono::duration<double>>(
end - start);
state.SetIterationTime(elapsed_seconds.count());
}
// Copy Data from Host to Device
std::vector<float> outputVector(dataSize, 0);
float* h_output = &outputVector[0];
cudaMemcpy(h_output, d_output, dataSize * sizeof(float), cudaMemcpyDeviceToHost);
// Free memory and reset device
cudaFree(d_input);
cudaFree(d_output);
cudaDeviceReset();
// Calculate Elements processed
state.SetBytesProcessed(state.iterations() * dataSize);
}
BENCHMARK(BM_CUDADivergedExecution)
->MinTime(1.0)
->UseManualTime()
->Args({2048})
->Args({8192})
->Args({65536})
->Args({524288})
->Args({8388608})
->Args({16777216});
__global__ void BM_multiDivergedKernel(float* d_in, float* d_out)
{
int globalId = blockIdx.x * blockDim.x + threadIdx.x;
float x = float(d_in[globalId]);
float y = float(threadIdx.x);
int threadId = threadIdx.x;
switch (threadId)
{
case 1: y = 32;
break;
case 2: y = 33;
break;
case 3: y = 34;
break;
case 4: y = 35;
break;
case 5: y = 36;
break;
case 6: y = 37;
break;
case 7: y = 38;
break;
case 8: y = 39;
break;
case 9: y = 40;
break;
case 10: y = 41;
break;
default: y = 42;
break;
}
y = y * x;
d_out[globalId] = y;
}
// Benchmarks many-way (switch-based) warp divergence.
// Data size is supplied per-case via ->Args({...}).
static void BM_CUDAMultiDivergedExecution(benchmark::State& state)
{
    int dataSize = state.range(0);
    // Set CUDA device
    cudaSetDevice(benchmarkingDevice);
    // Use the device's maximum block size and derive the grid from it.
    cudaDeviceProp prop;
    cudaGetDeviceProperties(&prop, benchmarkingDevice);
    int threadsPerBlock = prop.maxThreadsPerBlock;
    int gridSize = int(dataSize / threadsPerBlock);
    // Generate uniformly random input in [0, 1].
    std::vector<float> inputVector(dataSize, 0);
    for (int i = 0; i < dataSize; i++)
    {
        inputVector[i] = static_cast<float>(rand()) / static_cast<float>(RAND_MAX);
    }
    float* h_input = &inputVector[0];
    // Allocate device buffers.
    float *d_input, *d_output;
    cudaMalloc(&d_input, dataSize * sizeof(float));
    cudaMalloc(&d_output, dataSize * sizeof(float));
    // Copy data to device once, outside the timed loop.
    cudaMemcpy(d_input, h_input, dataSize * sizeof(float), cudaMemcpyHostToDevice);
    while (state.KeepRunning())
    {
        auto start = std::chrono::high_resolution_clock::now();
        // Fix: the original launched BM_divergedKernel here, so this
        // benchmark silently duplicated BM_CUDADivergedExecution instead of
        // measuring the multi-way switch kernel.
        BM_multiDivergedKernel << <gridSize, threadsPerBlock >> >(d_input, d_output);
        // Fix: synchronize so the manual timer covers kernel execution, not
        // just the asynchronous launch.
        cudaDeviceSynchronize();
        auto end = std::chrono::high_resolution_clock::now();
        auto elapsed_seconds =
            std::chrono::duration_cast<std::chrono::duration<double>>(
                end - start);
        state.SetIterationTime(elapsed_seconds.count());
    }
    // Copy results back to the host.
    std::vector<float> outputVector(dataSize, 0);
    float* h_output = &outputVector[0];
    cudaMemcpy(h_output, d_output, dataSize * sizeof(float), cudaMemcpyDeviceToHost);
    // Free memory and reset device
    cudaFree(d_input);
    cudaFree(d_output);
    cudaDeviceReset();
    // Report elements processed across all iterations.
    state.SetBytesProcessed(state.iterations() * dataSize);
}
BENCHMARK(BM_CUDAMultiDivergedExecution)
->MinTime(1.0)
->UseManualTime()
->Args({2048})
->Args({8192})
->Args({65536})
->Args({524288})
->Args({8388608})
->Args({16777216});
// Multiplies each element by its (float-cast) thread index; one element per
// thread, no bounds check, so the launch must cover exactly dataSize elements.
__global__ void BM_multFloat(float* d_in, float* d_out)
{
    int globalId = blockIdx.x * blockDim.x + threadIdx.x;
    float x = d_in[globalId];
    float y = float(threadIdx.x);
    y = y * x;
    d_out[globalId] = y;
}
// Float-throughput benchmark over generated random data.
// Fix: cudaDeviceSynchronize() added inside the timed region — the launch is
// asynchronous, so without it only launch overhead was measured.
static void BM_CUDAFLOPS_GeneratedData(benchmark::State& state)
{
    // Dynamic data size from the benchmark arguments
    int dataSize = state.range(0);
    // Select the benchmarking device
    cudaSetDevice(benchmarkingDevice);
    // Query max threads per block for the launch configuration
    cudaDeviceProp prop;
    cudaGetDeviceProperties(&prop, benchmarkingDevice);
    int threadsPerBlock = prop.maxThreadsPerBlock;
    // Grid size (assumes dataSize is a multiple of threadsPerBlock)
    int gridSize = int(dataSize / threadsPerBlock);
    // Generate random input in [0, 1]
    std::vector<float> inputVector(dataSize, 0);
    for (int i = 0; i < dataSize; i++)
    {
        inputVector[i] = static_cast<float>(rand()) / static_cast<float>(RAND_MAX);
    }
    float* h_input = &inputVector[0];
    // Allocate device buffers and upload the input once
    float *d_input, *d_output;
    cudaMalloc(&d_input, dataSize * sizeof(float));
    cudaMalloc(&d_output, dataSize * sizeof(float));
    cudaMemcpy(d_input, h_input, dataSize * sizeof(float), cudaMemcpyHostToDevice);
    while (state.KeepRunning())
    {
        auto start = std::chrono::high_resolution_clock::now();
        BM_multFloat << <gridSize, threadsPerBlock >> >(d_input, d_output);
        cudaDeviceSynchronize();  // wait for the kernel before stopping the clock
        auto end = std::chrono::high_resolution_clock::now();
        auto elapsed_seconds =
            std::chrono::duration_cast<std::chrono::duration<double>>(
                end - start);
        state.SetIterationTime(elapsed_seconds.count());
    }
    // Copy the result back (outside the timed region)
    std::vector<float> outputVector(dataSize, 0);
    float* h_output = &outputVector[0];
    cudaMemcpy(h_output, d_output, dataSize * sizeof(float), cudaMemcpyDeviceToHost);
    // Release device memory and reset the device
    cudaFree(d_input);
    cudaFree(d_output);
    cudaDeviceReset();
    // Report elements processed across all iterations
    state.SetBytesProcessed(state.iterations() * dataSize);
}
BENCHMARK(BM_CUDAFLOPS_GeneratedData)
->UseManualTime()
->MinTime(1.0)
->Args({ 2048 })
->Args({ 8192 })
->Args({ 65536 })
->Args({ 524288 })
->Args({ 8388608 })
->Args({ 16777216 })
->Args({ 33554432 });
// Multiplies each element by its thread index; one element per thread,
// no bounds check, so the launch must cover exactly dataSize elements.
__global__ void BM_multInt(int* d_in, int* d_out)
{
    int globalId = blockIdx.x * blockDim.x + threadIdx.x;
    int x = d_in[globalId];
    int y = threadIdx.x;
    y = y * x;
    d_out[globalId] = y;
}
// Integer-throughput benchmark over generated random data.
// Fix: cudaDeviceSynchronize() added inside the timed region — the launch is
// asynchronous, so without it only launch overhead was measured.
static void BM_CUDAIntOPS_GeneratedData(benchmark::State& state)
{
    // Dynamic data size from the benchmark arguments
    int dataSize = state.range(0);
    // Select the benchmarking device
    cudaSetDevice(benchmarkingDevice);
    // Query max threads per block for the launch configuration
    cudaDeviceProp prop;
    cudaGetDeviceProperties(&prop, benchmarkingDevice);
    int threadsPerBlock = prop.maxThreadsPerBlock;
    // Grid size (assumes dataSize is a multiple of threadsPerBlock)
    int gridSize = int(dataSize / threadsPerBlock);
    // Generate random input
    std::vector<int> inputVector(dataSize, 0);
    for (int i = 0; i < dataSize; i++)
    {
        inputVector[i] = static_cast<int>(rand());
    }
    int* h_input = &inputVector[0];
    // Allocate device buffers and upload the input once
    int *d_input, *d_output;
    cudaMalloc(&d_input, dataSize * sizeof(int));
    cudaMalloc(&d_output, dataSize * sizeof(int));
    cudaMemcpy(d_input, h_input, dataSize * sizeof(int), cudaMemcpyHostToDevice);
    while (state.KeepRunning())
    {
        auto start = std::chrono::high_resolution_clock::now();
        BM_multInt << <gridSize, threadsPerBlock >> >(d_input, d_output);
        cudaDeviceSynchronize();  // wait for the kernel before stopping the clock
        auto end = std::chrono::high_resolution_clock::now();
        auto elapsed_seconds =
            std::chrono::duration_cast<std::chrono::duration<double>>(
                end - start);
        state.SetIterationTime(elapsed_seconds.count());
    }
    // Copy the result back (outside the timed region)
    std::vector<int> outputVector(dataSize, 0);
    int* h_output = &outputVector[0];
    cudaMemcpy(h_output, d_output, dataSize * sizeof(int), cudaMemcpyDeviceToHost);
    // Release device memory and reset the device
    cudaFree(d_input);
    cudaFree(d_output);
    cudaDeviceReset();
    // Report elements processed across all iterations
    state.SetBytesProcessed(state.iterations() * dataSize);
}
BENCHMARK(BM_CUDAIntOPS_GeneratedData)
->UseManualTime()
->MinTime(1.0)
->Args({ 2048 })
->Args({ 8192 })
->Args({ 65536 })
->Args({ 524288 })
->Args({ 8388608 })
->Args({ 16777216 })
->Args({ 33554432 });
// Component-wise multiply of a float2 by a fixed constant; one element per
// thread, no bounds check, so the launch must cover exactly dataSize elements.
__global__ void BM_multFloat2(float2* d_in, float2* d_out)
{
    int globalId = blockIdx.x * blockDim.x + threadIdx.x;
    float2 x = d_in[globalId];
    float2 y = {1.01010101f, 1.01010101f};
    y.x = y.x * x.x;
    y.y = y.y * x.y;
    d_out[globalId] = y;
}
// float2 (vector type) throughput benchmark over generated random data.
// Fix: cudaDeviceSynchronize() added inside the timed region — the launch is
// asynchronous, so without it only launch overhead was measured.
static void BM_CUDAFloat2OPS_GeneratedData(benchmark::State& state)
{
    // Dynamic data size from the benchmark arguments
    int dataSize = state.range(0);
    // Select the benchmarking device
    cudaSetDevice(benchmarkingDevice);
    // Query max threads per block for the launch configuration
    cudaDeviceProp prop;
    cudaGetDeviceProperties(&prop, benchmarkingDevice);
    int threadsPerBlock = prop.maxThreadsPerBlock;
    // Grid size (assumes dataSize is a multiple of threadsPerBlock)
    int gridSize = int(dataSize / threadsPerBlock);
    // Generate random float2 input
    std::vector<float2> inputVector(dataSize, {0, 0});
    for (int i = 0; i < dataSize; i++)
    {
        inputVector[i] = {static_cast<float>(rand()) * 1.010101f, static_cast<float>(rand()) * 1.010101f};
    }
    float2* h_input = &inputVector[0];
    // Allocate device buffers and upload the input once
    float2 *d_input, *d_output;
    cudaMalloc(&d_input, dataSize * sizeof(float2));
    cudaMalloc(&d_output, dataSize * sizeof(float2));
    cudaMemcpy(d_input, h_input, dataSize * sizeof(float2), cudaMemcpyHostToDevice);
    while (state.KeepRunning())
    {
        auto start = std::chrono::high_resolution_clock::now();
        BM_multFloat2 << <gridSize, threadsPerBlock >> >(d_input, d_output);
        cudaDeviceSynchronize();  // wait for the kernel before stopping the clock
        auto end = std::chrono::high_resolution_clock::now();
        auto elapsed_seconds =
            std::chrono::duration_cast<std::chrono::duration<double>>(
                end - start);
        state.SetIterationTime(elapsed_seconds.count());
    }
    // Copy the result back (outside the timed region)
    std::vector<float2> outputVector(dataSize, {0,0});
    float2* h_output = &outputVector[0];
    cudaMemcpy(h_output, d_output, dataSize * sizeof(float2), cudaMemcpyDeviceToHost);
    // Release device memory and reset the device
    cudaFree(d_input);
    cudaFree(d_output);
    cudaDeviceReset();
    // Report elements processed across all iterations
    state.SetBytesProcessed(state.iterations() * dataSize);
}
BENCHMARK(BM_CUDAFloat2OPS_GeneratedData)
->UseManualTime()
->MinTime(1.0)
->Args({ 2048 })
->Args({ 8192 })
->Args({ 65536 })
->Args({ 524288 })
->Args({ 8388608 })
->Args({ 16777216 })
->Args({ 33554432 });
// Measures host-to-device copy bandwidth for dataSize ints.
// cudaMemcpy H2D from pageable host memory blocks until the copy completes,
// so the wall-clock timing here is valid without an explicit synchronize.
// NOTE(review): this uses pageable host memory; pinned memory (cudaMallocHost)
// would typically show higher bandwidth — confirm which path is meant to be
// measured. (Removed an unused device-properties query.)
static void BM_CUDABandwidthHostToDevice(benchmark::State& state)
{
    // Dynamic data size from the benchmark arguments
    int dataSize = state.range(0);
    // Select the benchmarking device
    cudaSetDevice(benchmarkingDevice);
    // Host source buffer (pageable)
    std::vector<int> inputVector(dataSize, 0);
    int* h_input = &inputVector[0];
    // Device destination buffer
    int* d_input;
    cudaMalloc(&d_input, dataSize * sizeof(int));
    while (state.KeepRunning())
    {
        auto start = std::chrono::high_resolution_clock::now();
        cudaMemcpy(d_input, h_input, dataSize * sizeof(int), cudaMemcpyHostToDevice);
        auto end = std::chrono::high_resolution_clock::now();
        auto elapsed_seconds =
            std::chrono::duration_cast<std::chrono::duration<double>>(
                end - start);
        state.SetIterationTime(elapsed_seconds.count());
    }
    // Release device memory and reset the device
    cudaFree(d_input);
    cudaDeviceReset();
    // Report bytes moved across all iterations
    state.SetBytesProcessed(state.iterations() * dataSize * sizeof(int));
}
BENCHMARK(BM_CUDABandwidthHostToDevice)
->UseManualTime()
->MinTime(1.0)
->Args({ 1 })
->Args({ 8 })
->Args({ 16 })
->Args({ 512 })
->Args({ 1024 })
->Args({ 16384 })
->Args({ 131072 })
->Args({ 1048576 })
->Args({ 8388608 })
->Args({ 16777216 })
->Args({ 33554432 })
->Args({ 67108864 })
->Args({ 134217728 });
// Measures device-to-host copy bandwidth for dataSize ints.
// cudaMemcpy D2H into pageable host memory blocks until the copy completes,
// so the wall-clock timing is valid without an explicit synchronize.
static void BM_CUDABandwidthDeviceToHost(benchmark::State& state)
{
    // Dynamic data size from the benchmark arguments
    int dataSize = state.range(0);
    // Select the benchmarking device
    cudaSetDevice(benchmarkingDevice);
    // Host buffer used both to seed the device and to receive the copies
    std::vector<int> inoutVector(dataSize, 0);
    int* h_inout = &inoutVector[0];
    // Device source buffer
    int* d_device;
    cudaMalloc(&d_device, dataSize * sizeof(int));
    // Seed the device buffer once, outside the timed loop
    cudaMemcpy(d_device, h_inout, dataSize * sizeof(int), cudaMemcpyHostToDevice);
    while (state.KeepRunning())
    {
        auto start = std::chrono::high_resolution_clock::now();
        cudaMemcpy(h_inout, d_device, dataSize * sizeof(int), cudaMemcpyDeviceToHost);
        auto end = std::chrono::high_resolution_clock::now();
        auto elapsed_seconds =
            std::chrono::duration_cast<std::chrono::duration<double>>(
                end - start);
        state.SetIterationTime(elapsed_seconds.count());
    }
    // Release device memory and reset the device
    cudaFree(d_device);
    cudaDeviceReset();
    // Report bytes moved across all iterations
    state.SetBytesProcessed(state.iterations() * dataSize * sizeof(int));
}
// Fix: this registration previously named BM_CUDABandwidthHostToDevice (a
// copy-paste error), so the H2D benchmark ran twice and this D2H benchmark
// was never executed.
BENCHMARK(BM_CUDABandwidthDeviceToHost)
->UseManualTime()
->MinTime(1.0)
->Args({ 1 })
->Args({ 8 })
->Args({ 16 })
->Args({ 512 })
->Args({ 1024 })
->Args({ 16384 })
->Args({ 131072 })
->Args({ 1048576 })
->Args({ 8388608 })
->Args({ 16777216 })
->Args({ 33554432 })
->Args({ 67108864 })
->Args({ 134217728 });
// Measures device-to-device copy bandwidth for dataSize ints.
// Fixes: (1) device-to-device cudaMemcpy may return before the copy finishes,
// so the timed region now ends with cudaDeviceSynchronize(); (2) d_input was
// allocated but never freed.
static void BM_CUDABandwidthDeviceToDevice(benchmark::State& state)
{
    // Dynamic data size from the benchmark arguments
    int dataSize = state.range(0);
    // Select the benchmarking device
    cudaSetDevice(benchmarkingDevice);
    // Host buffer used to seed the device source
    std::vector<int> inputVector(dataSize, 0);
    int* h_input = &inputVector[0];
    // Device source and destination buffers
    int *d_device, *d_input;
    cudaMalloc(&d_device, dataSize * sizeof(int));
    cudaMalloc(&d_input, dataSize * sizeof(int));
    // Seed the source buffer once, outside the timed loop
    cudaMemcpy(d_input, h_input, dataSize * sizeof(int), cudaMemcpyHostToDevice);
    while (state.KeepRunning())
    {
        auto start = std::chrono::high_resolution_clock::now();
        cudaMemcpy(d_device, d_input, dataSize * sizeof(int), cudaMemcpyDeviceToDevice);
        cudaDeviceSynchronize();  // D2D copies can be asynchronous to the host
        auto end = std::chrono::high_resolution_clock::now();
        auto elapsed_seconds =
            std::chrono::duration_cast<std::chrono::duration<double>>(
                end - start);
        state.SetIterationTime(elapsed_seconds.count());
    }
    // Release device memory and reset the device
    cudaFree(d_device);
    cudaFree(d_input);
    cudaDeviceReset();
    // Report bytes moved across all iterations
    state.SetBytesProcessed(state.iterations() * dataSize * sizeof(int));
}
BENCHMARK(BM_CUDABandwidthDeviceToDevice)
->UseManualTime()
->MinTime(1.0)
->Args({ 1 })
->Args({ 8 })
->Args({ 16 })
->Args({ 512 })
->Args({ 1024 })
->Args({ 16384 })
->Args({ 131072 })
->Args({ 1048576 })
->Args({ 8388608 })
->Args({ 16777216 })
->Args({ 33554432 })
->Args({ 67108864 })
->Args({ 134217728 });
// Grid-stride loop: each thread processes elements id, id+stride, ... so any
// launch configuration covers all n elements.
__global__ void BM_gridStrideKernel(const int n, const int* d_in, int* d_out)
{
    int id = blockIdx.x * blockDim.x + threadIdx.x;
    for (int i = id; i < n; i += blockDim.x * gridDim.x)
    {
        d_out[i] = d_in[i] * id;
    }
}
// Measures cost of a launch as the thread count (state.range(0)) and the data
// size (state.range(1)) vary.
// Fix: cudaDeviceSynchronize() added inside the timed region — the launch is
// asynchronous, so without it only launch overhead was measured. Also removed
// the unused dataPerKernel local.
static void BM_CUDAKernelCreation(benchmark::State& state)
{
    // Dynamic thread/data counts from the benchmark arguments
    int kernels = state.range(0);
    int dataSize = state.range(1);
    // Select the benchmarking device
    cudaSetDevice(benchmarkingDevice);
    // Query max threads per block for the launch configuration
    cudaDeviceProp prop;
    cudaGetDeviceProperties(&prop, benchmarkingDevice);
    int threadsPerBlock = prop.maxThreadsPerBlock;
    // Block size capped at the requested thread count; grid covers the rest
    int blockSize = threadsPerBlock > kernels ? kernels : threadsPerBlock;
    int gridSize = int(kernels / blockSize);
    // Input of all-ones
    std::vector<int> inputVector(dataSize, 1);
    int* h_input = &inputVector[0];
    // Allocate device buffers and upload the input once
    int *d_input, *d_output;
    cudaMalloc(&d_input, dataSize * sizeof(int));
    cudaMalloc(&d_output, dataSize * sizeof(int));
    cudaMemcpy(d_input, h_input, dataSize * sizeof(int), cudaMemcpyHostToDevice);
    while (state.KeepRunning())
    {
        auto start = std::chrono::high_resolution_clock::now();
        BM_gridStrideKernel << <gridSize, blockSize >> >(dataSize, d_input, d_output);
        cudaDeviceSynchronize();  // wait for the kernel before stopping the clock
        auto end = std::chrono::high_resolution_clock::now();
        auto elapsed_seconds =
            std::chrono::duration_cast<std::chrono::duration<double>>(
                end - start);
        state.SetIterationTime(elapsed_seconds.count());
    }
    // Copy the result back (outside the timed region)
    std::vector<int> outputVector(dataSize, 0);
    int* h_output = &outputVector[0];
    cudaMemcpy(h_output, d_output, dataSize * sizeof(int), cudaMemcpyDeviceToHost);
    // Release device memory and reset the device
    cudaFree(d_input);
    cudaFree(d_output);
    cudaDeviceReset();
    // Report bytes processed across all iterations
    state.SetBytesProcessed(state.iterations() * dataSize * sizeof(int));
}
BENCHMARK(BM_CUDAKernelCreation)
->UseManualTime()
->MinTime(1.0)
->Args({ 1, 1 })
->Args({ 1, 10 })
->Args({ 8, 8 })
->Args({ 8, 80 })
->Args({ 16, 16 })
->Args({ 16, 160 })
->Args({ 512, 512 })
->Args({ 512, 5120 })
->Args({ 1024, 1024 })
->Args({ 1024, 10240 })
->Args({ 16384, 16384 })
->Args({ 16384, 163840 })
->Args({ 131072, 131072 })
->Args({ 131072, 1310720 })
->Args({ 1048576, 1048576 })
->Args({ 1048576, 10485760 })
->Args({ 1048576, 104857600 })
->Args({ 131072, 131072 })
->Args({ 65536, 131072 })
->Args({ 32768, 131072 })
->Args({ 16384, 131072 })
->Args({ 8192, 131072 })
->Args({ 4096, 131072 })
->Args({ 2048, 131072 })
->Args({ 1024, 131072 });
// Deliberately uncoalesced access pattern: each thread walks a private
// contiguous chunk, so adjacent threads in a warp touch addresses
// objectsPerKernel apart (many memory transactions per warp).
__global__ void BM_badGridStrideKernel(const int objectsPerKernel, const int* d_in, int* d_out)
{
    int id = blockIdx.x * blockDim.x + threadIdx.x;
    int startElement = id * objectsPerKernel;
    for (int i = startElement; i < (startElement + objectsPerKernel); i++)
    {
        d_out[i] = d_in[i] * id;
    }
}
// Benchmarks the uncoalesced kernel above.
// Fix: cudaDeviceSynchronize() added inside the timed region — the launch is
// asynchronous, so without it only launch overhead was measured.
static void BM_CUDABadMemoryCoalescence(benchmark::State& state)
{
    // Dynamic thread/data counts from the benchmark arguments
    int kernels = state.range(0);
    int dataSize = state.range(1);
    int dataPerKernel = static_cast<int>(dataSize / kernels);
    // Select the benchmarking device
    cudaSetDevice(benchmarkingDevice);
    // Query max threads per block for the launch configuration
    cudaDeviceProp prop;
    cudaGetDeviceProperties(&prop, benchmarkingDevice);
    int threadsPerBlock = prop.maxThreadsPerBlock;
    // Block size capped at the requested thread count; grid covers the rest
    int blockSize = threadsPerBlock > kernels ? kernels : threadsPerBlock;
    int gridSize = int(kernels / blockSize);
    // Input of all-ones
    std::vector<int> inputVector(dataSize, 1);
    int* h_input = &inputVector[0];
    // Allocate device buffers and upload the input once
    int *d_input, *d_output;
    cudaMalloc(&d_input, dataSize * sizeof(int));
    cudaMalloc(&d_output, dataSize * sizeof(int));
    cudaMemcpy(d_input, h_input, dataSize * sizeof(int), cudaMemcpyHostToDevice);
    while (state.KeepRunning())
    {
        auto start = std::chrono::high_resolution_clock::now();
        BM_badGridStrideKernel << <gridSize, blockSize >> >(dataPerKernel, d_input, d_output);
        cudaDeviceSynchronize();  // wait for the kernel before stopping the clock
        auto end = std::chrono::high_resolution_clock::now();
        auto elapsed_seconds =
            std::chrono::duration_cast<std::chrono::duration<double>>(
                end - start);
        state.SetIterationTime(elapsed_seconds.count());
    }
    // Copy the result back (outside the timed region)
    std::vector<int> outputVector(dataSize, 0);
    int* h_output = &outputVector[0];
    cudaMemcpy(h_output, d_output, dataSize * sizeof(int), cudaMemcpyDeviceToHost);
    // Release device memory and reset the device
    cudaFree(d_input);
    cudaFree(d_output);
    cudaDeviceReset();
    // Report bytes processed across all iterations
    state.SetBytesProcessed(state.iterations() * dataSize * sizeof(int));
}
BENCHMARK(BM_CUDABadMemoryCoalescence)
->UseManualTime()
->MinTime(1.0)
->Args({512, 4096})
->Args({4096, 4096})
->Args({1024, 8192})
->Args({8192, 8192})
->Args({16384, 131072})
->Args({131072, 131072})
->Args({16384, 131072})
->Args({131072, 131072})
->Args({131072, 1048576})
->Args({1048576, 1048576})
->Args({131072, 1048576})
->Args({1048576, 1048576})
->Args({1048576, 8388608})
->Args({8388608, 8388608});
// Coalesced access pattern: adjacent threads touch adjacent elements each
// iteration of the grid-stride loop (one memory transaction per warp).
__global__ void BM_goodGridStrideKernel(const int n, const int* d_in, int* d_out)
{
    int id = blockIdx.x * blockDim.x + threadIdx.x;
    for (int i = id; i < n; i += blockDim.x * gridDim.x)
    {
        d_out[i] = d_in[i] * id;
    }
}
// Benchmarks the coalesced kernel above (counterpart to the "bad" variant).
// Fix: cudaDeviceSynchronize() added inside the timed region — the launch is
// asynchronous, so without it only launch overhead was measured. Also removed
// the unused dataPerKernel local.
static void BM_CUDAGoodMemoryCoalescence(benchmark::State& state)
{
    // Dynamic thread/data counts from the benchmark arguments
    int kernels = state.range(0);
    int dataSize = state.range(1);
    // Select the benchmarking device
    cudaSetDevice(benchmarkingDevice);
    // Query max threads per block for the launch configuration
    cudaDeviceProp prop;
    cudaGetDeviceProperties(&prop, benchmarkingDevice);
    int threadsPerBlock = prop.maxThreadsPerBlock;
    // Block size capped at the requested thread count; grid covers the rest
    int blockSize = threadsPerBlock > kernels ? kernels : threadsPerBlock;
    int gridSize = int(kernels / blockSize);
    // Input of all-ones
    std::vector<int> inputVector(dataSize, 1);
    int* h_input = &inputVector[0];
    // Allocate device buffers and upload the input once
    int *d_input, *d_output;
    cudaMalloc(&d_input, dataSize * sizeof(int));
    cudaMalloc(&d_output, dataSize * sizeof(int));
    cudaMemcpy(d_input, h_input, dataSize * sizeof(int), cudaMemcpyHostToDevice);
    while (state.KeepRunning())
    {
        auto start = std::chrono::high_resolution_clock::now();
        BM_goodGridStrideKernel << <gridSize, blockSize >> >(dataSize, d_input, d_output);
        cudaDeviceSynchronize();  // wait for the kernel before stopping the clock
        auto end = std::chrono::high_resolution_clock::now();
        auto elapsed_seconds =
            std::chrono::duration_cast<std::chrono::duration<double>>(
                end - start);
        state.SetIterationTime(elapsed_seconds.count());
    }
    // Copy the result back (outside the timed region)
    std::vector<int> outputVector(dataSize, 0);
    int* h_output = &outputVector[0];
    cudaMemcpy(h_output, d_output, dataSize * sizeof(int), cudaMemcpyDeviceToHost);
    // Release device memory and reset the device
    cudaFree(d_input);
    cudaFree(d_output);
    cudaDeviceReset();
    // Report bytes processed across all iterations
    state.SetBytesProcessed(state.iterations() * dataSize * sizeof(int));
}
BENCHMARK(BM_CUDAGoodMemoryCoalescence)
->UseManualTime()
->MinTime(1.0)
->Args({512, 4096})
->Args({4096, 4096})
->Args({1024, 8192})
->Args({8192, 8192})
->Args({16384, 131072})
->Args({131072, 131072})
->Args({16384, 131072})
->Args({131072, 131072})
->Args({131072, 1048576})
->Args({1048576, 1048576})
->Args({131072, 1048576})
->Args({1048576, 1048576})
->Args({1048576, 8388608})
->Args({8388608, 8388608});
// Lists all CUDA devices, reads a selection from stdin (or uses the global
// autoSelectDevice override when it is not -1), and stores the result in the
// global benchmarkingDevice used by every benchmark.
// Improvements: removed the unused allDevices vector and added range
// validation of the selection (an invalid index now falls back to device 0
// instead of making every later cudaSetDevice call fail).
void selectBenchmarkDevice()
{
    std::cout << "Select Benchmarking Device:\n";
    int nDevices;
    cudaGetDeviceCount(&nDevices);
    for (int i = 0; i < nDevices; i++)
    {
        cudaDeviceProp prop;
        cudaGetDeviceProperties(&prop, i);
        std::cout << "[" << i << "] " << prop.name << "\n";
    }
    std::cout << "\nSelect Device: ";
    int deviceSelection;
    if (autoSelectDevice == -1)
    {
        std::cin >> deviceSelection;
    }
    else
    {
        deviceSelection = autoSelectDevice;
    }
    // Guard against an out-of-range selection
    if (deviceSelection < 0 || deviceSelection >= nDevices)
    {
        std::cout << "\nInvalid selection, defaulting to device 0";
        deviceSelection = 0;
    }
    benchmarkingDevice = deviceSelection;
    std::cout << "\nSelected Device: " << deviceSelection << "\n\n";
}
// Entry point: choose the CUDA device, then hand control to Google Benchmark.
int main(int argc, char** argv)
{
    selectBenchmarkDevice();
    ::benchmark::Initialize(&argc, argv);
    ::benchmark::RunSpecifiedBenchmarks();
    return 0;  // explicit, though implicit in C++ main
}
|
7ae73d9f5d08732d76b486ded01a8fa387fd6454.hip
|
// !!! This is a file automatically generated by hipify!!!
#include <hiprand/hiprand_kernel.h>
#include <cstdlib>
#include "HeunSolver.hpp"
#include "NonlinearProblem.hpp"
#include "parameters.hpp"
// Constructs a solver with fixed time step dt for the given problem.
// NOTE(review): global_state is taken by value, so this solver advances a
// private copy of the RNG state and the caller's state object is not
// updated -- confirm this is intended.
__device__ HeunSolver::HeunSolver( float dt, hiprandState_t global_state, NonlinearProblem*
pProblem)
{
    mDt = dt;                 // integration time step
    mState = global_state;    // private copy of the RNG state
    mpProblem = pProblem;     // problem supplying drift (ComputeF) and Coupling
}
// Advances all noNeurons states u by one stochastic Heun (predictor-corrector)
// step of size mDt, adding Gaussian noise of amplitude alpha*sqrt(dt).
// Improvements: hoisted the loop-invariant noise amplitude alpha*sqrt(mDt)
// out of the loops (it was recomputed for every component) and removed a
// stray double semicolon.
__device__ void HeunSolver::HeunStep( float t, float2* u)
{
    float2 f0[noNeurons], f1[noNeurons], k1[noNeurons];
    // Noise amplitude is loop-invariant: compute alpha*sqrt(dt) once.
    const float noiseAmp = alpha * sqrtf(mDt);
    // Predictor: explicit Euler step from (t, u)
    mpProblem->ComputeF( t, u, f0);
    mpProblem->Coupling( u, f0);
    # pragma unroll
    for (int i=0;i<noNeurons;++i)
    {
        k1[i].x = u[i].x + f0[i].x*mDt + noiseAmp*make_rand();
        k1[i].y = u[i].y + f0[i].y*mDt + noiseAmp*make_rand();
    }
    // Corrector: trapezoidal average of slopes at t and t+dt
    mpProblem->ComputeF( t+mDt, k1, f1);
    mpProblem->Coupling( k1, f1);
    # pragma unroll
    for (int i=0;i<noNeurons;++i)
    {
        u[i].x += mDt/2.0f*(f0[i].x+f1[i].x) + noiseAmp*make_rand();
        u[i].y += mDt/2.0f*(f0[i].y+f1[i].y) + noiseAmp*make_rand();
    }
}
// Draws one standard-normal sample, advancing the solver's private RNG state.
__device__ float HeunSolver::make_rand()
{
    return hiprand_normal( &mState);
}
|
7ae73d9f5d08732d76b486ded01a8fa387fd6454.cu
|
#include <curand_kernel.h>
#include <cstdlib>
#include "HeunSolver.hpp"
#include "NonlinearProblem.hpp"
#include "parameters.hpp"
// Constructs a solver with fixed time step dt for the given problem.
// NOTE(review): global_state is taken by value, so this solver advances a
// private copy of the RNG state and the caller's state object is not
// updated -- confirm this is intended.
__device__ HeunSolver::HeunSolver( float dt, curandState global_state, NonlinearProblem*
pProblem)
{
    mDt = dt;                 // integration time step
    mState = global_state;    // private copy of the RNG state
    mpProblem = pProblem;     // problem supplying drift (ComputeF) and Coupling
}
// Advances all noNeurons states u by one stochastic Heun (predictor-corrector)
// step of size mDt, adding Gaussian noise of amplitude alpha*sqrt(dt).
// Improvements: hoisted the loop-invariant noise amplitude alpha*sqrt(mDt)
// out of the loops (it was recomputed for every component) and removed a
// stray double semicolon.
__device__ void HeunSolver::HeunStep( float t, float2* u)
{
    float2 f0[noNeurons], f1[noNeurons], k1[noNeurons];
    // Noise amplitude is loop-invariant: compute alpha*sqrt(dt) once.
    const float noiseAmp = alpha * sqrtf(mDt);
    // Predictor: explicit Euler step from (t, u)
    mpProblem->ComputeF( t, u, f0);
    mpProblem->Coupling( u, f0);
    # pragma unroll
    for (int i=0;i<noNeurons;++i)
    {
        k1[i].x = u[i].x + f0[i].x*mDt + noiseAmp*make_rand();
        k1[i].y = u[i].y + f0[i].y*mDt + noiseAmp*make_rand();
    }
    // Corrector: trapezoidal average of slopes at t and t+dt
    mpProblem->ComputeF( t+mDt, k1, f1);
    mpProblem->Coupling( k1, f1);
    # pragma unroll
    for (int i=0;i<noNeurons;++i)
    {
        u[i].x += mDt/2.0f*(f0[i].x+f1[i].x) + noiseAmp*make_rand();
        u[i].y += mDt/2.0f*(f0[i].y+f1[i].y) + noiseAmp*make_rand();
    }
}
// Draws one standard-normal sample, advancing the solver's private RNG state.
__device__ float HeunSolver::make_rand()
{
    return curand_normal( &mState);
}
|
abbc83ee03635d6a46dea927ba6e11836f000f69.hip
|
// !!! This is a file automatically generated by hipify!!!
/*
* Proj 3-2 SKELETON
*/
#include <float.h>
#include <stdlib.h>
#include <stdio.h>
#include <hip/hip_runtime.h>
#include <cutil.h>
#include "utils.h"
/* Does a horizontal flip of the (presumably width x width, row-major) array
   ARR. Unimplemented stub from the project skeleton. */
void flip_horizontal(float *arr, int width) {
    /* YOU MAY WISH TO IMPLEMENT THIS */
}
/* Transposes the square array ARR. Unimplemented stub. */
void transpose(float *arr, int width) {
    /* YOU MAY WISH TO IMPLEMENT THIS */
}
/* Rotates the square array ARR by 90 degrees counterclockwise.
   Unimplemented stub. */
void rotate_ccw_90(float *arr, int width) {
    /* YOU MAY WISH TO IMPLEMENT THIS */
}
// One step of a strided pairwise min-reduction over gpu_result: each thread
// compares a pair of slots 'level' apart and keeps the smaller value in the
// left slot. Called repeatedly with doubling 'level' until slot 0 holds the
// minimum of the first len elements.
__global__ void reduction4096Kernel(float* gpu_result, int len, int level) {
    int tid = blockIdx.y * 4096 + blockIdx.x * 512 + threadIdx.x;
    int left = 2 * level * tid;
    int right = left + level;
    if (right < len && gpu_result[right] < gpu_result[left]) {
        gpu_result[left] = gpu_result[right];
    }
}
// Declared here; defined elsewhere in the project.
__global__ void leastDistanceKernel (float* A, float* B, float* C, int len);
// Squared difference between one template pixel and the image pixel at
// translation offset (i, j), written into result.
// NOTE(review): the result index uses only blockIdx.x and threadIdx.x, so
// blocks differing only in blockIdx.y write the same slots (last writer
// wins) -- looks unintended, confirm against the reduction that follows.
__global__ void leastDistance4096Kernel(float *image, float *temp, float *result, int translation_width, int i, int j) {
    float dist = temp[blockIdx.y*4096 + blockIdx.x*512 + threadIdx.x] - image[blockIdx.y*4096 + blockIdx.x*512 + threadIdx.x + i*translation_width + j];
    result[blockIdx.x*512 + threadIdx.x] = dist * dist;
}
// Accumulates squared per-pixel differences into one slot per translation.
// NOTE(review): gpu_result[...] += ... is not atomic, and many threads map to
// the same translation slot, so concurrent updates can be lost -- confirm.
__global__ void distance4096Kernel(float* gpu_image, float* gpu_temp, float* gpu_result, float* gpu_test, int num_translations,
int offset, int t_width, int i_width) {
    int thread_index = offset + blockIdx.x * blockDim.x + threadIdx.x;
    if (thread_index < (num_translations * t_width * t_width)) {
        // Each thread handles one (pixel, translation) pair
        int pixel_index = thread_index / num_translations;
        // NOTE(review): 'distance' is declared int, truncating the float
        // difference before it is squared -- looks unintended, confirm.
        int distance = gpu_temp[pixel_index]
            - gpu_image[thread_index % num_translations + (pixel_index / t_width) * i_width + pixel_index % t_width];
        if (thread_index < 100) {
            gpu_test[thread_index] = distance;  // debug tap of the first 100 values
        }
        gpu_result[thread_index % num_translations] += distance * distance;
    }
}
// One step of a strided pairwise min-reduction starting at 'offset': keeps
// the smaller of each pair of slots 'level' apart in the left slot.
__global__ void reductionKernel_old2(float* gpu_result, int num_iterations, int level, int offset) {
    int lhs = offset + 2 * level * (blockIdx.x * blockDim.x + threadIdx.x);
    int rhs = lhs + level;
    if (rhs < num_iterations && gpu_result[rhs] < gpu_result[lhs]) {
        gpu_result[lhs] = gpu_result[rhs];
    }
}
// Accumulates squared differences per translation, one (translation, pixel)
// pair per thread via a 2D block layout.
// NOTE(review): the += updates on gpu_test[99], gpu_test[98] and
// gpu_result[trans_num] are unsynchronized across threads, so counts and
// sums can be lost -- this looks like debugging scaffolding, confirm.
__global__ void distanceSerialKernel(float* gpu_image, float* gpu_temp, float* gpu_result, float* gpu_test, int num_translations,
int i_width, int t_width, int translation_width, int translations_per_block) {
    int trans_num = threadIdx.x;   // which translation this thread scores
    int pixel_num = threadIdx.y + translations_per_block * blockIdx.x;  // which template pixel
    if (pixel_num < (t_width * t_width)) {
        gpu_test[99] += 1;             // debug: count of active threads (racy)
        gpu_test[98] += threadIdx.x;   // debug: sum of translation ids (racy)
        float distance = gpu_temp[pixel_num] - gpu_image[((int) (trans_num / translation_width)) * i_width + ((int) (pixel_num / t_width)) * i_width + trans_num % translation_width + pixel_num % t_width];
        if (pixel_num == 0 && trans_num < 98) {
            gpu_test[trans_num] = distance * distance;  // debug tap
        }
        gpu_result[trans_num] += distance * distance;   // racy accumulation
    }
}
// Returns the minimum sum-of-squared-differences between the t_width x t_width
// template gpu_temp and every translation of it inside the i_width x i_height
// image gpu_image (both already resident on the device).
// Only the t_width == 4096 path is implemented; for other widths the initial
// sentinel is returned unchanged. Contains extensive debugging printfs and
// two large commented-out earlier attempts, preserved below.
float calc_min_dist(float *gpu_image, int i_width, int i_height, float *gpu_temp, int t_width) {
    // Sentinel "no distance found yet"; NOTE(review): UINT_MAX assigned to a
    // float -- presumably just meant as "very large", confirm.
    float least_distance = UINT_MAX;
    if (t_width == 4096) {
        int threads_per_block = 512;
        int blocks_per_grid = 65534;
        // Number of valid template placements along each axis
        int translation_height = i_height - t_width + 1;
        int translation_width = i_width - t_width + 1;
        int num_translations = translation_height * translation_width;
        // NOTE(review): new_distance is only assigned inside the i/j loops;
        // if the loops run zero times it is read uninitialized -- confirm
        // callers guarantee at least one translation.
        float new_distance;
        // Host-side scratch for the per-translation results
        float* result = (float *)malloc(num_translations*sizeof(float));
        if (result == NULL) {
            printf("Unable to allocate space for result");
            exit(EXIT_FAILURE);
        }
        for (int counter = 0; counter < num_translations; counter++) {
            result[counter] = 0.0;
        }
        float* gpu_result;
        size_t arraySize = num_translations*sizeof(float);
        CUDA_SAFE_CALL(hipMalloc(&gpu_result, arraySize));
        CUDA_SAFE_CALL(hipMemcpy(gpu_result, result, num_translations*sizeof(float),
        hipMemcpyHostToDevice));
        // Debug buffer (first 100 intermediate values); NOTE(review): 'test'
        // is never freed -- leak.
        float* test = (float *)malloc(100*sizeof(float));
        test[99] = 0;
        float* gpu_test;
        size_t test_size = 100*sizeof(float);
        CUDA_SAFE_CALL(hipMalloc(&gpu_test, test_size));
        CUDA_SAFE_CALL(hipMemcpy(gpu_test, test, test_size,
        hipMemcpyHostToDevice));
        printf("%d\n", 3);  // debug checkpoint
        ///////////////////
        // int blocks_per_comparison = 32768;
        // For each translation: compute squared differences, then min-reduce
        for (int i = 0; i < translation_height; i++) {
            for (int j = 0; j < translation_width; j++) {
                dim3 dim_threads_per_block(threads_per_block, 1, 1);
                dim3 dim_blocks_per_grid(8, 4096);
                hipLaunchKernelGGL(( leastDistance4096Kernel), dim3(dim_blocks_per_grid), dim3(dim_threads_per_block), 0, 0,
                gpu_image, gpu_temp, gpu_result, translation_width, i, j);
                hipDeviceSynchronize();
                CUT_CHECK_ERROR("");
                // Pairwise reduction with doubling stride until one value remains
                int level = 1;
                while (level != (8*4096)) {
                    printf("level = %d\n", level);
                    blocks_per_grid = 8*4096;
                    dim3 dim_threads_per_block(threads_per_block, 1, 1);
                    dim3 dim_blocks_per_grid(blocks_per_grid, 1);
                    hipLaunchKernelGGL(( reduction4096Kernel), dim3(dim_blocks_per_grid), dim3(dim_threads_per_block), 0, 0,
                    gpu_result, 4096*4096, level);
                    hipDeviceSynchronize();
                    CUT_CHECK_ERROR("");
                    level *= 2;
                    blocks_per_grid /= 2;
                    if (blocks_per_grid == 0) {
                        blocks_per_grid = 1;
                    }
                }
                // Slot 0 now holds this translation's distance
                CUDA_SAFE_CALL(hipMemcpy(&new_distance, gpu_result, sizeof(float),
                hipMemcpyDeviceToHost));
                if (new_distance < least_distance) {
                    least_distance = new_distance;
                }
            }
        }
        /*
        /////////////////
        // int num_operations = num_translations * t_width * t_width;
        // int num_per_iter = threads_per_block * blocks_per_grid;
        if (num_translations < threads_per_block) {
        int translations_per_block = threads_per_block / num_translations;
        int num_blocks = num_translations / translations_per_block + 1;
        while (num_blocks > 0) {
        if (num_blocks > blocks_per_grid) {
        printf("%d translations per block!\n", translations_per_block);
        dim3 dim_threads_per_block(num_translations, translations_per_block, 1);
        dim3 dim_blocks_per_grid(blocks_per_grid, 1);
        distanceSerialKernel<<<dim_blocks_per_grid, dim_threads_per_block>>>
        (gpu_image, gpu_temp, gpu_result, gpu_test, num_translations, i_width, t_width,
        translation_width, translations_per_block);
        hipDeviceSynchronize();
        CUT_CHECK_ERROR("");
        } else {
        printf("%d translations per block!\n", translations_per_block);
        dim3 dim_threads_per_block(num_translations, translations_per_block, 1);
        dim3 dim_blocks_per_grid(num_blocks, 1);
        distanceSerialKernel<<<dim_blocks_per_grid, dim_threads_per_block>>>
        (gpu_image, gpu_temp, gpu_result, gpu_test, num_translations, i_width, t_width,
        translation_width, translations_per_block);
        hipDeviceSynchronize();
        CUT_CHECK_ERROR("");
        }
        num_blocks -= blocks_per_grid;
        }
        } else {
        // int
        // dim3 dim_threads_per_block(threads_per_block, 1, 1);
        // dim3 dim_blocks_per_grid(num_translations / threads_per_block, 1);
        printf("Reached else case of num_translations! \n");
        }
        ///////////////////*/
        // Debug dumps of the first 100 entries of each buffer
        printf("Temp\n");
        CUDA_SAFE_CALL(hipMemcpy(test, gpu_temp, test_size,
        hipMemcpyDeviceToHost));
        for (int i = 0; i < 100; i++) {
            printf("%f\n", test[i]);
        }
        printf("Image\n");
        CUDA_SAFE_CALL(hipMemcpy(test, gpu_image, test_size,
        hipMemcpyDeviceToHost));
        for (int i = 0; i < 100; i++) {
            printf("%f\n", test[i]);
        }
        printf("Distance");
        CUDA_SAFE_CALL(hipMemcpy(test, gpu_test, test_size,
        hipMemcpyDeviceToHost));
        for (int i = 0; i < 100; i++) {
            printf("%f\n", test[i]);
        }
        printf("%d\n", 5);  // debug checkpoint
        CUDA_SAFE_CALL(hipMemcpy(result, gpu_result, num_translations*sizeof(float),
        hipMemcpyDeviceToHost));
        for (int i = 0; i < num_translations; i++) {
            printf("%f\n", result[i]);
        }
        /*
        int level = 1;
        int num_blocks = 1;
        if (num_translations <= (threads_per_block * blocks_per_grid)) {
        if (num_translations <= threads_per_block) {
        dim3 dim_threads_per_block(num_translations, 1, 1);
        dim3 dim_blocks_per_grid(num_blocks, 1);
        while (level < num_translations) {
        reductionKernel<<<dim_blocks_per_grid, dim_threads_per_block>>>
        (gpu_result, num_translations, level, 0);
        hipDeviceSynchronize();
        CUT_CHECK_ERROR("");
        level *= 2;
        num_blocks /= 2;
        if (num_blocks == 0) {
        num_blocks = 1;
        }
        }
        } else {
        num_blocks = num_translations / threads_per_block + 1;
        dim3 dim_threads_per_block(threads_per_block, 1, 1);
        dim3 dim_blocks_per_grid(num_blocks, 1);
        while (level < num_translations) {
        reductionKernel<<<dim_blocks_per_grid, dim_threads_per_block>>>
        (gpu_result, num_translations, level, 0);
        hipDeviceSynchronize();
        CUT_CHECK_ERROR("");
        level *= 2;
        num_blocks /= 2;
        if (num_blocks == 0) {
        num_blocks = 1;
        }
        }
        }
        } else {
        printf("Input is too large!");
        }
        printf("%d\n", 6);
        CUDA_SAFE_CALL(hipMemcpy(&new_distance, gpu_result, sizeof(float),
        hipMemcpyDeviceToHost));
        if (new_distance < least_distance) {
        least_distance = new_distance;
        }
        */
        printf("%d\n", 7);  // debug checkpoint
        // gpu_image/gpu_temp are owned by the caller, so they are not freed here
        // CUDA_SAFE_CALL(hipFree(gpu_image));
        // CUDA_SAFE_CALL(hipFree(gpu_temp));
        CUDA_SAFE_CALL(hipFree(gpu_result));
        CUDA_SAFE_CALL(hipFree(gpu_test));
        free(result);
    }
    printf("%f\n", least_distance);
    return least_distance;
}
/*
float calc_min_dist_old2(float *gpu_image, int i_width, int i_height, float *gpu_temp, int t_width) {
float least_distance = UINT_MAX;
if (t_width == 4096) {
int threads_per_block = 512;
int blocks_per_grid = 65534;
int translation_height = i_height - t_width + 1;
int translation_width = i_width - t_width + 1;
int num_translations = translation_height * translation_width;
float new_distance;
float* result = (float *)malloc(num_translations*sizeof(float));
if (result == NULL) {
printf("Unable to allocate space for result");
exit(EXIT_FAILURE);
}
for (int counter = 0; counter < num_translations; counter++) {
result[counter] = 0.0;
}
float* gpu_result;
size_t arraySize = num_translations*sizeof(float);
CUDA_SAFE_CALL(hipMalloc(&gpu_result, arraySize));
CUDA_SAFE_CALL(hipMemcpy(gpu_result, result, num_translations*sizeof(float),
hipMemcpyHostToDevice));
float* test = (float *)malloc(100*sizeof(float));
test[99] = 0;
float* gpu_test;
size_t test_size = 100*sizeof(float);
CUDA_SAFE_CALL(hipMalloc(&gpu_test, test_size));
CUDA_SAFE_CALL(hipMemcpy(gpu_test, test, test_size,
hipMemcpyHostToDevice));
printf("%d\n", 3); /*
///////////////////
dim3 dim_threads_per_block(threads_per_block, 1, 1);
dim3 dim_blocks_per_grid(blocks_per_grid, 1);
int num_operations = num_translations * t_width * t_width;
int num_per_iter = threads_per_block * blocks_per_grid;
int num_iter = num_operations / num_per_iter;
if (num_iter * num_per_iter < num_operations) {
num_iter ++;
}
for (int counter = 0; counter < num_iter; counter ++) { /*
distance4096Kernel<<<dim_blocks_per_grid, dim_threads_per_block>>>
(gpu_image, gpu_temp, gpu_result, num_translations,
num_operations - counter*num_per_iter, t_width, i_width);
distance4096Kernel<<<dim_blocks_per_grid, dim_threads_per_block>>>
(gpu_image, gpu_temp, gpu_result, num_translations,
0, t_width, i_width);
hipDeviceSynchronize();
CUT_CHECK_ERROR("");
}
//////////////////
// int num_operations = num_translations * t_width * t_width;
// int num_per_iter = threads_per_block * blocks_per_grid;
if (num_translations < threads_per_block) {
int translations_per_block = threads_per_block / num_translations;
int num_blocks = num_translations / translations_per_block + 1;
while (num_blocks > 0) {
if (num_blocks > blocks_per_grid) {
printf("%d translations per block!\n", translations_per_block);
dim3 dim_threads_per_block(num_translations, translations_per_block, 1);
dim3 dim_blocks_per_grid(blocks_per_grid, 1);
distanceSerialKernel<<<dim_blocks_per_grid, dim_threads_per_block>>>
(gpu_image, gpu_temp, gpu_result, gpu_test, num_translations, i_width, t_width,
translation_width, translations_per_block);
hipDeviceSynchronize();
CUT_CHECK_ERROR("");
} else {
printf("%d translations per block!\n", translations_per_block);
dim3 dim_threads_per_block(num_translations, translations_per_block, 1);
dim3 dim_blocks_per_grid(num_blocks, 1);
distanceSerialKernel<<<dim_blocks_per_grid, dim_threads_per_block>>>
(gpu_image, gpu_temp, gpu_result, gpu_test, num_translations, i_width, t_width,
translation_width, translations_per_block);
hipDeviceSynchronize();
CUT_CHECK_ERROR("");
}
num_blocks -= blocks_per_grid;
}
} else {
// int
// dim3 dim_threads_per_block(threads_per_block, 1, 1);
// dim3 dim_blocks_per_grid(num_translations / threads_per_block, 1);
printf("Reached else case of num_translations! \n");
}
///////////////////
printf("Temp\n");
CUDA_SAFE_CALL(hipMemcpy(test, gpu_temp, test_size,
hipMemcpyDeviceToHost));
for (int i = 0; i < 100; i++) {
printf("%f\n", test[i]);
}
printf("Image\n");
CUDA_SAFE_CALL(hipMemcpy(test, gpu_image, test_size,
hipMemcpyDeviceToHost));
for (int i = 0; i < 100; i++) {
printf("%f\n", test[i]);
}
printf("Distance");
CUDA_SAFE_CALL(hipMemcpy(test, gpu_test, test_size,
hipMemcpyDeviceToHost));
for (int i = 0; i < 100; i++) {
printf("%f\n", test[i]);
}
printf("%d\n", 5);
CUDA_SAFE_CALL(hipMemcpy(result, gpu_result, num_translations*sizeof(float),
hipMemcpyDeviceToHost));
for (int i = 0; i < num_translations; i++) {
printf("%f\n", result[i]);
}
int level = 1;
int num_blocks = 1;
if (num_translations <= (threads_per_block * blocks_per_grid)) {
if (num_translations <= threads_per_block) {
dim3 dim_threads_per_block(num_translations, 1, 1);
dim3 dim_blocks_per_grid(num_blocks, 1);
while (level < num_translations) {
reductionKernel<<<dim_blocks_per_grid, dim_threads_per_block>>>
(gpu_result, num_translations, level, 0);
hipDeviceSynchronize();
CUT_CHECK_ERROR("");
level *= 2;
num_blocks /= 2;
if (num_blocks == 0) {
num_blocks = 1;
}
}
} else {
num_blocks = num_translations / threads_per_block + 1;
dim3 dim_threads_per_block(threads_per_block, 1, 1);
dim3 dim_blocks_per_grid(num_blocks, 1);
while (level < num_translations) {
reductionKernel<<<dim_blocks_per_grid, dim_threads_per_block>>>
(gpu_result, num_translations, level, 0);
hipDeviceSynchronize();
CUT_CHECK_ERROR("");
level *= 2;
num_blocks /= 2;
if (num_blocks == 0) {
num_blocks = 1;
}
}
}
} else {
printf("Input is too large!");
}
printf("%d\n", 6);
CUDA_SAFE_CALL(hipMemcpy(&new_distance, gpu_result, sizeof(float),
hipMemcpyDeviceToHost));
if (new_distance < least_distance) {
least_distance = new_distance;
}
printf("%d\n", 7);
// CUDA_SAFE_CALL(hipFree(gpu_image));
// CUDA_SAFE_CALL(hipFree(gpu_temp));
CUDA_SAFE_CALL(hipFree(gpu_result));
CUDA_SAFE_CALL(hipFree(gpu_test));
free(result);
}
printf("%f\n", least_distance);
return least_distance;
}
/* Returns the squared Euclidean distance between TEMPLATE and IMAGE. The size of IMAGE
* is I_WIDTH * I_HEIGHT, while TEMPLATE is square with side length T_WIDTH. The template
* image should be flipped, rotated, and translated across IMAGE.
*/ /*
float calc_min_dist_old(float *image, int i_width, int i_height, float *temp, int t_width) {
// float* image and float* temp are pointers to GPU addressible memory
// You MAY NOT copy this data back to CPU addressible memory and you MAY
// NOT perform any computation using values from image or temp on the CPU.
// The only computation you may perform on the CPU directly derived from distance
// values is selecting the minimum distance value given a calculated distance and a
// "min so far"
// Basic units of computation:
// - one comparison
// - one eight configuration ie. one translation
// - one traversal in min(width, height) dimension
// - all translations
int threads_per_block = 512; // 2^9
int blocks_per_grid = 65535; // 2^16
int translation_width = i_width - t_width + 1;
int translation_height = i_height - t_width + 1;
int blocks_per_comparison = 1;
float *gpu_image, *gpu_temp;
CUDA_SAFE_CALL(hipMalloc(&gpu_image, i_width*i_height));
CUDA_SAFE_CALL(hipMalloc(gpu_image, image, i_width*i_height, hipMemcpyHostToDevice));
CUDA_SAFE_CALL(hipMalloc(&gpu_temp, t_width * t_width));
CUDA_SAFE_CALL(hipMalloc(gpu_temp, temp, t_width*t_width, hipMemcpyHostToDevice));
if (t_width <= 512) {
blocks_per_comparison = 512;
for (int i = 0; i < translation_height; i++) {
for (int j = 0; j < translation_width; j++) {
}
}
} else if (t_width == 1024) {
blocks_per_comparison = 2048;
for (int i = 0; i < translation_height; i++) {
for (int j = 0; j < translation_width; j++) {
}
}
} else if (t_width == 2048) {
blocks_per_comparison = 8192;
for (int i = 0; i < translation_height; i++) {
for (int j = 0; j < translation_width; j++) {
}
}
} else if (t_width >= 4096) {
size_t arraySize = translation_width * translation_height * sizeof(float);
// float* result = (float *)malloc(arraySize);
float* gpu_result;
CUDA_SAFE_CALL(hipMalloc(gpu_result, arraySize));
blocks_per_comparison = 32768;
float least_distance = UINT_MAX;
for (int i = 0; i < translation_height; i++) {
for (int j = 0; j < translation_width; j++) {
dim3 dim_threads_per_block(threads_per_block, 1, 1);
dim3 dim_blocks_per_grid(8, 4096);
leastDistance4096Kernel<<<dim_blocks_per_grid, dim_threads_per_block>>>(gpu_image, gpu_temp, gpu_result, translation_width, i, j);
hipDeviceSynchronize();
CUT_CHECK_ERROR("");
int level = 1;
while (level != (8*4096)) {
blocks_per_grid = 8*4096;
dim3 dim_blocks_per_grid(blocks_per_grid, 1);
reductionKernel<<<dim_blocks_per_grid, dim_threads_per_block>>>(gpu_result, 4096*4096, level);
hipDeviceSynchronize();
CUT_CHECK_ERROR("");
level *= 2;
blocks_per_grid /= 2;
if (blocks_per_grid == 0) {
blocks_per_grid = 1;
}
CUDA_SAFE_CALL(hipMemcpy(&gpu_result, ))
}
}
}
// dim3 dim_threads_per_block(threads_per_block, 1, 1);
// dim3 dim_blocks_per_grid(blocks_per_grid, 1);
reductionKernel<<<dim_blocks_per_grid, dim_threads_per_block>>>()
return 0;
}
*/
|
abbc83ee03635d6a46dea927ba6e11836f000f69.cu
|
/*
* Proj 3-2 SKELETON
*/
#include <float.h>
#include <stdlib.h>
#include <stdio.h>
#include <cuda_runtime.h>
#include <cutil.h>
#include "utils.h"
/* Does a horizontal flip (mirror each row left-to-right) of the square
 * WIDTH x WIDTH row-major array ARR, in place.
 * Implemented: the skeleton body was empty ("YOU MAY WISH TO IMPLEMENT THIS"). */
void flip_horizontal(float *arr, int width) {
    for (int r = 0; r < width; r++) {
        float *row = arr + r * width;
        // Swap mirrored column pairs; the middle column (odd width) stays put.
        for (int c = 0; c < width / 2; c++) {
            float tmp = row[c];
            row[c] = row[width - 1 - c];
            row[width - 1 - c] = tmp;
        }
    }
}
/* Transposes the square WIDTH x WIDTH row-major array ARR in place
 * (arr[r][c] <-> arr[c][r]).
 * Implemented: the skeleton body was empty ("YOU MAY WISH TO IMPLEMENT THIS"). */
void transpose(float *arr, int width) {
    // Only visit the strict upper triangle; the diagonal is fixed by transpose.
    for (int r = 0; r < width; r++) {
        for (int c = r + 1; c < width; c++) {
            float tmp = arr[r * width + c];
            arr[r * width + c] = arr[c * width + r];
            arr[c * width + r] = tmp;
        }
    }
}
/* Rotates the square WIDTH x WIDTH row-major array ARR by 90 degrees
 * counterclockwise, in place (element (r, c) moves to (width-1-c, r)).
 * Uses a temporary buffer; if allocation fails, ARR is left unchanged.
 * Implemented: the skeleton body was empty ("YOU MAY WISH TO IMPLEMENT THIS"). */
void rotate_ccw_90(float *arr, int width) {
    size_t n = (size_t)width * (size_t)width;
    float *tmp = (float *)malloc(n * sizeof(float));
    if (tmp == NULL) {
        return; // best-effort: cannot rotate without scratch space
    }
    for (int r = 0; r < width; r++) {
        for (int c = 0; c < width; c++) {
            tmp[(size_t)(width - 1 - c) * width + r] = arr[(size_t)r * width + c];
        }
    }
    // Copy back with a loop (string.h is not included in this file).
    for (size_t i = 0; i < n; i++) {
        arr[i] = tmp[i];
    }
    free(tmp);
}
/* One pass of a pairwise min-reduction over GPU_RESULT. Each thread compares
 * the element at 2*level*tid against its partner LEVEL slots later and keeps
 * the smaller in the left slot. The flat thread id assumes blocks of 512
 * threads, up to 8 blocks in x per 4096-wide page in y. */
__global__ void reduction4096Kernel(float* gpu_result, int len, int level) {
    int flat_tid = blockIdx.y * 4096 + blockIdx.x * 512 + threadIdx.x;
    int lhs = 2 * level * flat_tid;
    int rhs = lhs + level;
    if (rhs < len) {
        float candidate = gpu_result[rhs];
        if (candidate < gpu_result[lhs]) {
            gpu_result[lhs] = candidate;
        }
    }
}
__global__ void leastDistanceKernel (float* A, float* B, float* C, int len);
/* Computes one squared per-pixel difference between the template and the image
 * with the template placed at row I, column J. The flat index assumes the
 * launch layout used by the caller: blockDim.x == 512, gridDim == (8, 4096).
 * NOTE(review): the store index omits blockIdx.y, so all 4096 y-blocks write
 * into the same 4096 result slots and overwrite each other -- confirm whether
 * `result` should be indexed with the full blockIdx.y term as well.
 * NOTE(review): the image offset uses `i*translation_width + j`; for a
 * row-major image a row step of `i_width` looks intended -- verify with caller. */
__global__ void leastDistance4096Kernel(float *image, float *temp, float *result, int translation_width, int i, int j) {
    float dist = temp[blockIdx.y*4096 + blockIdx.x*512 + threadIdx.x] - image[blockIdx.y*4096 + blockIdx.x*512 + threadIdx.x + i*translation_width + j];
    result[blockIdx.x*512 + threadIdx.x] = dist * dist;
}
/* Accumulates squared per-pixel differences for every translation at once.
 * Thread thread_index handles pixel (thread_index / num_translations) of
 * translation (thread_index % num_translations); OFFSET arithmetic is done
 * inline on i_width. The first 100 raw differences are mirrored into gpu_test
 * for host-side debugging.
 * Fixed two defects:
 *   - `distance` was declared int, silently truncating the float difference;
 *   - `gpu_result[...] += ...` was a plain read-modify-write raced by every
 *     thread sharing a translation index; now atomicAdd (float atomicAdd
 *     requires SM20+). */
__global__ void distance4096Kernel(float* gpu_image, float* gpu_temp, float* gpu_result, float* gpu_test, int num_translations,
                                   int offset, int t_width, int i_width) {
    int thread_index = offset + blockIdx.x * blockDim.x + threadIdx.x;
    if (thread_index < (num_translations * t_width * t_width)) {
        int pixel_index = thread_index / num_translations;
        // Was `int distance` -- fractional part of the difference was lost.
        float distance = gpu_temp[pixel_index]
            - gpu_image[thread_index % num_translations + (pixel_index / t_width) * i_width + pixel_index % t_width];
        if (thread_index < 100) {
            gpu_test[thread_index] = distance;
        }
        // Atomic accumulation: many threads share one translation slot.
        atomicAdd(&gpu_result[thread_index % num_translations], distance * distance);
    }
}
/* One pass of a strided pairwise min-reduction over GPU_RESULT, starting at
 * OFFSET. Each thread keeps the smaller of the pair (lhs, lhs + level) in the
 * lhs slot; distinct threads touch disjoint pairs, so no atomics are needed. */
__global__ void reductionKernel_old2(float* gpu_result, int num_iterations, int level, int offset) {
    int lhs = offset + 2 * level * (blockIdx.x * blockDim.x + threadIdx.x);
    int rhs = lhs + level;
    if (rhs < num_iterations) {
        float other = gpu_result[rhs];
        if (other < gpu_result[lhs]) {
            gpu_result[lhs] = other;
        }
    }
}
/* Distance kernel with translations along threadIdx.x and template pixels
 * spread over (threadIdx.y, blockIdx.x). Each thread adds one squared
 * difference into its translation's accumulator. gpu_test[98]/[99] are debug
 * counters; gpu_test[0..97] receive sample squared distances from the
 * pixel_num == 0 threads.
 * Fixed: the gpu_test counters and the gpu_result accumulation were plain
 * `+=` read-modify-writes raced across threads and blocks; they are now
 * atomicAdd (float atomicAdd requires SM20+). The sampled stores to
 * gpu_test[0..97] remain plain stores, matching the original debug behavior. */
__global__ void distanceSerialKernel(float* gpu_image, float* gpu_temp, float* gpu_result, float* gpu_test, int num_translations,
                                     int i_width, int t_width, int translation_width, int translations_per_block) {
    int trans_num = threadIdx.x;
    int pixel_num = threadIdx.y + translations_per_block * blockIdx.x;
    if (pixel_num < (t_width * t_width)) {
        // Debug counters: total thread visits / sum of translation indices.
        atomicAdd(&gpu_test[99], 1.0f);
        atomicAdd(&gpu_test[98], (float)threadIdx.x);
        float distance = gpu_temp[pixel_num] - gpu_image[((int) (trans_num / translation_width)) * i_width + ((int) (pixel_num / t_width)) * i_width + trans_num % translation_width + pixel_num % t_width];
        if (pixel_num == 0 && trans_num < 98) {
            gpu_test[trans_num] = distance * distance;
        }
        atomicAdd(&gpu_result[trans_num], distance * distance);
    }
}
/* Returns the minimum squared Euclidean distance between the device-resident
 * template GPU_TEMP (t_width x t_width) and the device-resident image
 * GPU_IMAGE (i_width x i_height), minimized over all translations of the
 * template. Only the t_width == 4096 path is implemented; any other width
 * falls through and returns the initial sentinel.
 * Fixed: the host-side debug buffer `test` was allocated but never freed.
 * NOTE(review): least_distance is seeded from UINT_MAX converted to float --
 * verify FLT_MAX was not intended (float.h is included).
 * NOTE(review): reduction4096Kernel is invoked with len = 4096*4096 while
 * gpu_result only holds num_translations floats -- confirm the buffer size. */
float calc_min_dist(float *gpu_image, int i_width, int i_height, float *gpu_temp, int t_width) {
    float least_distance = UINT_MAX;
    if (t_width == 4096) {
        int threads_per_block = 512;
        int blocks_per_grid = 65534;
        // Number of valid template placements along each axis.
        int translation_height = i_height - t_width + 1;
        int translation_width = i_width - t_width + 1;
        int num_translations = translation_height * translation_width;
        float new_distance;
        // Host zero-buffer used to clear the per-translation accumulators.
        float* result = (float *)malloc(num_translations*sizeof(float));
        if (result == NULL) {
            printf("Unable to allocate space for result");
            exit(EXIT_FAILURE);
        }
        for (int counter = 0; counter < num_translations; counter++) {
            result[counter] = 0.0;
        }
        float* gpu_result;
        size_t arraySize = num_translations*sizeof(float);
        CUDA_SAFE_CALL(cudaMalloc(&gpu_result, arraySize));
        CUDA_SAFE_CALL(cudaMemcpy(gpu_result, result, num_translations*sizeof(float),
            cudaMemcpyHostToDevice));
        // 100-float scratch area the kernels use to report debug values.
        float* test = (float *)malloc(100*sizeof(float));
        test[99] = 0;
        float* gpu_test;
        size_t test_size = 100*sizeof(float);
        CUDA_SAFE_CALL(cudaMalloc(&gpu_test, test_size));
        CUDA_SAFE_CALL(cudaMemcpy(gpu_test, test, test_size,
            cudaMemcpyHostToDevice));
        printf("%d\n", 3);  // debug checkpoint
        ///////////////////
        // int blocks_per_comparison = 32768;
        // For every translation (i, j): compute per-pixel squared differences,
        // min-reduce them on the device, then pull back element 0.
        for (int i = 0; i < translation_height; i++) {
            for (int j = 0; j < translation_width; j++) {
                dim3 dim_threads_per_block(threads_per_block, 1, 1);
                dim3 dim_blocks_per_grid(8, 4096);
                leastDistance4096Kernel<<<dim_blocks_per_grid, dim_threads_per_block>>>
                    (gpu_image, gpu_temp, gpu_result, translation_width, i, j);
                cudaThreadSynchronize();
                CUT_CHECK_ERROR("");
                // Pairwise reduction: stride doubles each pass until one value remains.
                int level = 1;
                while (level != (8*4096)) {
                    printf("level = %d\n", level);
                    blocks_per_grid = 8*4096;
                    dim3 dim_threads_per_block(threads_per_block, 1, 1);
                    dim3 dim_blocks_per_grid(blocks_per_grid, 1);
                    reduction4096Kernel<<<dim_blocks_per_grid, dim_threads_per_block>>>
                        (gpu_result, 4096*4096, level);
                    cudaThreadSynchronize();
                    CUT_CHECK_ERROR("");
                    level *= 2;
                    blocks_per_grid /= 2;
                    if (blocks_per_grid == 0) {
                        blocks_per_grid = 1;
                    }
                }
                CUDA_SAFE_CALL(cudaMemcpy(&new_distance, gpu_result, sizeof(float),
                    cudaMemcpyDeviceToHost));
                if (new_distance < least_distance) {
                    least_distance = new_distance;
                }
            }
        }
        /*
        /////////////////
        // int num_operations = num_translations * t_width * t_width;
        // int num_per_iter = threads_per_block * blocks_per_grid;
        if (num_translations < threads_per_block) {
            int translations_per_block = threads_per_block / num_translations;
            int num_blocks = num_translations / translations_per_block + 1;
            while (num_blocks > 0) {
                if (num_blocks > blocks_per_grid) {
                    printf("%d translations per block!\n", translations_per_block);
                    dim3 dim_threads_per_block(num_translations, translations_per_block, 1);
                    dim3 dim_blocks_per_grid(blocks_per_grid, 1);
                    distanceSerialKernel<<<dim_blocks_per_grid, dim_threads_per_block>>>
                        (gpu_image, gpu_temp, gpu_result, gpu_test, num_translations, i_width, t_width,
                        translation_width, translations_per_block);
                    cudaThreadSynchronize();
                    CUT_CHECK_ERROR("");
                } else {
                    printf("%d translations per block!\n", translations_per_block);
                    dim3 dim_threads_per_block(num_translations, translations_per_block, 1);
                    dim3 dim_blocks_per_grid(num_blocks, 1);
                    distanceSerialKernel<<<dim_blocks_per_grid, dim_threads_per_block>>>
                        (gpu_image, gpu_temp, gpu_result, gpu_test, num_translations, i_width, t_width,
                        translation_width, translations_per_block);
                    cudaThreadSynchronize();
                    CUT_CHECK_ERROR("");
                }
                num_blocks -= blocks_per_grid;
            }
        } else {
            // int
            // dim3 dim_threads_per_block(threads_per_block, 1, 1);
            // dim3 dim_blocks_per_grid(num_translations / threads_per_block, 1);
            printf("Reached else case of num_translations! \n");
        }
        ///////////////////*/
        // Debug dumps of the first 100 floats of each device buffer.
        printf("Temp\n");
        CUDA_SAFE_CALL(cudaMemcpy(test, gpu_temp, test_size,
            cudaMemcpyDeviceToHost));
        for (int i = 0; i < 100; i++) {
            printf("%f\n", test[i]);
        }
        printf("Image\n");
        CUDA_SAFE_CALL(cudaMemcpy(test, gpu_image, test_size,
            cudaMemcpyDeviceToHost));
        for (int i = 0; i < 100; i++) {
            printf("%f\n", test[i]);
        }
        printf("Distance");
        CUDA_SAFE_CALL(cudaMemcpy(test, gpu_test, test_size,
            cudaMemcpyDeviceToHost));
        for (int i = 0; i < 100; i++) {
            printf("%f\n", test[i]);
        }
        printf("%d\n", 5);
        CUDA_SAFE_CALL(cudaMemcpy(result, gpu_result, num_translations*sizeof(float),
            cudaMemcpyDeviceToHost));
        for (int i = 0; i < num_translations; i++) {
            printf("%f\n", result[i]);
        }
        /*
        int level = 1;
        int num_blocks = 1;
        if (num_translations <= (threads_per_block * blocks_per_grid)) {
            if (num_translations <= threads_per_block) {
                dim3 dim_threads_per_block(num_translations, 1, 1);
                dim3 dim_blocks_per_grid(num_blocks, 1);
                while (level < num_translations) {
                    reductionKernel<<<dim_blocks_per_grid, dim_threads_per_block>>>
                        (gpu_result, num_translations, level, 0);
                    cudaThreadSynchronize();
                    CUT_CHECK_ERROR("");
                    level *= 2;
                    num_blocks /= 2;
                    if (num_blocks == 0) {
                        num_blocks = 1;
                    }
                }
            } else {
                num_blocks = num_translations / threads_per_block + 1;
                dim3 dim_threads_per_block(threads_per_block, 1, 1);
                dim3 dim_blocks_per_grid(num_blocks, 1);
                while (level < num_translations) {
                    reductionKernel<<<dim_blocks_per_grid, dim_threads_per_block>>>
                        (gpu_result, num_translations, level, 0);
                    cudaThreadSynchronize();
                    CUT_CHECK_ERROR("");
                    level *= 2;
                    num_blocks /= 2;
                    if (num_blocks == 0) {
                        num_blocks = 1;
                    }
                }
            }
        } else {
            printf("Input is too large!");
        }
        printf("%d\n", 6);
        CUDA_SAFE_CALL(cudaMemcpy(&new_distance, gpu_result, sizeof(float),
            cudaMemcpyDeviceToHost));
        if (new_distance < least_distance) {
            least_distance = new_distance;
        }
        */
        printf("%d\n", 7);
        // CUDA_SAFE_CALL(cudaFree(gpu_image));
        // CUDA_SAFE_CALL(cudaFree(gpu_temp));
        CUDA_SAFE_CALL(cudaFree(gpu_result));
        CUDA_SAFE_CALL(cudaFree(gpu_test));
        free(result);
        free(test);  // fix: was leaked
    }
    printf("%f\n", least_distance);
    return least_distance;
}
/*
float calc_min_dist_old2(float *gpu_image, int i_width, int i_height, float *gpu_temp, int t_width) {
float least_distance = UINT_MAX;
if (t_width == 4096) {
int threads_per_block = 512;
int blocks_per_grid = 65534;
int translation_height = i_height - t_width + 1;
int translation_width = i_width - t_width + 1;
int num_translations = translation_height * translation_width;
float new_distance;
float* result = (float *)malloc(num_translations*sizeof(float));
if (result == NULL) {
printf("Unable to allocate space for result");
exit(EXIT_FAILURE);
}
for (int counter = 0; counter < num_translations; counter++) {
result[counter] = 0.0;
}
float* gpu_result;
size_t arraySize = num_translations*sizeof(float);
CUDA_SAFE_CALL(cudaMalloc(&gpu_result, arraySize));
CUDA_SAFE_CALL(cudaMemcpy(gpu_result, result, num_translations*sizeof(float),
cudaMemcpyHostToDevice));
float* test = (float *)malloc(100*sizeof(float));
test[99] = 0;
float* gpu_test;
size_t test_size = 100*sizeof(float);
CUDA_SAFE_CALL(cudaMalloc(&gpu_test, test_size));
CUDA_SAFE_CALL(cudaMemcpy(gpu_test, test, test_size,
cudaMemcpyHostToDevice));
printf("%d\n", 3); /*
///////////////////
dim3 dim_threads_per_block(threads_per_block, 1, 1);
dim3 dim_blocks_per_grid(blocks_per_grid, 1);
int num_operations = num_translations * t_width * t_width;
int num_per_iter = threads_per_block * blocks_per_grid;
int num_iter = num_operations / num_per_iter;
if (num_iter * num_per_iter < num_operations) {
num_iter ++;
}
for (int counter = 0; counter < num_iter; counter ++) { /*
distance4096Kernel<<<dim_blocks_per_grid, dim_threads_per_block>>>
(gpu_image, gpu_temp, gpu_result, num_translations,
num_operations - counter*num_per_iter, t_width, i_width);
distance4096Kernel<<<dim_blocks_per_grid, dim_threads_per_block>>>
(gpu_image, gpu_temp, gpu_result, num_translations,
0, t_width, i_width);
cudaThreadSynchronize();
CUT_CHECK_ERROR("");
}
//////////////////
// int num_operations = num_translations * t_width * t_width;
// int num_per_iter = threads_per_block * blocks_per_grid;
if (num_translations < threads_per_block) {
int translations_per_block = threads_per_block / num_translations;
int num_blocks = num_translations / translations_per_block + 1;
while (num_blocks > 0) {
if (num_blocks > blocks_per_grid) {
printf("%d translations per block!\n", translations_per_block);
dim3 dim_threads_per_block(num_translations, translations_per_block, 1);
dim3 dim_blocks_per_grid(blocks_per_grid, 1);
distanceSerialKernel<<<dim_blocks_per_grid, dim_threads_per_block>>>
(gpu_image, gpu_temp, gpu_result, gpu_test, num_translations, i_width, t_width,
translation_width, translations_per_block);
cudaThreadSynchronize();
CUT_CHECK_ERROR("");
} else {
printf("%d translations per block!\n", translations_per_block);
dim3 dim_threads_per_block(num_translations, translations_per_block, 1);
dim3 dim_blocks_per_grid(num_blocks, 1);
distanceSerialKernel<<<dim_blocks_per_grid, dim_threads_per_block>>>
(gpu_image, gpu_temp, gpu_result, gpu_test, num_translations, i_width, t_width,
translation_width, translations_per_block);
cudaThreadSynchronize();
CUT_CHECK_ERROR("");
}
num_blocks -= blocks_per_grid;
}
} else {
// int
// dim3 dim_threads_per_block(threads_per_block, 1, 1);
// dim3 dim_blocks_per_grid(num_translations / threads_per_block, 1);
printf("Reached else case of num_translations! \n");
}
///////////////////
printf("Temp\n");
CUDA_SAFE_CALL(cudaMemcpy(test, gpu_temp, test_size,
cudaMemcpyDeviceToHost));
for (int i = 0; i < 100; i++) {
printf("%f\n", test[i]);
}
printf("Image\n");
CUDA_SAFE_CALL(cudaMemcpy(test, gpu_image, test_size,
cudaMemcpyDeviceToHost));
for (int i = 0; i < 100; i++) {
printf("%f\n", test[i]);
}
printf("Distance");
CUDA_SAFE_CALL(cudaMemcpy(test, gpu_test, test_size,
cudaMemcpyDeviceToHost));
for (int i = 0; i < 100; i++) {
printf("%f\n", test[i]);
}
printf("%d\n", 5);
CUDA_SAFE_CALL(cudaMemcpy(result, gpu_result, num_translations*sizeof(float),
cudaMemcpyDeviceToHost));
for (int i = 0; i < num_translations; i++) {
printf("%f\n", result[i]);
}
int level = 1;
int num_blocks = 1;
if (num_translations <= (threads_per_block * blocks_per_grid)) {
if (num_translations <= threads_per_block) {
dim3 dim_threads_per_block(num_translations, 1, 1);
dim3 dim_blocks_per_grid(num_blocks, 1);
while (level < num_translations) {
reductionKernel<<<dim_blocks_per_grid, dim_threads_per_block>>>
(gpu_result, num_translations, level, 0);
cudaThreadSynchronize();
CUT_CHECK_ERROR("");
level *= 2;
num_blocks /= 2;
if (num_blocks == 0) {
num_blocks = 1;
}
}
} else {
num_blocks = num_translations / threads_per_block + 1;
dim3 dim_threads_per_block(threads_per_block, 1, 1);
dim3 dim_blocks_per_grid(num_blocks, 1);
while (level < num_translations) {
reductionKernel<<<dim_blocks_per_grid, dim_threads_per_block>>>
(gpu_result, num_translations, level, 0);
cudaThreadSynchronize();
CUT_CHECK_ERROR("");
level *= 2;
num_blocks /= 2;
if (num_blocks == 0) {
num_blocks = 1;
}
}
}
} else {
printf("Input is too large!");
}
printf("%d\n", 6);
CUDA_SAFE_CALL(cudaMemcpy(&new_distance, gpu_result, sizeof(float),
cudaMemcpyDeviceToHost));
if (new_distance < least_distance) {
least_distance = new_distance;
}
printf("%d\n", 7);
// CUDA_SAFE_CALL(cudaFree(gpu_image));
// CUDA_SAFE_CALL(cudaFree(gpu_temp));
CUDA_SAFE_CALL(cudaFree(gpu_result));
CUDA_SAFE_CALL(cudaFree(gpu_test));
free(result);
}
printf("%f\n", least_distance);
return least_distance;
}
/* Returns the squared Euclidean distance between TEMPLATE and IMAGE. The size of IMAGE
* is I_WIDTH * I_HEIGHT, while TEMPLATE is square with side length T_WIDTH. The template
* image should be flipped, rotated, and translated across IMAGE.
*/ /*
float calc_min_dist_old(float *image, int i_width, int i_height, float *temp, int t_width) {
// float* image and float* temp are pointers to GPU addressible memory
// You MAY NOT copy this data back to CPU addressible memory and you MAY
// NOT perform any computation using values from image or temp on the CPU.
// The only computation you may perform on the CPU directly derived from distance
// values is selecting the minimum distance value given a calculated distance and a
// "min so far"
// Basic units of computation:
// - one comparison
// - one eight configuration ie. one translation
// - one traversal in min(width, height) dimension
// - all translations
int threads_per_block = 512; // 2^9
int blocks_per_grid = 65535; // 2^16
int translation_width = i_width - t_width + 1;
int translation_height = i_height - t_width + 1;
int blocks_per_comparison = 1;
float *gpu_image, *gpu_temp;
CUDA_SAFE_CALL(cudaMalloc(&gpu_image, i_width*i_height));
CUDA_SAFE_CALL(cudaMalloc(gpu_image, image, i_width*i_height, cudaMemcpyHostToDevice));
CUDA_SAFE_CALL(cudaMalloc(&gpu_temp, t_width * t_width));
CUDA_SAFE_CALL(cudaMalloc(gpu_temp, temp, t_width*t_width, cudaMemcpyHostToDevice));
if (t_width <= 512) {
blocks_per_comparison = 512;
for (int i = 0; i < translation_height; i++) {
for (int j = 0; j < translation_width; j++) {
}
}
} else if (t_width == 1024) {
blocks_per_comparison = 2048;
for (int i = 0; i < translation_height; i++) {
for (int j = 0; j < translation_width; j++) {
}
}
} else if (t_width == 2048) {
blocks_per_comparison = 8192;
for (int i = 0; i < translation_height; i++) {
for (int j = 0; j < translation_width; j++) {
}
}
} else if (t_width >= 4096) {
size_t arraySize = translation_width * translation_height * sizeof(float);
// float* result = (float *)malloc(arraySize);
float* gpu_result;
CUDA_SAFE_CALL(cudaMalloc(gpu_result, arraySize));
blocks_per_comparison = 32768;
float least_distance = UINT_MAX;
for (int i = 0; i < translation_height; i++) {
for (int j = 0; j < translation_width; j++) {
dim3 dim_threads_per_block(threads_per_block, 1, 1);
dim3 dim_blocks_per_grid(8, 4096);
leastDistance4096Kernel<<<dim_blocks_per_grid, dim_threads_per_block>>>(gpu_image, gpu_temp, gpu_result, translation_width, i, j);
cudaThreadSynchronize();
CUT_CHECK_ERROR("");
int level = 1;
while (level != (8*4096)) {
blocks_per_grid = 8*4096;
dim3 dim_blocks_per_grid(blocks_per_grid, 1);
reductionKernel<<<dim_blocks_per_grid, dim_threads_per_block>>>(gpu_result, 4096*4096, level);
cudaThreadSynchronize();
CUT_CHECK_ERROR("");
level *= 2;
blocks_per_grid /= 2;
if (blocks_per_grid == 0) {
blocks_per_grid = 1;
}
CUDA_SAFE_CALL(cudaMemcpy(&gpu_result, ))
}
}
}
// dim3 dim_threads_per_block(threads_per_block, 1, 1);
// dim3 dim_blocks_per_grid(blocks_per_grid, 1);
reductionKernel<<<dim_blocks_per_grid, dim_threads_per_block>>>()
return 0;
}
*/
|
68cad9baff01520fe9760570f2728500f94c736a.hip
|
// !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
#include "fdtd_calculations.h"
#include <stdio.h>
#include "utils.h"
/* Pushes the scalar simulation parameters, the source list and the jz current
 * waveform from *params into the device-side symbols used by the kernels.
 * The variable-length arrays are clamped to the fixed capacity of the device
 * arrays (D_MAX_SOURCES / D_MAX_JZ). */
void copySymbolsToDevice(FdtdParams *params)
{
    // Grid dimensions.
    CHECK(hipMemcpyToSymbol(dNx, &params->nx, sizeof(int)));
    CHECK(hipMemcpyToSymbol(dNy, &params->ny, sizeof(int)));
    CHECK(hipMemcpyToSymbol(dNz, &params->nz, sizeof(int)));
    // Time step and cell sizes.
    CHECK(hipMemcpyToSymbol(dDt, &params->dt, sizeof(float)));
    CHECK(hipMemcpyToSymbol(dDx, &params->dx, sizeof(float)));
    CHECK(hipMemcpyToSymbol(dDy, &params->dy, sizeof(float)));
    CHECK(hipMemcpyToSymbol(dDz, &params->dz, sizeof(float)));
    // Material constants.
    CHECK(hipMemcpyToSymbol(dMu0, &params->mu0, sizeof(float)));
    CHECK(hipMemcpyToSymbol(dEps0, &params->eps0, sizeof(float)));
    // Source positions (3 ints each), clamped to the device array capacity.
    CHECK(hipMemcpyToSymbol(dSourcesCount, &params->sourcesCount, sizeof(int)));
    int clampedSources = params->sourcesCount;
    if (clampedSources > D_MAX_SOURCES)
        clampedSources = D_MAX_SOURCES;
    CHECK(hipMemcpyToSymbol(dSources, params->sources, sizeof(int) * 3 * clampedSources));
    // Source current waveform, likewise clamped.
    int clampedJz = params->jzCount;
    if (clampedJz > D_MAX_JZ)
        clampedJz = D_MAX_JZ;
    CHECK(hipMemcpyToSymbol(dJz, params->jz, sizeof(float) * clampedJz));
}
/* Advances the magnetic field H by one time step from the spatial differences
 * of the electric field. One thread per (ix, iy, iz) cell on a 3D launch. */
__global__ void updateHField(float *hx, float *hy, float *hz,
                             float *exSource, float *eySource, float *ezSource)
{
    int nx = dNx;
    int ny = dNy;
    int nz = dNz;
    int ix = threadIdx.x + blockIdx.x * blockDim.x;
    int iy = threadIdx.y + blockIdx.y * blockDim.y;
    int iz = threadIdx.z + blockIdx.z * blockDim.z;
    // hx component: interior in x, up to n-2 in y and z.
    if(ix >= 1 && ix < nx-1 && iy >= 0 && iy < ny-1 && iz >= 0 && iz < nz-1) {
        float ezTerm = dDt/(dMu0 * dDy) *
            (OFFSET(ezSource, ix, iy+1, iz) - OFFSET(ezSource, ix, iy, iz));
        // NOTE(review): this coefficient also divides by dDy; a standard Yee hx
        // update divides the ey difference by dDz -- confirm dDy is intentional.
        float eyTerm = dDt/(dMu0 * dDy) *
            (OFFSET(eySource, ix, iy, iz+1) - OFFSET(eySource, ix, iy, iz));
        OFFSET(hx, ix, iy, iz) = OFFSET(hx, ix, iy, iz) - ezTerm + eyTerm;
    }
    // hy component: interior in y.
    if(ix >= 0 && ix < nx-1 && iy >= 1 && iy < ny-1 && iz >= 0 && iz < nz-1) {
        float exTerm = dDt/(dMu0 * dDz) *
            (OFFSET(exSource, ix, iy, iz+1) - OFFSET(exSource, ix, iy, iz));
        float ezTerm = dDt/(dMu0 * dDx) *
            (OFFSET(ezSource, ix+1, iy, iz) - OFFSET(ezSource, ix, iy, iz));
        OFFSET(hy, ix, iy, iz) = OFFSET(hy, ix, iy, iz) - exTerm + ezTerm;
    }
    // hz component: interior in z.
    if(ix >= 0 && ix < nx-1 && iy >= 0 && iy < ny-1 && iz >= 1 && iz < nz-1) {
        float eyTerm = dDt/(dMu0 * dDx) *
            (OFFSET(eySource, ix+1, iy, iz) - OFFSET(eySource, ix, iy, iz));
        float exTerm = dDt/(dMu0 * dDy) *
            (OFFSET(exSource, ix, iy+1, iz) - OFFSET(exSource, ix, iy, iz));
        OFFSET(hz, ix, iy, iz) = OFFSET(hz, ix, iy, iz) - eyTerm + exTerm;
    }
}
/* Advances the displacement field D by one time step from the spatial
 * differences of H: target = source + (dt/dh) * neighbor differences.
 * One thread per (ix, iy, iz) cell on a 3D launch. */
__global__ void updateDField(float *dxTarget, float *dyTarget, float *dzTarget,
                             float *dxSource, float *dySource, float *dzSource,
                             float *hx, float *hy, float *hz)
{
    int nx = dNx;
    int ny = dNy;
    int nz = dNz;
    int ix = threadIdx.x + blockIdx.x * blockDim.x;
    int iy = threadIdx.y + blockIdx.y * blockDim.y;
    int iz = threadIdx.z + blockIdx.z * blockDim.z;
    // Per-axis update coefficients (dt / cell size).
    float cx = dDt/dDx;
    float cy = dDt/dDy;
    float cz = dDt/dDz;
    // dx component: interior in y and z.
    if(ix >= 0 && ix < nx-1 && iy >= 1 && iy < ny-1 && iz >= 1 && iz < nz-1) {
        OFFSET(dxTarget, ix, iy, iz) = OFFSET(dxSource, ix, iy, iz) +
            cy * (OFFSET(hz, ix, iy, iz) - OFFSET(hz, ix, iy-1, iz)) -
            cz * (OFFSET(hy, ix, iy, iz) - OFFSET(hy, ix, iy, iz-1));
    }
    // dy component: interior in x and z.
    if(ix >= 1 && ix < nx-1 && iy >= 0 && iy < ny-1 && iz >= 1 && iz < nz-1) {
        OFFSET(dyTarget, ix, iy, iz) = OFFSET(dySource, ix, iy, iz) +
            cz * (OFFSET(hx, ix, iy, iz) - OFFSET(hx, ix, iy, iz-1)) -
            cx * (OFFSET(hz, ix, iy, iz) - OFFSET(hz, ix-1, iy, iz));
    }
    // dz component: interior in x and y.
    if(ix >= 1 && ix < nx-1 && iy >= 1 && iy < ny-1 && iz >= 0 && iz < nz-1) {
        OFFSET(dzTarget, ix, iy, iz) = OFFSET(dzSource, ix, iy, iz) +
            cx * (OFFSET(hy, ix, iy, iz) - OFFSET(hy, ix-1, iy, iz)) -
            cy * (OFFSET(hx, ix, iy, iz) - OFFSET(hx, ix, iy-1, iz));
    }
}
/* Advances the electric field E by one time step using a dispersive-material
 * update: each component combines the two previous E time levels (Source0/1)
 * with three time levels of D (Source0..2) through per-cell coefficients built
 * from sigma, epsI, epsS and tauD.
 * NOTE(review): the coefficients a..d read OFFSET(...) before any bounds
 * check, so threads past the volume edge may read out of range -- confirm the
 * launch grid exactly covers nx*ny*nz.
 * NOTE(review): the ey and ez guards are identical and use <= where the ex
 * guard uses < ; the ez guard looks copy-pasted from ey -- verify the bounds. */
__global__ void updateEField(float *exTarget, float *eyTarget, float *ezTarget,
                             float *exSource0, float *eySource0, float *ezSource0,
                             float *exSource1, float *eySource1, float *ezSource1,
                             float *dxSource0, float *dySource0, float *dzSource0,
                             float *dxSource1, float *dySource1, float *dzSource1,
                             float *dxSource2, float *dySource2, float *dzSource2,
                             float *sigma, float *epsI, float *epsS, float *tauD)
{
    int nx = dNx;
    int ny = dNy;
    int nz = dNz;
    int ix = threadIdx.x + blockIdx.x * blockDim.x;
    int iy = threadIdx.y + blockIdx.y * blockDim.y;
    int iz = threadIdx.z + blockIdx.z * blockDim.z;
    // Per-cell material coefficients (the 2.0/1.0 double literals promote
    // these expressions to double before the float store).
    float a = 2.0 * dEps0 * OFFSET(epsI, ix, iy, iz) * OFFSET(tauD, ix, iy, iz);
    float b = a + 2.0 * dDt * (dEps0 * OFFSET(epsS, ix, iy, iz) + OFFSET(sigma, ix, iy, iz) * OFFSET(tauD, ix, iy, iz));
    float c = 1.0/(b + OFFSET(sigma, ix, iy, iz) * dDt * dDt);
    float d = (2.0 * b - OFFSET(sigma, ix, iy, iz) * dDt * dDt);
    // Update ex
    if(ix >= 0 && ix < nx-1 &&
       iy >= 1 && iy < ny-1 &&
       iz >= 1 && iz < nz-1) {
        OFFSET(exTarget, ix, iy, iz) = c *
            (d * OFFSET(exSource0, ix, iy, iz) -
             a * OFFSET(exSource1, ix, iy, iz) +
             (2.0 * (dDt + OFFSET(tauD, ix, iy, iz))) * OFFSET(dxSource0, ix, iy, iz) -
             (2.0 * dDt + 4.0 * OFFSET(tauD, ix, iy, iz)) * OFFSET(dxSource1, ix, iy, iz) +
             (2.0 * OFFSET(tauD, ix, iy, iz)) * OFFSET(dxSource2, ix, iy, iz)
            );
    }
    // Update ey
    if(ix >= 1 && ix <= nx-1 &&
       iy >= 0 && iy <= ny-1 &&
       iz >= 1 && iz <= nz-1) {
        OFFSET(eyTarget, ix, iy, iz) = c *
            (d * OFFSET(eySource0, ix, iy, iz) -
             a * OFFSET(eySource1, ix, iy, iz) +
             (2.0 * (dDt + OFFSET(tauD, ix, iy, iz))) * OFFSET(dySource0, ix, iy, iz) -
             (2.0 * dDt + 4.0 * OFFSET(tauD, ix, iy, iz)) * OFFSET(dySource1, ix, iy, iz) +
             (2.0 * OFFSET(tauD, ix, iy, iz)) * OFFSET(dySource2, ix, iy, iz)
            );
    }
    // Update ez
    if(ix >= 1 && ix <= nx-1 &&
       iy >= 0 && iy <= ny-1 &&
       iz >= 1 && iz <= nz-1) {
        OFFSET(ezTarget, ix, iy, iz) = c *
            (d * OFFSET(ezSource0, ix, iy, iz) -
             a * OFFSET(ezSource1, ix, iy, iz) +
             (2.0 * (dDt + OFFSET(tauD, ix, iy, iz))) * OFFSET(dzSource0, ix, iy, iz) -
             (2.0 * dDt + 4.0 * OFFSET(tauD, ix, iy, iz)) * OFFSET(dzSource1, ix, iy, iz) +
             (2.0 * OFFSET(tauD, ix, iy, iz)) * OFFSET(dzSource2, ix, iy, iz)
            );
    }
}
/* Injects the excitation currents: re-does the dz update at each source cell
 * and subtracts the current waveform sample for this iteration. The work is
 * tiny, so a single thread (global index 0) performs all sources serially. */
__global__ void updateSources(float *dzTarget, float *dzSource,
                              float *hx, float *hy,
                              int currIteration)
{
    int nx = dNx;
    int ny = dNy;
    int ix = threadIdx.x + blockIdx.x * blockDim.x;
    int iy = threadIdx.y + blockIdx.y * blockDim.y;
    int iz = threadIdx.z + blockIdx.z * blockDim.z;
    // Every thread except (0,0,0) bails out immediately.
    if(ix != 0 || iy != 0 || iz != 0)
        return;
    for(int s = 0; s < dSourcesCount; s++) {
        int sx = dSources[s * 3 + 0];
        int sy = dSources[s * 3 + 1];
        int sz = dSources[s * 3 + 2];
        float val = OFFSET(dzSource, sx, sy, sz) +
                    dDt/dDx * (OFFSET(hy, sx, sy, sz) - OFFSET(hy, sx-1, sy, sz)) -
                    dDt/dDy * (OFFSET(hx, sx, sy, sz) - OFFSET(hx, sx, sy-1, sz)) -
                    dJz[currIteration];
        OFFSET(dzTarget, sx, sy, sz) = val;
    }
}
__global__ void updateMurBoundary(float *exTarget, float *eyTarget, float *ezTarget,
float *exSource, float *eySource, float *ezSource,
float *rpx0, float *rpy0, float *rpz0,
float *rpxEnd, float *rpyEnd, float *rpzEnd)
{
int nx = dNx;
int ny = dNy;
int nz = dNz;
int rpnx, rpny;
int ix = threadIdx.x + blockIdx.x * blockDim.x;
int iy = threadIdx.y + blockIdx.y * blockDim.y;
int iz = threadIdx.z + blockIdx.z * blockDim.z;
// Update ex
// for rpy
rpnx = dNx;
rpny = 2;
if(ix >= 0 && ix < nx-1 &&
iy == 0 &&
iz >= 1 && iz < nz-1) {
OFFSET(exTarget, ix, iy, iz) = 1/(dDt + dDy * sqrt(dMu0 * dEps0 * OFFSETRP(rpy0, ix, 0, iz))) *
(
(dDt - dDy * sqrt(dMu0 * dEps0 * OFFSETRP(rpy0, ix, 1, iz))) *
OFFSET(exTarget, ix, iy+1, iz) +
(dDt + dDy * sqrt(dMu0 * dEps0 * OFFSETRP(rpy0, ix, 1, iz))) *
OFFSET(exSource, ix, iy+1, iz) -
(dDt - dDy * sqrt(dMu0 * dEps0 * OFFSETRP(rpy0, ix, 0, iz))) *
OFFSET(exSource, ix, iy, iz)
);
}
if(ix >= 0 && ix < nx-1 &&
iy == ny-1 &&
iz >= 1 && iz < nz-1) {
OFFSET(exTarget, ix, iy, iz) = 1/(dDt + dDy * sqrt(dMu0 * dEps0 * OFFSETRP(rpyEnd, ix, 1, iz))) *
(
(dDt - dDy * sqrt(dMu0 * dEps0 * OFFSETRP(rpyEnd, ix, 0, iz))) *
OFFSET(exTarget, ix, iy-1, iz) +
(dDt + dDy * sqrt(dMu0 * dEps0 * OFFSETRP(rpyEnd, ix, 0, iz))) *
OFFSET(exSource, ix, iy-1, iz) -
(dDt - dDy * sqrt(dMu0 * dEps0 * OFFSETRP(rpyEnd, ix, 1, iz))) *
OFFSET(exSource, ix, iy, iz)
);
}
// for rpz
rpnx = dNx;
rpny = dNy;
if(ix >= 0 && ix < nx-1 &&
iy >= 1 && iy < ny-1 &&
iz == 0) {
OFFSET(exTarget, ix, iy, iz) = 1/(dDt + dDz * sqrt(dMu0 * dEps0 * OFFSETRP(rpz0, ix, iy, 0))) *
(
(dDt - dDz * sqrt(dMu0 * dEps0 * OFFSETRP(rpz0, ix, iy, 1))) *
OFFSET(exTarget, ix, iy, iz+1) +
(dDt + dDz * sqrt(dMu0 * dEps0 * OFFSETRP(rpz0, ix, iy, 1))) *
OFFSET(exSource, ix, iy, iz+1) -
(dDt - dDz * sqrt(dMu0 * dEps0 * OFFSETRP(rpz0, ix, iy, 0))) *
OFFSET(exSource, ix, iy, iz)
);
}
if(ix >= 0 && ix < nx-1 &&
iy >= 1 && iy < ny-1 &&
iz == nz-1) {
OFFSET(exTarget, ix, iy, iz) = 1/(dDt + dDz * sqrt(dMu0 * dEps0 * OFFSETRP(rpzEnd, ix, iy, 1))) *
(
(dDt - dDz * sqrt(dMu0 * dEps0 * OFFSETRP(rpzEnd, ix, iy, 0))) *
OFFSET(exTarget, ix, iy, iz-1) +
(dDt + dDz * sqrt(dMu0 * dEps0 * OFFSETRP(rpzEnd, ix, iy, 0))) *
OFFSET(exSource, ix, iy, iz-1) -
(dDt - dDz * sqrt(dMu0 * dEps0 * OFFSETRP(rpzEnd, ix, iy, 1))) *
OFFSET(exSource, ix, iy, iz)
);
}
// Update ey
// for rpx
rpnx = 2;
rpny = dNy;
if(ix == 0 &&
iy >= 0 && iy < ny-1 &&
iz >= 1 && iz < nz-1) {
OFFSET(eyTarget, ix, iy, iz) = 1/(dDt + dDx * sqrt(dMu0 * dEps0 * OFFSETRP(rpx0, 0, iy, iz))) *
(
(dDt - dDx * sqrt(dMu0 * dEps0 * OFFSETRP(rpx0, 1, iy, iz))) *
OFFSET(eyTarget, ix+1, iy, iz) +
(dDt + dDx * sqrt(dMu0 * dEps0 * OFFSETRP(rpx0, 1, iy, iz))) *
OFFSET(eySource, ix+1, iy, iz) -
(dDt - dDx * sqrt(dMu0 * dEps0 * OFFSETRP(rpx0, 0, iy, iz))) *
OFFSET(eySource, ix, iy, iz)
);
}
if(ix == nx-1 &&
iy >= 0 && iy < ny-1 &&
iz >= 1 && iz < nz-1) {
OFFSET(eyTarget, ix, iy, iz) = 1/(dDt + dDx * sqrt(dMu0 * dEps0 * OFFSETRP(rpxEnd, 1, iy, iz))) *
(
(dDt - dDx * sqrt(dMu0 * dEps0 * OFFSETRP(rpxEnd, 0, iy, iz))) *
OFFSET(eySource, ix-1, iy, iz) +
(dDt + dDx * sqrt(dMu0 * dEps0 * OFFSETRP(rpxEnd, 0, iy, iz))) *
OFFSET(eySource, ix-1, iy, iz) -
(dDt - dDx * sqrt(dMu0 * dEps0 * OFFSETRP(rpxEnd, 1, iy, iz))) *
OFFSET(eySource, ix, iy, iz)
);
}
// for rpz
rpnx = dNx;
rpny = dNy;
if(ix >= 1 && ix < nx-1 &&
iy >= 0 && iy < ny-1 &&
iz == 0) {
OFFSET(eyTarget, ix, iy, iz) = 1/(dDt + dDz * sqrt(dMu0 * dEps0 * OFFSETRP(rpz0, ix, iy, 0))) *
(
(dDt - dDz * sqrt(dMu0 * dEps0 * OFFSETRP(rpz0, ix, iy, 1))) *
OFFSET(eyTarget, ix, iy,iz+1) +
(dDt + dDz * sqrt(dMu0 * dEps0 * OFFSETRP(rpz0, ix, iy, 1))) *
OFFSET(eySource, ix, iy, iz+1) -
(dDt - dDz * sqrt(dMu0 * dEps0 * OFFSETRP(rpz0, ix, iy, 0))) *
OFFSET(eySource, ix, iy, iz)
);
}
if(ix >= 1 && ix < nx-1 &&
iy >= 0 && iy < ny-1 &&
iz == nz-1) {
OFFSET(eyTarget, ix, iy, iz) = 1/(dDt + dDz * sqrt(dMu0 * dEps0 * OFFSETRP(rpzEnd, ix, iy, 1))) *
(
(dDt - dDz * sqrt(dMu0 * dEps0 * OFFSETRP(rpzEnd, ix, iy, 0))) *
OFFSET(eyTarget, ix, iy, iz-1) +
(dDt + dDz * sqrt(dMu0 * dEps0 * OFFSETRP(rpzEnd, ix, iy, 0))) *
OFFSET(eySource, ix, iy, iz-1) -
(dDt - dDz *sqrt(dMu0 * dEps0 * OFFSETRP(rpzEnd, ix, iy, 1))) *
OFFSET(eySource, ix, iy, iz)
);
}
// Update ez
// for rpz
rpnx = 2;
rpny = dNy;
if(ix == 0 &&
iy >= 1 && iy < ny-1 &&
iz >= 0 && iz < nz-1) {
OFFSET(ezTarget, ix, iy, iz) = 1/(dDt + dDx * sqrt(dMu0 * dEps0 * OFFSETRP(rpx0, 0, iy, iz))) *
(
(dDt - dDx * sqrt(dMu0 * dEps0 * OFFSETRP(rpx0, 1, iy, iz))) *
OFFSET(ezTarget, ix+1, iy, iz) +
(dDt + dDx * sqrt(dMu0 * dEps0 * OFFSETRP(rpx0, 1, iy, iz))) *
OFFSET(ezSource, ix+1, iy, iz) -
(dDt - dDx * sqrt(dMu0 * dEps0 * OFFSETRP(rpx0, 0, iy, iz))) *
OFFSET(ezSource, ix, iy, iz)
);
}
if(ix == nx-1 &&
iy >= 1 && iy < ny-1 &&
iz >= 0 && iz < nz-1) {
OFFSET(ezTarget, ix, iy, iz) = 1/(dDt + dDx * sqrt(dMu0 * dEps0 * OFFSETRP(rpxEnd, 1, iy, iz))) *
(
(dDt - dDx * sqrt(dMu0 * dEps0 * OFFSETRP(rpxEnd, 0, iy, iz))) *
OFFSET(ezTarget, ix-1, iy, iz) +
(dDt + dDx * sqrt(dMu0 * dEps0 * OFFSETRP(rpxEnd, 0, iy, iz))) *
OFFSET(ezSource, ix-1, iy, iz) -
(dDt - dDx * sqrt(dMu0 * dEps0 * OFFSETRP(rpxEnd, 1, iy, iz))) *
OFFSET(ezSource, ix, iy, iz)
);
}
// for rpy
rpnx = dNx;
rpny = 2;
if(ix >= 1 && ix < nx-1 &&
iy == 0 &&
iz >= 0 && iz < nz-1) {
OFFSET(ezTarget, ix, iy, iz) = 1/(dDt + dDy * sqrt(dMu0 * dEps0 * OFFSETRP(rpy0, ix, 0, iz))) *
(
(dDt - dDy * sqrt(dMu0 * dEps0 * OFFSETRP(rpy0, ix, 1, iz))) *
OFFSET(ezTarget, ix, iy+1, iz) +
(dDt + dDy * sqrt(dMu0 * dEps0 * OFFSETRP(rpy0, ix, 1, iz))) *
OFFSET(ezSource, ix, iy+1, iz) -
(dDt - dDy * sqrt(dMu0 * dEps0 * OFFSETRP(rpy0, ix, 0, iz))) *
OFFSET(ezSource, ix, iy, iz)
);
}
if(ix >= 1 && ix < nx-1 &&
iy == ny-1 &&
iz >= 0 && iz < nz-1) {
OFFSET(ezTarget, ix, iy, iz) = 1/(dDt + dDy * sqrt(dMu0 * dEps0 * OFFSETRP(rpyEnd, ix, 1, iz))) *
(
(dDt - dDy * sqrt(dMu0 * dEps0 * OFFSETRP(rpyEnd, ix, 0, iz))) *
OFFSET(ezTarget, ix, iy-1, iz) +
(dDt + dDy * sqrt(dMu0 * dEps0 * OFFSETRP(rpyEnd, ix, 0, iz))) *
OFFSET(ezSource, ix, iy-1, iz) -
(dDt - dDy * sqrt(dMu0 * dEps0 * OFFSETRP(rpyEnd, ix, 1, iz))) *
OFFSET(ezSource, ix, iy, iz)
);
}
}
|
68cad9baff01520fe9760570f2728500f94c736a.cu
|
#include "fdtd_calculations.h"
#include <stdio.h>
#include "utils.h"
// Copies the host-side simulation parameters into the device __constant__
// symbols (dNx..dJz) consumed by the FDTD kernels. Must run before any
// kernel launch that reads them.
//
// BUGFIX: the original text contained mojibake "¶ms" (HTML-entity mangling
// of "&params"); the address-of expressions are restored below.
// BUGFIX: dSourcesCount is now clamped to D_MAX_SOURCES before being
// published — updateSources() iterates dSourcesCount entries of dSources,
// so copying an unclamped count while clamping the data would let kernels
// read past the constant array.
void copySymbolsToDevice(FdtdParams *params)
{
    CHECK(cudaMemcpyToSymbol(dNx, &params->nx, sizeof(int)));
    CHECK(cudaMemcpyToSymbol(dNy, &params->ny, sizeof(int)));
    CHECK(cudaMemcpyToSymbol(dNz, &params->nz, sizeof(int)));

    CHECK(cudaMemcpyToSymbol(dDt, &params->dt, sizeof(float)));
    CHECK(cudaMemcpyToSymbol(dDx, &params->dx, sizeof(float)));
    CHECK(cudaMemcpyToSymbol(dDy, &params->dy, sizeof(float)));
    CHECK(cudaMemcpyToSymbol(dDz, &params->dz, sizeof(float)));

    CHECK(cudaMemcpyToSymbol(dMu0, &params->mu0, sizeof(float)));
    CHECK(cudaMemcpyToSymbol(dEps0, &params->eps0, sizeof(float)));

    // Clamp both the count and the payload to the fixed device capacity.
    int sourcesCount = (D_MAX_SOURCES < params->sourcesCount) ? D_MAX_SOURCES : params->sourcesCount;
    CHECK(cudaMemcpyToSymbol(dSourcesCount, &sourcesCount, sizeof(int)));
    CHECK(cudaMemcpyToSymbol(dSources, params->sources, sizeof(int) * 3 * sourcesCount));

    int jzCount = (D_MAX_JZ < params->jzCount) ? D_MAX_JZ : params->jzCount;
    CHECK(cudaMemcpyToSymbol(dJz, params->jz, sizeof(float) * jzCount));
}
// Magnetic-field half-step of the Yee FDTD scheme: H -= dt/mu0 * curl(E).
// Expects a 3D launch covering at least (dNx, dNy, dNz); every branch
// guards its own valid index range, so oversized grids are safe.
__global__ void updateHField(float *hx, float *hy, float *hz,
                             float *exSource, float *eySource, float *ezSource)
{
    int nx = dNx;
    int ny = dNy;
    int nz = dNz;

    int ix = threadIdx.x + blockIdx.x * blockDim.x;
    int iy = threadIdx.y + blockIdx.y * blockDim.y;
    int iz = threadIdx.z + blockIdx.z * blockDim.z;

    // Update hx: dHx/dt = -(dEz/dy - dEy/dz)/mu0
    if(ix >= 1 && ix < nx-1 &&
       iy >= 0 && iy < ny-1 &&
       iz >= 0 && iz < nz-1) {
        OFFSET(hx, ix, iy, iz) = OFFSET(hx, ix, iy, iz) -
                                 dDt/(dMu0 * dDy) *
                                 (OFFSET(ezSource, ix, iy+1, iz) - OFFSET(ezSource, ix, iy, iz)) +
                                 // BUGFIX: this term is a z-derivative (iz+1 - iz), so it must be
                                 // scaled by dz — the original used dDy here, breaking the
                                 // symmetry with the hy/hz updates below.
                                 dDt/(dMu0 * dDz) *
                                 (OFFSET(eySource, ix, iy, iz+1) - OFFSET(eySource, ix, iy, iz));
    }

    // Update hy: dHy/dt = -(dEx/dz - dEz/dx)/mu0
    if(ix >= 0 && ix < nx-1 &&
       iy >= 1 && iy < ny-1 &&
       iz >= 0 && iz < nz-1) {
        OFFSET(hy, ix, iy, iz) = OFFSET(hy, ix, iy, iz) -
                                 dDt/(dMu0 * dDz) *
                                 (OFFSET(exSource, ix, iy, iz+1) - OFFSET(exSource, ix, iy, iz)) +
                                 dDt/(dMu0 * dDx) *
                                 (OFFSET(ezSource, ix+1, iy, iz) - OFFSET(ezSource, ix, iy, iz));
    }

    // Update hz: dHz/dt = -(dEy/dx - dEx/dy)/mu0
    if(ix >= 0 && ix < nx-1 &&
       iy >= 0 && iy < ny-1 &&
       iz >= 1 && iz < nz-1) {
        OFFSET(hz, ix, iy, iz) = OFFSET(hz, ix, iy, iz) -
                                 dDt/(dMu0 * dDx) *
                                 (OFFSET(eySource, ix+1, iy, iz) - OFFSET(eySource, ix, iy, iz)) +
                                 dDt/(dMu0 * dDy) *
                                 (OFFSET(exSource, ix, iy+1, iz) - OFFSET(exSource, ix, iy, iz));
    }
}
// Advances the electric flux density D one time step from the curl of H:
// D_new = D_old + dt * curl(H), component-wise on the staggered Yee grid.
// Each branch guards its own valid range, so oversized launch grids are safe.
__global__ void updateDField(float *dxTarget, float *dyTarget, float *dzTarget,
                             float *dxSource, float *dySource, float *dzSource,
                             float *hx, float *hy, float *hz)
{
    int nx = dNx;
    int ny = dNy;
    int nz = dNz;

    int ix = threadIdx.x + blockIdx.x * blockDim.x;
    int iy = threadIdx.y + blockIdx.y * blockDim.y;
    int iz = threadIdx.z + blockIdx.z * blockDim.z;

    // Per-axis Courant ratios dt/dx, dt/dy, dt/dz.
    float cx = dDt/dDx;
    float cy = dDt/dDy;
    float cz = dDt/dDz;

    // dx += dt * (dHz/dy - dHy/dz)
    if(ix >= 0 && ix < nx-1 &&
       iy >= 1 && iy < ny-1 &&
       iz >= 1 && iz < nz-1) {
        OFFSET(dxTarget, ix, iy, iz) = OFFSET(dxSource, ix, iy, iz) +
            cy * (OFFSET(hz, ix, iy, iz) - OFFSET(hz, ix, iy-1, iz)) -
            cz * (OFFSET(hy, ix, iy, iz) - OFFSET(hy, ix, iy, iz-1));
    }

    // dy += dt * (dHx/dz - dHz/dx)
    if(ix >= 1 && ix < nx-1 &&
       iy >= 0 && iy < ny-1 &&
       iz >= 1 && iz < nz-1) {
        OFFSET(dyTarget, ix, iy, iz) = OFFSET(dySource, ix, iy, iz) +
            cz * (OFFSET(hx, ix, iy, iz) - OFFSET(hx, ix, iy, iz-1)) -
            cx * (OFFSET(hz, ix, iy, iz) - OFFSET(hz, ix-1, iy, iz));
    }

    // dz += dt * (dHy/dx - dHx/dy)
    if(ix >= 1 && ix < nx-1 &&
       iy >= 1 && iy < ny-1 &&
       iz >= 0 && iz < nz-1) {
        OFFSET(dzTarget, ix, iy, iz) = OFFSET(dzSource, ix, iy, iz) +
            cx * (OFFSET(hy, ix, iy, iz) - OFFSET(hy, ix-1, iy, iz)) -
            cy * (OFFSET(hx, ix, iy, iz) - OFFSET(hx, ix, iy-1, iz));
    }
}
// Electric-field update for a dispersive (single-pole Debye) medium: E is
// recovered from the current and two previous D fields plus the two previous
// E fields, using the per-cell material arrays sigma / epsI / epsS / tauD.
__global__ void updateEField(float *exTarget, float *eyTarget, float *ezTarget,
                             float *exSource0, float *eySource0, float *ezSource0,
                             float *exSource1, float *eySource1, float *ezSource1,
                             float *dxSource0, float *dySource0, float *dzSource0,
                             float *dxSource1, float *dySource1, float *dzSource1,
                             float *dxSource2, float *dySource2, float *dzSource2,
                             float *sigma, float *epsI, float *epsS, float *tauD)
{
    int nx = dNx;
    int ny = dNy;
    int nz = dNz;

    int ix = threadIdx.x + blockIdx.x * blockDim.x;
    int iy = threadIdx.y + blockIdx.y * blockDim.y;
    int iz = threadIdx.z + blockIdx.z * blockDim.z;

    // BUGFIX: the material coefficients a/b/c/d below read the material
    // arrays at (ix, iy, iz) unconditionally, so threads launched beyond the
    // domain (grids rarely divide it evenly) must bail out first — the
    // original computed them for every thread, reading out of bounds.
    if(ix >= nx || iy >= ny || iz >= nz)
        return;

    // Per-cell update coefficients of the Debye recursion.
    float a = 2.0 * dEps0 * OFFSET(epsI, ix, iy, iz) * OFFSET(tauD, ix, iy, iz);
    float b = a + 2.0 * dDt * (dEps0 * OFFSET(epsS, ix, iy, iz) + OFFSET(sigma, ix, iy, iz) * OFFSET(tauD, ix, iy, iz));
    float c = 1.0/(b + OFFSET(sigma, ix, iy, iz) * dDt * dDt);
    float d = (2.0 * b - OFFSET(sigma, ix, iy, iz) * dDt * dDt);

    // Update ex
    if(ix >= 0 && ix < nx-1 &&
       iy >= 1 && iy < ny-1 &&
       iz >= 1 && iz < nz-1) {
        OFFSET(exTarget, ix, iy, iz) = c *
            (d * OFFSET(exSource0, ix, iy, iz) -
             a * OFFSET(exSource1, ix, iy, iz) +
             (2.0 * (dDt + OFFSET(tauD, ix, iy, iz))) * OFFSET(dxSource0, ix, iy, iz) -
             (2.0 * dDt + 4.0 * OFFSET(tauD, ix, iy, iz)) * OFFSET(dxSource1, ix, iy, iz) +
             (2.0 * OFFSET(tauD, ix, iy, iz)) * OFFSET(dxSource2, ix, iy, iz)
            );
    }

    // Update ey
    // NOTE(review): ey and ez use inclusive `<=` upper bounds while ex (and
    // all H/D updates) use `<` — looks inconsistent; confirm against the
    // reference implementation before changing. Preserved as-is here.
    if(ix >= 1 && ix <= nx-1 &&
       iy >= 0 && iy <= ny-1 &&
       iz >= 1 && iz <= nz-1) {
        OFFSET(eyTarget, ix, iy, iz) = c *
            (d * OFFSET(eySource0, ix, iy, iz) -
             a * OFFSET(eySource1, ix, iy, iz) +
             (2.0 * (dDt + OFFSET(tauD, ix, iy, iz))) * OFFSET(dySource0, ix, iy, iz) -
             (2.0 * dDt + 4.0 * OFFSET(tauD, ix, iy, iz)) * OFFSET(dySource1, ix, iy, iz) +
             (2.0 * OFFSET(tauD, ix, iy, iz)) * OFFSET(dySource2, ix, iy, iz)
            );
    }

    // Update ez
    if(ix >= 1 && ix <= nx-1 &&
       iy >= 0 && iy <= ny-1 &&
       iz >= 1 && iz <= nz-1) {
        OFFSET(ezTarget, ix, iy, iz) = c *
            (d * OFFSET(ezSource0, ix, iy, iz) -
             a * OFFSET(ezSource1, ix, iy, iz) +
             (2.0 * (dDt + OFFSET(tauD, ix, iy, iz))) * OFFSET(dzSource0, ix, iy, iz) -
             (2.0 * dDt + 4.0 * OFFSET(tauD, ix, iy, iz)) * OFFSET(dzSource1, ix, iy, iz) +
             (2.0 * OFFSET(tauD, ix, iy, iz)) * OFFSET(dzSource2, ix, iy, iz)
            );
    }
}
// Injects the per-iteration drive current dJz[currIteration] into the Dz
// field at every configured source cell. Serial by design: only the single
// global thread (0,0,0) does the work, since source counts are tiny.
__global__ void updateSources(float *dzTarget, float *dzSource,
                              float *hx, float *hy,
                              int currIteration)
{
    // Retained: the OFFSET indexing macro may reference nx/ny by name.
    int nx = dNx;
    int ny = dNy;

    int ix = threadIdx.x + blockIdx.x * blockDim.x;
    int iy = threadIdx.y + blockIdx.y * blockDim.y;
    int iz = threadIdx.z + blockIdx.z * blockDim.z;

    // All threads but (0,0,0) are idle.
    if(ix != 0 || iy != 0 || iz != 0)
        return;

    for(int s = 0; s < dSourcesCount; s++) {
        int sx = dSources[s * 3 + 0];
        int sy = dSources[s * 3 + 1];
        int sz = dSources[s * 3 + 2];

        // Standard Dz curl update at the source cell, minus the driven
        // current term for this iteration.
        OFFSET(dzTarget, sx, sy, sz) =
            OFFSET(dzSource, sx, sy, sz) +
            dDt/dDx * (OFFSET(hy, sx, sy, sz) - OFFSET(hy, sx-1, sy, sz)) -
            dDt/dDy * (OFFSET(hx, sx, sy, sz) - OFFSET(hx, sx, sy-1, sz)) -
            dJz[currIteration];
    }
}
// First-order Mur absorbing boundary condition for the tangential E-field
// components on the six outer faces of the domain. Each face uses the
// matching relative-permittivity plane (rpx0/rpxEnd, rpy0/rpyEnd,
// rpz0/rpzEnd) to form the local one-way wave coefficient
// sqrt(mu0 * eps0 * eps_r).
//
// NOTE(review): rpnx/rpny are assigned before each section and are
// presumably consumed by the OFFSETRP indexing macro — they describe the
// x/y extents of the rp* plane being addressed; keep them in sync with the
// plane used. Confirm against the OFFSETRP definition.
__global__ void updateMurBoundary(float *exTarget, float *eyTarget, float *ezTarget,
                                  float *exSource, float *eySource, float *ezSource,
                                  float *rpx0, float *rpy0, float *rpz0,
                                  float *rpxEnd, float *rpyEnd, float *rpzEnd)
{
    int nx = dNx;
    int ny = dNy;
    int nz = dNz;

    int rpnx, rpny;

    int ix = threadIdx.x + blockIdx.x * blockDim.x;
    int iy = threadIdx.y + blockIdx.y * blockDim.y;
    int iz = threadIdx.z + blockIdx.z * blockDim.z;

    // Update ex
    // for rpy (y = 0 and y = ny-1 faces)
    rpnx = dNx;
    rpny = 2;
    if(ix >= 0 && ix < nx-1 &&
       iy == 0 &&
       iz >= 1 && iz < nz-1) {
        OFFSET(exTarget, ix, iy, iz) = 1/(dDt + dDy * sqrt(dMu0 * dEps0 * OFFSETRP(rpy0, ix, 0, iz))) *
            (
                (dDt - dDy * sqrt(dMu0 * dEps0 * OFFSETRP(rpy0, ix, 1, iz))) *
                OFFSET(exTarget, ix, iy+1, iz) +
                (dDt + dDy * sqrt(dMu0 * dEps0 * OFFSETRP(rpy0, ix, 1, iz))) *
                OFFSET(exSource, ix, iy+1, iz) -
                (dDt - dDy * sqrt(dMu0 * dEps0 * OFFSETRP(rpy0, ix, 0, iz))) *
                OFFSET(exSource, ix, iy, iz)
            );
    }
    if(ix >= 0 && ix < nx-1 &&
       iy == ny-1 &&
       iz >= 1 && iz < nz-1) {
        OFFSET(exTarget, ix, iy, iz) = 1/(dDt + dDy * sqrt(dMu0 * dEps0 * OFFSETRP(rpyEnd, ix, 1, iz))) *
            (
                (dDt - dDy * sqrt(dMu0 * dEps0 * OFFSETRP(rpyEnd, ix, 0, iz))) *
                OFFSET(exTarget, ix, iy-1, iz) +
                (dDt + dDy * sqrt(dMu0 * dEps0 * OFFSETRP(rpyEnd, ix, 0, iz))) *
                OFFSET(exSource, ix, iy-1, iz) -
                (dDt - dDy * sqrt(dMu0 * dEps0 * OFFSETRP(rpyEnd, ix, 1, iz))) *
                OFFSET(exSource, ix, iy, iz)
            );
    }
    // for rpz (z = 0 and z = nz-1 faces)
    rpnx = dNx;
    rpny = dNy;
    if(ix >= 0 && ix < nx-1 &&
       iy >= 1 && iy < ny-1 &&
       iz == 0) {
        OFFSET(exTarget, ix, iy, iz) = 1/(dDt + dDz * sqrt(dMu0 * dEps0 * OFFSETRP(rpz0, ix, iy, 0))) *
            (
                (dDt - dDz * sqrt(dMu0 * dEps0 * OFFSETRP(rpz0, ix, iy, 1))) *
                OFFSET(exTarget, ix, iy, iz+1) +
                (dDt + dDz * sqrt(dMu0 * dEps0 * OFFSETRP(rpz0, ix, iy, 1))) *
                OFFSET(exSource, ix, iy, iz+1) -
                (dDt - dDz * sqrt(dMu0 * dEps0 * OFFSETRP(rpz0, ix, iy, 0))) *
                OFFSET(exSource, ix, iy, iz)
            );
    }
    if(ix >= 0 && ix < nx-1 &&
       iy >= 1 && iy < ny-1 &&
       iz == nz-1) {
        OFFSET(exTarget, ix, iy, iz) = 1/(dDt + dDz * sqrt(dMu0 * dEps0 * OFFSETRP(rpzEnd, ix, iy, 1))) *
            (
                (dDt - dDz * sqrt(dMu0 * dEps0 * OFFSETRP(rpzEnd, ix, iy, 0))) *
                OFFSET(exTarget, ix, iy, iz-1) +
                (dDt + dDz * sqrt(dMu0 * dEps0 * OFFSETRP(rpzEnd, ix, iy, 0))) *
                OFFSET(exSource, ix, iy, iz-1) -
                (dDt - dDz * sqrt(dMu0 * dEps0 * OFFSETRP(rpzEnd, ix, iy, 1))) *
                OFFSET(exSource, ix, iy, iz)
            );
    }

    // Update ey
    // for rpx (x = 0 and x = nx-1 faces)
    rpnx = 2;
    rpny = dNy;
    if(ix == 0 &&
       iy >= 0 && iy < ny-1 &&
       iz >= 1 && iz < nz-1) {
        OFFSET(eyTarget, ix, iy, iz) = 1/(dDt + dDx * sqrt(dMu0 * dEps0 * OFFSETRP(rpx0, 0, iy, iz))) *
            (
                (dDt - dDx * sqrt(dMu0 * dEps0 * OFFSETRP(rpx0, 1, iy, iz))) *
                OFFSET(eyTarget, ix+1, iy, iz) +
                (dDt + dDx * sqrt(dMu0 * dEps0 * OFFSETRP(rpx0, 1, iy, iz))) *
                OFFSET(eySource, ix+1, iy, iz) -
                (dDt - dDx * sqrt(dMu0 * dEps0 * OFFSETRP(rpx0, 0, iy, iz))) *
                OFFSET(eySource, ix, iy, iz)
            );
    }
    if(ix == nx-1 &&
       iy >= 0 && iy < ny-1 &&
       iz >= 1 && iz < nz-1) {
        OFFSET(eyTarget, ix, iy, iz) = 1/(dDt + dDx * sqrt(dMu0 * dEps0 * OFFSETRP(rpxEnd, 1, iy, iz))) *
            (
                (dDt - dDx * sqrt(dMu0 * dEps0 * OFFSETRP(rpxEnd, 0, iy, iz))) *
                // BUGFIX: was OFFSET(eySource, ix-1, iy, iz); every other
                // face (ex, ez, and the ix==0 ey branch) uses the *Target*
                // field for this first term of the Mur update.
                OFFSET(eyTarget, ix-1, iy, iz) +
                (dDt + dDx * sqrt(dMu0 * dEps0 * OFFSETRP(rpxEnd, 0, iy, iz))) *
                OFFSET(eySource, ix-1, iy, iz) -
                (dDt - dDx * sqrt(dMu0 * dEps0 * OFFSETRP(rpxEnd, 1, iy, iz))) *
                OFFSET(eySource, ix, iy, iz)
            );
    }
    // for rpz (z = 0 and z = nz-1 faces)
    rpnx = dNx;
    rpny = dNy;
    if(ix >= 1 && ix < nx-1 &&
       iy >= 0 && iy < ny-1 &&
       iz == 0) {
        OFFSET(eyTarget, ix, iy, iz) = 1/(dDt + dDz * sqrt(dMu0 * dEps0 * OFFSETRP(rpz0, ix, iy, 0))) *
            (
                (dDt - dDz * sqrt(dMu0 * dEps0 * OFFSETRP(rpz0, ix, iy, 1))) *
                OFFSET(eyTarget, ix, iy, iz+1) +
                (dDt + dDz * sqrt(dMu0 * dEps0 * OFFSETRP(rpz0, ix, iy, 1))) *
                OFFSET(eySource, ix, iy, iz+1) -
                (dDt - dDz * sqrt(dMu0 * dEps0 * OFFSETRP(rpz0, ix, iy, 0))) *
                OFFSET(eySource, ix, iy, iz)
            );
    }
    if(ix >= 1 && ix < nx-1 &&
       iy >= 0 && iy < ny-1 &&
       iz == nz-1) {
        OFFSET(eyTarget, ix, iy, iz) = 1/(dDt + dDz * sqrt(dMu0 * dEps0 * OFFSETRP(rpzEnd, ix, iy, 1))) *
            (
                (dDt - dDz * sqrt(dMu0 * dEps0 * OFFSETRP(rpzEnd, ix, iy, 0))) *
                OFFSET(eyTarget, ix, iy, iz-1) +
                (dDt + dDz * sqrt(dMu0 * dEps0 * OFFSETRP(rpzEnd, ix, iy, 0))) *
                OFFSET(eySource, ix, iy, iz-1) -
                (dDt - dDz * sqrt(dMu0 * dEps0 * OFFSETRP(rpzEnd, ix, iy, 1))) *
                OFFSET(eySource, ix, iy, iz)
            );
    }

    // Update ez
    // for rpx (x = 0 and x = nx-1 faces)
    // (the original comment said "for rpz", but this section indexes the
    //  rpx0/rpxEnd planes)
    rpnx = 2;
    rpny = dNy;
    if(ix == 0 &&
       iy >= 1 && iy < ny-1 &&
       iz >= 0 && iz < nz-1) {
        OFFSET(ezTarget, ix, iy, iz) = 1/(dDt + dDx * sqrt(dMu0 * dEps0 * OFFSETRP(rpx0, 0, iy, iz))) *
            (
                (dDt - dDx * sqrt(dMu0 * dEps0 * OFFSETRP(rpx0, 1, iy, iz))) *
                OFFSET(ezTarget, ix+1, iy, iz) +
                (dDt + dDx * sqrt(dMu0 * dEps0 * OFFSETRP(rpx0, 1, iy, iz))) *
                OFFSET(ezSource, ix+1, iy, iz) -
                (dDt - dDx * sqrt(dMu0 * dEps0 * OFFSETRP(rpx0, 0, iy, iz))) *
                OFFSET(ezSource, ix, iy, iz)
            );
    }
    if(ix == nx-1 &&
       iy >= 1 && iy < ny-1 &&
       iz >= 0 && iz < nz-1) {
        OFFSET(ezTarget, ix, iy, iz) = 1/(dDt + dDx * sqrt(dMu0 * dEps0 * OFFSETRP(rpxEnd, 1, iy, iz))) *
            (
                (dDt - dDx * sqrt(dMu0 * dEps0 * OFFSETRP(rpxEnd, 0, iy, iz))) *
                OFFSET(ezTarget, ix-1, iy, iz) +
                (dDt + dDx * sqrt(dMu0 * dEps0 * OFFSETRP(rpxEnd, 0, iy, iz))) *
                OFFSET(ezSource, ix-1, iy, iz) -
                (dDt - dDx * sqrt(dMu0 * dEps0 * OFFSETRP(rpxEnd, 1, iy, iz))) *
                OFFSET(ezSource, ix, iy, iz)
            );
    }
    // for rpy (y = 0 and y = ny-1 faces)
    rpnx = dNx;
    rpny = 2;
    if(ix >= 1 && ix < nx-1 &&
       iy == 0 &&
       iz >= 0 && iz < nz-1) {
        OFFSET(ezTarget, ix, iy, iz) = 1/(dDt + dDy * sqrt(dMu0 * dEps0 * OFFSETRP(rpy0, ix, 0, iz))) *
            (
                (dDt - dDy * sqrt(dMu0 * dEps0 * OFFSETRP(rpy0, ix, 1, iz))) *
                OFFSET(ezTarget, ix, iy+1, iz) +
                (dDt + dDy * sqrt(dMu0 * dEps0 * OFFSETRP(rpy0, ix, 1, iz))) *
                OFFSET(ezSource, ix, iy+1, iz) -
                (dDt - dDy * sqrt(dMu0 * dEps0 * OFFSETRP(rpy0, ix, 0, iz))) *
                OFFSET(ezSource, ix, iy, iz)
            );
    }
    if(ix >= 1 && ix < nx-1 &&
       iy == ny-1 &&
       iz >= 0 && iz < nz-1) {
        OFFSET(ezTarget, ix, iy, iz) = 1/(dDt + dDy * sqrt(dMu0 * dEps0 * OFFSETRP(rpyEnd, ix, 1, iz))) *
            (
                (dDt - dDy * sqrt(dMu0 * dEps0 * OFFSETRP(rpyEnd, ix, 0, iz))) *
                OFFSET(ezTarget, ix, iy-1, iz) +
                (dDt + dDy * sqrt(dMu0 * dEps0 * OFFSETRP(rpyEnd, ix, 0, iz))) *
                OFFSET(ezSource, ix, iy-1, iz) -
                (dDt - dDy * sqrt(dMu0 * dEps0 * OFFSETRP(rpyEnd, ix, 1, iz))) *
                OFFSET(ezSource, ix, iy, iz)
            );
    }
}
|
28de10a57265313160f9ab891a564db14b310693.hip
|
// !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
#include "saber/funcs/impl/cuda/saber_lstm.h"
#include "saber/core/tensor_op.h"
#include "cuda_inline_activation.h"
namespace anakin {
namespace saber {
// Per-timestep LSTM cell update (peephole variant) with activations chosen
// at runtime via ActiveType enums, resolved to device function pointers
// through Activate_inner<Dtype>.
// One thread handles one (batch, hidden-unit) pair; threads in the
// alignment padding (tid >= hidden_size) or beyond batch_size are idle.
// `cell` and `output` are read-modify-write state buffers.
template <typename Dtype>
__global__ void cal_lstm_kernel_batch_with_peephole_anyactivate(
    const Dtype* w_x, const Dtype* b_i, const Dtype* b_f, const Dtype* b_c, const Dtype* b_o,
    const Dtype* w_ci, const Dtype* w_cf, const Dtype* w_co, Dtype* cell,const int hidden_size,
    const int aligned_hidden_size,const int batch_size,const int word_start_id,
    const ActiveType gate_activity, const ActiveType cell_activity,const ActiveType candidate_activity,Dtype* output
) {
    const int thread_id = blockIdx.x*blockDim.x+threadIdx.x;
    const int batch_id = thread_id/aligned_hidden_size;
    const int tid=thread_id%aligned_hidden_size;
    if (tid < hidden_size && batch_id<batch_size) {
        // Resolve the three activation functions once per thread.
        Dtype(*gat_act)(const Dtype)=Activate_inner<Dtype>(gate_activity);
        Dtype(*cell_act)(const Dtype)=Activate_inner<Dtype>(cell_activity);
        Dtype(*candi_act)(const Dtype)=Activate_inner<Dtype>(candidate_activity);
        // w_x holds the precomputed input projection: four hidden_size-wide
        // gate slices (i, f, c, o) per word.
        const int emit_wx_offset = (word_start_id + batch_id) * hidden_size * 4;
        const Dtype* w_x_i = w_x + emit_wx_offset;
        const Dtype* w_x_f = w_x_i + hidden_size ;
        const Dtype* w_x_c = w_x_f + hidden_size;
        const Dtype* w_x_o = w_x_c + hidden_size;
        Dtype* gate_h_p = output + batch_id * hidden_size;
        Dtype* gate_c_p = cell + batch_id * hidden_size;
        const Dtype c_1 = gate_c_p[tid];  // previous cell state
        // Peephole connections: input/forget gates peek at the previous
        // cell state, the output gate peeks at the freshly computed one.
        const Dtype gate_i = gat_act(w_x_i[tid] + b_i[tid] + w_ci[tid] * c_1);
        const Dtype gate_f = gat_act(w_x_f[tid] + b_f[tid] + w_cf[tid] * c_1);
        const Dtype gate_c_s = cell_act(w_x_c[tid] + b_c[tid]);
        const Dtype gate_c = gate_f * c_1 + gate_i * gate_c_s;
        const Dtype gate_o = gat_act(w_x_o[tid] + b_o[tid] + gate_c * w_co[tid]);
        gate_c_p[tid] = gate_c;
        gate_h_p[tid] = gate_o * candi_act(gate_c);
    }
}
// Per-timestep LSTM cell update, no peephole connections, with activations
// chosen at runtime via ActiveType enums (resolved through
// Activate_inner<Dtype>). Same thread layout as the peephole variant:
// one thread per (batch, hidden-unit) pair; padding lanes are idle.
template <typename Dtype>
__global__ void cal_lstm_kernel_batch_without_peephole_anyactivate(
    const Dtype* w_x,const Dtype* b_i, const Dtype* b_f, const Dtype* b_c, const Dtype* b_o, Dtype* cell,
    const int hidden_size, const int aligned_hidden_size,const int batch_size,const int word_start_id, const ActiveType gate_activity,const ActiveType cell_activity,const ActiveType candidate_activity,
    Dtype* output) {
    const int thread_id = blockIdx.x*blockDim.x+threadIdx.x;
    const int batch_id = thread_id/aligned_hidden_size;
    const int tid=thread_id%aligned_hidden_size;
    if (tid < hidden_size && batch_id<batch_size) {
        // Resolve the three activation functions once per thread.
        Dtype(*gat_act)(const Dtype)=Activate_inner<Dtype>(gate_activity);
        Dtype(*cell_act)(const Dtype)=Activate_inner<Dtype>(cell_activity);
        Dtype(*candi_act)(const Dtype)=Activate_inner<Dtype>(candidate_activity);
        // w_x holds the precomputed input projection: four hidden_size-wide
        // gate slices (i, f, c, o) per word.
        const int emit_wx_offset = (word_start_id + batch_id) * hidden_size * 4;
        const Dtype* w_x_i = w_x + emit_wx_offset;
        const Dtype* w_x_f = w_x_i + hidden_size ;
        const Dtype* w_x_c = w_x_f + hidden_size;
        const Dtype* w_x_o = w_x_c + hidden_size;
        Dtype* gate_h_p = output + batch_id * hidden_size;
        Dtype* gate_c_p = cell + batch_id * hidden_size;
        const Dtype c_1 = gate_c_p[tid];  // previous cell state
        const Dtype gate_i = gat_act(w_x_i[tid] + b_i[tid]);
        const Dtype gate_f = gat_act(w_x_f[tid] + b_f[tid]);
        const Dtype gate_c_s = cell_act(w_x_c[tid] + b_c[tid]);
        const Dtype gate_c = gate_f * c_1 + gate_i * gate_c_s;
        const Dtype gate_o = gat_act(w_x_o[tid] + b_o[tid]);
        gate_c_p[tid] = gate_c;
        gate_h_p[tid] = gate_o * candi_act(gate_c);
//        printf("tid = %d, f = %f, i = %f, o = %f, hout = %f, w_x_i = %f, c_i = %f,c_out = %f, batch_id = %d\n",tid,gate_f,gate_i,gate_o,gate_h_p[tid],w_x_i[tid],c_1,gate_c,batch_id);
    }
}
// Per-timestep LSTM cell update, peephole variant, with the fixed
// sigmoid/tanh/tanh activation set. One thread handles one
// (batch, hidden-unit) pair; lanes in the alignment padding and batches
// beyond batch_size do nothing. `cell` and `output` are read-modify-write.
template <typename Dtype>
__global__ void cal_lstm_kernel_batch_with_peephole(
    const Dtype* w_x, const Dtype* b_i, const Dtype* b_f, const Dtype* b_c, const Dtype* b_o,
    const Dtype* w_ci, const Dtype* w_cf, const Dtype* w_co, Dtype* cell, const int hidden_size,
    const int aligned_hidden_size, const int batch_size, const int word_start_id,
    Dtype* output) {
    const int flat = blockIdx.x * blockDim.x + threadIdx.x;
    const int batch = flat / aligned_hidden_size;
    const int unit = flat % aligned_hidden_size;
    if (unit >= hidden_size || batch >= batch_size) {
        return;
    }
    // w_x holds the precomputed input projection: four hidden_size-wide
    // gate slices (i, f, c, o) per word, word-indexed from word_start_id.
    const Dtype* gates = w_x + (word_start_id + batch) * hidden_size * 4;
    const Dtype xi = gates[unit];
    const Dtype xf = gates[hidden_size + unit];
    const Dtype xc = gates[2 * hidden_size + unit];
    const Dtype xo = gates[3 * hidden_size + unit];
    Dtype* h_out = output + batch * hidden_size;
    Dtype* c_out = cell + batch * hidden_size;
    const Dtype c_prev = c_out[unit];
    // Peephole connections: input/forget gates peek at the previous cell
    // state, the output gate peeks at the freshly computed one.
    const Dtype i_g = Sigmoid(xi + b_i[unit] + w_ci[unit] * c_prev);
    const Dtype f_g = Sigmoid(xf + b_f[unit] + w_cf[unit] * c_prev);
    const Dtype c_cand = Tanh(xc + b_c[unit]);
    const Dtype c_new = f_g * c_prev + i_g * c_cand;
    const Dtype o_g = Sigmoid(xo + b_o[unit] + c_new * w_co[unit]);
    c_out[unit] = c_new;
    h_out[unit] = o_g * Tanh(c_new);
}
// Per-timestep LSTM cell update, no peephole connections, fixed activation
// set. Same thread layout as the peephole variant above.
// NOTE(review): this variant uses Sigmoid_fluid/Tanh_fluid while the
// peephole kernel uses Sigmoid/Tanh — presumably intentional
// (fluid-framework-compatible activations); confirm.
template <typename Dtype>
__global__ void cal_lstm_kernel_batch_without_peephole(
    const Dtype* w_x,const Dtype* b_i, const Dtype* b_f, const Dtype* b_c, const Dtype* b_o, Dtype* cell,
    const int hidden_size, const int aligned_hidden_size,const int batch_size,const int word_start_id, Dtype* output) {
    const int thread_id = blockIdx.x*blockDim.x+threadIdx.x;
    const int batch_id = thread_id/aligned_hidden_size;
    const int tid=thread_id%aligned_hidden_size;
    if (tid < hidden_size && batch_id<batch_size) {
        // w_x holds the precomputed input projection: four hidden_size-wide
        // gate slices (i, f, c, o) per word.
        const int emit_wx_offset = (word_start_id + batch_id) * hidden_size * 4;
        const Dtype* w_x_i = w_x + emit_wx_offset;
        const Dtype* w_x_f = w_x_i + hidden_size ;
        const Dtype* w_x_c = w_x_f + hidden_size;
        const Dtype* w_x_o = w_x_c + hidden_size;
        Dtype* gate_h_p = output + batch_id * hidden_size;
        Dtype* gate_c_p = cell + batch_id * hidden_size;
        const Dtype c_1 = gate_c_p[tid];  // previous cell state
        const Dtype gate_i = Sigmoid_fluid(w_x_i[tid] + b_i[tid]);
        const Dtype gate_f = Sigmoid_fluid(w_x_f[tid] + b_f[tid]);
        const Dtype gate_c_s = Tanh_fluid(w_x_c[tid] + b_c[tid]);
        const Dtype gate_c = gate_f * c_1 + gate_i * gate_c_s;
        const Dtype gate_o = Sigmoid_fluid(w_x_o[tid] + b_o[tid]);
        gate_c_p[tid] = gate_c;
        gate_h_p[tid] = gate_o * Tanh_fluid(gate_c);
    }
}
// Batched LSTM forward pass over a padded/sorted sequence batch.
// Pipeline: (1) one big GEMM projects all inputs onto the 4 gate weights
// (w_x), (2) per time step, a small GEMM adds the recurrent projection,
// then one of the four cell kernels applies gates/activations in place.
// Sequences may be re-sorted by length (`transform`) via _seq_util so each
// step processes a dense prefix of the batch.
template<>
SaberStatus
SaberLstm<NV, AK_FLOAT>::dispatch_batch(
    const std::vector < Tensor<NV>* >& inputs,
    std::vector < Tensor<NV>* >& outputs,
    LstmParam < NV >& param) {
    Tensor<NV>* x = inputs[0];
    // Last level of the nested seq-offset holds the per-sequence boundaries.
    std::vector<int> offset_vec = x->get_seq_offset()[x->get_seq_offset().size()-1];
    int seq_sum = x->num();
    int batch_size = offset_vec.size() - 1;
    const OpDataType* x_data = (const OpDataType*)x->data();
    // Weight layout: [W_x (word_size x 4H) | W_h (H x 4H)];
    // bias layout: [b_i b_f b_c b_o | peephole w_ci w_cf w_co].
    const OpDataType *weight_h = (const OpDataType *)(param.weight()->data())+4*_hidden_size*_word_size;
    const OpDataType *weight_w = (const OpDataType *)param.weight()->data();
    const OpDataType *bias = (const OpDataType *)param.bias()->data();
    const OpDataType *weight_peephole = (const OpDataType *)(param.bias()->data())+4*_hidden_size;
    const OpDataType* h_init = nullptr;
    const OpDataType* inner_x = (const OpDataType *)inputs[0]->data();
    OpDataType* inner_h_out = (OpDataType *)outputs[0]->mutable_data();
    OpDataType* inner_cell = nullptr;
    _gemm_wx = saber_find_fast_sass_gemm(false, false, seq_sum, 4 * _hidden_size,_word_size);
    _gemm_wh = saber_find_fast_sass_gemm(false, false, batch_size, 4 * _hidden_size, _hidden_size);
    utils::try_expand_tensor(_temp_map_dev,seq_sum);
    // transform == true means sequences get reordered by length.
    bool transform = _seq_util.get_sorted_map(offset_vec, this->_ctx->get_compute_stream());
    std::vector<int> emit_offset_vec=_seq_util.get_emit_offset_vec();
    int emit_length = emit_offset_vec.size()-1;
    if (inputs.size() > 1) {
        h_init = (const OpDataType *)inputs[1]->data();
        utils::try_expand_tensor(_init_hidden,batch_size * _hidden_size);
        // NOTE(review): h_init is read from inputs[1] and then immediately
        // re-pointed at _init_hidden without anything being copied into it —
        // looks like a missing copy/transform step; confirm.
        h_init = (const OpDataType *)_init_hidden.data();
    } else if (param.init_hidden() != nullptr) {
        h_init = (const OpDataType *)param.init_hidden()->data();
        //FIXME:is it correct?
    } else {
        // No initial hidden state supplied: use a (lazily grown) zero buffer.
        if (_temp_zero.valid_size() < batch_size * _hidden_size) {
            utils::try_expand_tensor(_temp_zero,batch_size * _hidden_size);
            CUDA_CHECK(hipMemsetAsync(_temp_zero.mutable_data(), 0,
                                       sizeof(OpDataType)*batch_size * _hidden_size,
                                       _ctx->get_compute_stream()));
        }
        h_init = (const OpDataType *)_temp_zero.data();
    }
    // Workspace: gate projections, recurrent projections, output, cell state.
    utils::try_expand_tensor(_temp_wx,seq_sum * 4 * _hidden_size);
    utils::try_expand_tensor(_temp_wh,batch_size * 4 * _hidden_size);
    utils::try_expand_tensor(_temp_out,seq_sum * _hidden_size * param.num_direction);
    utils::try_expand_tensor(_temp_cell,batch_size * _hidden_size);
    if (transform) {
        // Work on a length-sorted copy of the input; results are unsorted
        // back into outputs[0] at the end.
        utils::try_expand_tensor(_temp_x,seq_sum * _word_size);
        _seq_util.seq_2_sorted_seq(x_data, (OpDataType *)_temp_x.mutable_data(), _word_size, _ctx->get_compute_stream());
        inner_h_out = (OpDataType *)_temp_out.mutable_data();
        inner_x = (OpDataType *)_temp_x.mutable_data();
        if (inputs.size() > 1 || param.init_hidden() != nullptr) {
            CHECK(false) << "not support inner_h_init != nullptr";
        }
    }
    inner_cell = (OpDataType *)_temp_cell.mutable_data();
    // Cell state starts at zero for every sequence.
    CUDA_CHECK(hipMemsetAsync(inner_cell, 0, sizeof(OpDataType)*batch_size * _hidden_size,
                               _ctx->get_compute_stream()));
    OpDataType* temp_wh = (OpDataType *)_temp_wh.mutable_data();
    OpDataType* temp_wx = (OpDataType *)_temp_wx.mutable_data();
    // Input projection for the whole batch in one GEMM: temp_wx = x * W_x.
    _gemm_wx(seq_sum, 4 * _hidden_size, _word_size, 1.0, inner_x, 0.0, weight_w, temp_wx,
             _ctx->get_compute_stream());
    // Gate slice order inside each 4H-wide row: input, forget, cell, output.
    const int i_offset = 0;
    const int f_offset = 1;
    const int c_offset = 2;
    const int o_offset = 3;
    const OpDataType* b_i = bias + i_offset * _hidden_size;
    const OpDataType* b_f = bias + f_offset * _hidden_size;
    const OpDataType* b_c = bias + c_offset * _hidden_size;
    const OpDataType* b_o = bias + o_offset * _hidden_size;
    const OpDataType* w_ci = nullptr;
    const OpDataType* w_cf =nullptr;
    const OpDataType* w_co =nullptr;
    if(param.with_peephole){
        w_ci = weight_peephole + 0 * _hidden_size;
        w_cf = weight_peephole + 1 * _hidden_size;
        w_co = weight_peephole + 2 * _hidden_size;
    }
    // Time-step loop; reverse iteration only supported for batch_size == 1.
    for (int word_id = 0; word_id < emit_length; word_id++) {
        int real_word_id = word_id;
        int last_word_id = word_id - 1;
        if (param.is_reverse && batch_size == 1) {
            real_word_id = emit_length - word_id - 1;
            last_word_id = real_word_id + 1;
        }
        int emit_word_id_start = emit_offset_vec[real_word_id];
        int emit_word_id_end = emit_offset_vec[real_word_id + 1];
        int emit_word_length = emit_word_id_end - emit_word_id_start;
        const OpDataType* hin;
        if (word_id == 0) {
            hin = h_init;
        } else {
            hin = inner_h_out + emit_offset_vec[last_word_id] * _hidden_size;
        }
        // DLOG(INFO) << "word_id = " << word_id << ",emit_start = " << emit_word_id_start << ",emit_end=" <<emit_word_id_end;
        OpDataType* hout = nullptr;
        hout = emit_offset_vec[real_word_id] * _hidden_size + inner_h_out;
        // Recurrent projection accumulated into the input projection
        // (beta = 1): temp_wx[step] += h_prev * W_h.
        _gemm_wh(emit_word_length, 4 * _hidden_size, _hidden_size, 1.0, hin, 1.f,
                 weight_h,
                 temp_wx+emit_word_id_start*4*_hidden_size, _ctx->get_compute_stream());
        const int block_dim=512;
        const int grid_dim=round_up(emit_word_length*_aligned_hidden_size,block_dim);
        // Fast path for the standard sigmoid/tanh/tanh activation set;
        // otherwise fall back to the runtime-dispatched kernels.
        if (param.gate_activity == Active_sigmoid && param.cell_activity == Active_tanh
                && param.candidate_activity == Active_tanh) {
            if (param.with_peephole) {
                cal_lstm_kernel_batch_with_peephole << <grid_dim, block_dim , 0
                    , _ctx->get_compute_stream() >> >
                    (temp_wx, b_i,b_f,b_c,b_o, w_ci,w_cf,w_co, inner_cell, _hidden_size,_aligned_hidden_size,emit_word_length, emit_word_id_start, hout);
            } else {
                cal_lstm_kernel_batch_without_peephole << < grid_dim, block_dim , 0
                    , _ctx->get_compute_stream() >> >
                    (temp_wx, b_i,b_f,b_c,b_o, inner_cell, _hidden_size, _aligned_hidden_size,emit_word_length,emit_word_id_start, hout);
            }
        } else {
            if (param.with_peephole) {
                cal_lstm_kernel_batch_with_peephole_anyactivate << < grid_dim, block_dim , 0
                    , _ctx->get_compute_stream() >> >
                    (temp_wx, b_i, b_f, b_c, b_o, w_ci, w_cf, w_co, inner_cell, _hidden_size, _aligned_hidden_size,emit_word_length,emit_word_id_start, param.gate_activity,
                     param.cell_activity, param.candidate_activity, hout);
            } else{
                cal_lstm_kernel_batch_without_peephole_anyactivate << < grid_dim, block_dim , 0
                    , _ctx->get_compute_stream() >> >
                    (temp_wx, b_i, b_f, b_c, b_o, inner_cell, _hidden_size,_aligned_hidden_size,emit_word_length, emit_word_id_start, param.gate_activity,
                     param.cell_activity, param.candidate_activity, hout);
            }
        }
    }
    if (transform) {
        // Undo the length-sorting so outputs match the caller's order.
        _seq_util.sorted_seq_2_seq((const OpDataType *)_temp_out.data(), (OpDataType *)outputs[0]->mutable_data(), _hidden_size,
                                   _ctx->get_compute_stream());
    }
    outputs[0]->set_seq_offset(inputs[0]->get_seq_offset());
    return SaberSuccess;
};
//TODO: complete dispatch_once
// Placeholder for the single-sequence fast path; currently a no-op that
// reports success.
template<>
SaberStatus
SaberLstm<NV, AK_FLOAT>::dispatch_once(
    const std::vector < Tensor<NV>* >& inputs,
    std::vector < Tensor<NV>* >& outputs,
    LstmParam < NV >& param) {
    return SaberSuccess;
};
// Public entry point: validates the currently supported configuration
// (single input/output tensor, no external initial hidden state, a single
// layer) and forwards to dispatch_batch().
template<>
SaberStatus
SaberLstm<NV, AK_FLOAT>::dispatch(
    const std::vector < Tensor<NV>* >& inputs,
    std::vector < Tensor<NV>* >& outputs,
    LstmParam < NV >& param) {
    CHECK_EQ(inputs.size(),1)<<"only support input size = 1";
    CHECK_EQ(outputs.size(),1)<<"only support outputs size = 1";
    CHECK_EQ(param.init_hidden()==nullptr, true )<<"only support param.init_hidden() == nullptr";
    CHECK_EQ(param.num_layers,1)<<"only support param.num_layers==1";
    return dispatch_batch(inputs, outputs, param);
}
// Stub instantiations for precisions not yet implemented on NV.
DEFINE_OP_TEMPLATE(SaberLstm, LstmParam, NV, AK_HALF);
DEFINE_OP_TEMPLATE(SaberLstm, LstmParam, NV, AK_INT8);
} // namespace saber
} // namespace anakin
|
28de10a57265313160f9ab891a564db14b310693.cu
|
#include "saber/funcs/impl/cuda/saber_lstm.h"
#include "saber/core/tensor_op.h"
#include "cuda_inline_activation.h"
namespace anakin {
namespace saber {
// Per-timestep LSTM cell update (peephole variant) with activations chosen
// at runtime via ActiveType enums, resolved to device function pointers
// through Activate_inner<Dtype>.
// One thread handles one (batch, hidden-unit) pair; threads in the
// alignment padding (tid >= hidden_size) or beyond batch_size are idle.
// `cell` and `output` are read-modify-write state buffers.
template <typename Dtype>
__global__ void cal_lstm_kernel_batch_with_peephole_anyactivate(
    const Dtype* w_x, const Dtype* b_i, const Dtype* b_f, const Dtype* b_c, const Dtype* b_o,
    const Dtype* w_ci, const Dtype* w_cf, const Dtype* w_co, Dtype* cell,const int hidden_size,
    const int aligned_hidden_size,const int batch_size,const int word_start_id,
    const ActiveType gate_activity, const ActiveType cell_activity,const ActiveType candidate_activity,Dtype* output
) {
    const int thread_id = blockIdx.x*blockDim.x+threadIdx.x;
    const int batch_id = thread_id/aligned_hidden_size;
    const int tid=thread_id%aligned_hidden_size;
    if (tid < hidden_size && batch_id<batch_size) {
        // Resolve the three activation functions once per thread.
        Dtype(*gat_act)(const Dtype)=Activate_inner<Dtype>(gate_activity);
        Dtype(*cell_act)(const Dtype)=Activate_inner<Dtype>(cell_activity);
        Dtype(*candi_act)(const Dtype)=Activate_inner<Dtype>(candidate_activity);
        // w_x holds the precomputed input projection: four hidden_size-wide
        // gate slices (i, f, c, o) per word.
        const int emit_wx_offset = (word_start_id + batch_id) * hidden_size * 4;
        const Dtype* w_x_i = w_x + emit_wx_offset;
        const Dtype* w_x_f = w_x_i + hidden_size ;
        const Dtype* w_x_c = w_x_f + hidden_size;
        const Dtype* w_x_o = w_x_c + hidden_size;
        Dtype* gate_h_p = output + batch_id * hidden_size;
        Dtype* gate_c_p = cell + batch_id * hidden_size;
        const Dtype c_1 = gate_c_p[tid];  // previous cell state
        // Peephole connections: input/forget gates peek at the previous
        // cell state, the output gate peeks at the freshly computed one.
        const Dtype gate_i = gat_act(w_x_i[tid] + b_i[tid] + w_ci[tid] * c_1);
        const Dtype gate_f = gat_act(w_x_f[tid] + b_f[tid] + w_cf[tid] * c_1);
        const Dtype gate_c_s = cell_act(w_x_c[tid] + b_c[tid]);
        const Dtype gate_c = gate_f * c_1 + gate_i * gate_c_s;
        const Dtype gate_o = gat_act(w_x_o[tid] + b_o[tid] + gate_c * w_co[tid]);
        gate_c_p[tid] = gate_c;
        gate_h_p[tid] = gate_o * candi_act(gate_c);
    }
}
// Per-timestep LSTM cell update, no peephole connections, with activations
// chosen at runtime via ActiveType enums (resolved through
// Activate_inner<Dtype>). Same thread layout as the peephole variant:
// one thread per (batch, hidden-unit) pair; padding lanes are idle.
template <typename Dtype>
__global__ void cal_lstm_kernel_batch_without_peephole_anyactivate(
    const Dtype* w_x,const Dtype* b_i, const Dtype* b_f, const Dtype* b_c, const Dtype* b_o, Dtype* cell,
    const int hidden_size, const int aligned_hidden_size,const int batch_size,const int word_start_id, const ActiveType gate_activity,const ActiveType cell_activity,const ActiveType candidate_activity,
    Dtype* output) {
    const int thread_id = blockIdx.x*blockDim.x+threadIdx.x;
    const int batch_id = thread_id/aligned_hidden_size;
    const int tid=thread_id%aligned_hidden_size;
    if (tid < hidden_size && batch_id<batch_size) {
        // Resolve the three activation functions once per thread.
        Dtype(*gat_act)(const Dtype)=Activate_inner<Dtype>(gate_activity);
        Dtype(*cell_act)(const Dtype)=Activate_inner<Dtype>(cell_activity);
        Dtype(*candi_act)(const Dtype)=Activate_inner<Dtype>(candidate_activity);
        // w_x holds the precomputed input projection: four hidden_size-wide
        // gate slices (i, f, c, o) per word.
        const int emit_wx_offset = (word_start_id + batch_id) * hidden_size * 4;
        const Dtype* w_x_i = w_x + emit_wx_offset;
        const Dtype* w_x_f = w_x_i + hidden_size ;
        const Dtype* w_x_c = w_x_f + hidden_size;
        const Dtype* w_x_o = w_x_c + hidden_size;
        Dtype* gate_h_p = output + batch_id * hidden_size;
        Dtype* gate_c_p = cell + batch_id * hidden_size;
        const Dtype c_1 = gate_c_p[tid];  // previous cell state
        const Dtype gate_i = gat_act(w_x_i[tid] + b_i[tid]);
        const Dtype gate_f = gat_act(w_x_f[tid] + b_f[tid]);
        const Dtype gate_c_s = cell_act(w_x_c[tid] + b_c[tid]);
        const Dtype gate_c = gate_f * c_1 + gate_i * gate_c_s;
        const Dtype gate_o = gat_act(w_x_o[tid] + b_o[tid]);
        gate_c_p[tid] = gate_c;
        gate_h_p[tid] = gate_o * candi_act(gate_c);
//        printf("tid = %d, f = %f, i = %f, o = %f, hout = %f, w_x_i = %f, c_i = %f,c_out = %f, batch_id = %d\n",tid,gate_f,gate_i,gate_o,gate_h_p[tid],w_x_i[tid],c_1,gate_c,batch_id);
    }
}
// One LSTM time step for a batch, with peephole connections
// (w_ci/w_cf/w_co couple the cell state into the gate pre-activations)
// and fixed Sigmoid/Tanh activations.
// Same layout as the *_anyactivate kernels: w_x holds 4*hidden_size fused
// gate pre-activations per word in [i, f, c, o] order; one thread per
// (batch, hidden-unit); `cell` is updated in place from c_{t-1} to c_t.
template <typename Dtype>
__global__ void cal_lstm_kernel_batch_with_peephole(
const Dtype* w_x, const Dtype* b_i, const Dtype* b_f, const Dtype* b_c, const Dtype* b_o,
const Dtype* w_ci, const Dtype* w_cf, const Dtype* w_co, Dtype* cell,const int hidden_size,
const int aligned_hidden_size,const int batch_size, const int word_start_id,
Dtype* output) {
const int thread_id = blockIdx.x*blockDim.x+threadIdx.x;
const int batch_id = thread_id/aligned_hidden_size;
const int tid=thread_id%aligned_hidden_size;
if (tid < hidden_size && batch_id<batch_size) {
// Offset of this word's fused 4-gate pre-activations inside w_x.
const int emit_wx_offset = (word_start_id + batch_id) * hidden_size * 4;
const Dtype* w_x_i = w_x + emit_wx_offset;
const Dtype* w_x_f = w_x_i + hidden_size ;
const Dtype* w_x_c = w_x_f + hidden_size;
const Dtype* w_x_o = w_x_c + hidden_size;
Dtype* gate_h_p = output + batch_id * hidden_size;
Dtype* gate_c_p = cell + batch_id * hidden_size;
const Dtype c_1 = gate_c_p[tid];  // c_{t-1}
// Input and forget gates peek at the previous cell state.
const Dtype gate_i = Sigmoid(w_x_i[tid] + b_i[tid] + w_ci[tid] * c_1);
const Dtype gate_f = Sigmoid(w_x_f[tid] + b_f[tid] + w_cf[tid] * c_1);
const Dtype gate_c_s = Tanh(w_x_c[tid] + b_c[tid]);
const Dtype gate_c = gate_f * c_1 + gate_i * gate_c_s;  // c_t
// Output gate peeks at the freshly computed cell state.
const Dtype gate_o = Sigmoid(w_x_o[tid] + b_o[tid] + gate_c * w_co[tid]);
gate_c_p[tid] = gate_c;
gate_h_p[tid] = gate_o * Tanh(gate_c);  // h_t
// printf("tid = %d, f = %f, i = %f, o = %f, hout = %f, w_x_i = %f, c_i = %f,c_out = %f, batch_id = %d\n",tid,gate_f,gate_i,gate_o,gate_h_p[tid],w_x_i[tid],c_1,gate_c,batch_id);
}
}
// One LSTM time step for a batch, no peephole connections, fixed
// activations.  Same layout as the other cal_lstm_* kernels.
// NOTE(review): this variant uses Sigmoid_fluid/Tanh_fluid while the
// peephole variant uses Sigmoid/Tanh — presumably the PaddlePaddle(fluid)-
// compatible implementations; confirm the difference is intentional.
template <typename Dtype>
__global__ void cal_lstm_kernel_batch_without_peephole(
const Dtype* w_x,const Dtype* b_i, const Dtype* b_f, const Dtype* b_c, const Dtype* b_o, Dtype* cell,
const int hidden_size, const int aligned_hidden_size,const int batch_size,const int word_start_id, Dtype* output) {
const int thread_id = blockIdx.x*blockDim.x+threadIdx.x;
const int batch_id = thread_id/aligned_hidden_size;
const int tid=thread_id%aligned_hidden_size;
if (tid < hidden_size && batch_id<batch_size) {
// Offset of this word's fused 4-gate pre-activations inside w_x.
const int emit_wx_offset = (word_start_id + batch_id) * hidden_size * 4;
const Dtype* w_x_i = w_x + emit_wx_offset;
const Dtype* w_x_f = w_x_i + hidden_size ;
const Dtype* w_x_c = w_x_f + hidden_size;
const Dtype* w_x_o = w_x_c + hidden_size;
Dtype* gate_h_p = output + batch_id * hidden_size;
Dtype* gate_c_p = cell + batch_id * hidden_size;
const Dtype c_1 = gate_c_p[tid];  // c_{t-1}
const Dtype gate_i = Sigmoid_fluid(w_x_i[tid] + b_i[tid]);
const Dtype gate_f = Sigmoid_fluid(w_x_f[tid] + b_f[tid]);
const Dtype gate_c_s = Tanh_fluid(w_x_c[tid] + b_c[tid]);  // candidate cell state
const Dtype gate_c = gate_f * c_1 + gate_i * gate_c_s;     // c_t
const Dtype gate_o = Sigmoid_fluid(w_x_o[tid] + b_o[tid]);
gate_c_p[tid] = gate_c;
gate_h_p[tid] = gate_o * Tanh_fluid(gate_c);  // h_t
}
}
// Batched LSTM forward for NV/float:
//   1. sort the variable-length sequences by length (SeqSortedseqTranseUtil)
//      so each time step processes a contiguous "emit" batch,
//   2. one big GEMM computes x*W for every word up front (_gemm_wx),
//   3. per time step: GEMM h_{t-1}*W_h accumulated into the fused gate
//      buffer, then a pointwise LSTM-cell kernel,
//   4. unsort the outputs back to the caller's order.
// Weights layout (established by the pointer arithmetic below): W_x first
// (4*hidden*word_size), then W_h; bias holds the 4 gate biases followed by
// the 3 peephole vectors.
template<>
SaberStatus
SaberLstm<NV, AK_FLOAT>::dispatch_batch(
const std::vector < Tensor<NV>* >& inputs,
std::vector < Tensor<NV>* >& outputs,
LstmParam < NV >& param) {
Tensor<NV>* x = inputs[0];
// Last level of the sequence offsets = per-sequence word boundaries.
std::vector<int> offset_vec = x->get_seq_offset()[x->get_seq_offset().size()-1];
int seq_sum = x->num();
int batch_size = offset_vec.size() - 1;
const OpDataType* x_data = (const OpDataType*)x->data();
const OpDataType *weight_h = (const OpDataType *)(param.weight()->data())+4*_hidden_size*_word_size;
const OpDataType *weight_w = (const OpDataType *)param.weight()->data();
const OpDataType *bias = (const OpDataType *)param.bias()->data();
const OpDataType *weight_peephole = (const OpDataType *)(param.bias()->data())+4*_hidden_size;
const OpDataType* h_init = nullptr;
const OpDataType* inner_x = (const OpDataType *)inputs[0]->data();
OpDataType* inner_h_out = (OpDataType *)outputs[0]->mutable_data();
OpDataType* inner_cell = nullptr;
_gemm_wx = saber_find_fast_sass_gemm(false, false, seq_sum, 4 * _hidden_size,_word_size);
_gemm_wh = saber_find_fast_sass_gemm(false, false, batch_size, 4 * _hidden_size, _hidden_size);
utils::try_expand_tensor(_temp_map_dev,seq_sum);
// transform == true when the sequences had to be reordered by length.
bool transform = _seq_util.get_sorted_map(offset_vec, this->_ctx->get_compute_stream());
std::vector<int> emit_offset_vec=_seq_util.get_emit_offset_vec();
int emit_length = emit_offset_vec.size()-1;
if (inputs.size() > 1) {
h_init = (const OpDataType *)inputs[1]->data();
utils::try_expand_tensor(_init_hidden,batch_size * _hidden_size);
// NOTE(review): h_init is immediately repointed at _init_hidden without
// copying/reordering inputs[1] into it, so the user-supplied initial
// hidden state appears to be ignored here — verify against callers.
h_init = (const OpDataType *)_init_hidden.data();
} else if (param.init_hidden() != nullptr) {
h_init = (const OpDataType *)param.init_hidden()->data();
//FIXME:is it correct?
} else {
// No initial state supplied: use a (cached) zero buffer.
if (_temp_zero.valid_size() < batch_size * _hidden_size) {
utils::try_expand_tensor(_temp_zero,batch_size * _hidden_size);
CUDA_CHECK(cudaMemsetAsync(_temp_zero.mutable_data(), 0,
sizeof(OpDataType)*batch_size * _hidden_size,
_ctx->get_compute_stream()));
}
h_init = (const OpDataType *)_temp_zero.data();
}
// Work buffers: fused gate pre-activations, per-step W_h product,
// (optionally) reordered output, and the running cell state.
utils::try_expand_tensor(_temp_wx,seq_sum * 4 * _hidden_size);
utils::try_expand_tensor(_temp_wh,batch_size * 4 * _hidden_size);
utils::try_expand_tensor(_temp_out,seq_sum * _hidden_size * param.num_direction);
utils::try_expand_tensor(_temp_cell,batch_size * _hidden_size);
if (transform) {
// Reorder the input words into length-sorted order and redirect the
// output into a scratch buffer (unsorted back at the end).
utils::try_expand_tensor(_temp_x,seq_sum * _word_size);
_seq_util.seq_2_sorted_seq(x_data, (OpDataType *)_temp_x.mutable_data(), _word_size, _ctx->get_compute_stream());
inner_h_out = (OpDataType *)_temp_out.mutable_data();
inner_x = (OpDataType *)_temp_x.mutable_data();
if (inputs.size() > 1 || param.init_hidden() != nullptr) {
CHECK(false) << "not support inner_h_init != nullptr";
}
}
inner_cell = (OpDataType *)_temp_cell.mutable_data();
// Cell state starts at zero for every sequence.
CUDA_CHECK(cudaMemsetAsync(inner_cell, 0, sizeof(OpDataType)*batch_size * _hidden_size,
_ctx->get_compute_stream()));
OpDataType* temp_wh = (OpDataType *)_temp_wh.mutable_data();
OpDataType* temp_wx = (OpDataType *)_temp_wx.mutable_data();
// x*W for the whole (sorted) sequence in a single GEMM.
_gemm_wx(seq_sum, 4 * _hidden_size, _word_size, 1.0, inner_x, 0.0, weight_w, temp_wx,
_ctx->get_compute_stream());
// Gate order inside the fused buffers/bias: i, f, c, o.
const int i_offset = 0;
const int f_offset = 1;
const int c_offset = 2;
const int o_offset = 3;
const OpDataType* b_i = bias + i_offset * _hidden_size;
const OpDataType* b_f = bias + f_offset * _hidden_size;
const OpDataType* b_c = bias + c_offset * _hidden_size;
const OpDataType* b_o = bias + o_offset * _hidden_size;
const OpDataType* w_ci = nullptr;
const OpDataType* w_cf =nullptr;
const OpDataType* w_co =nullptr;
if(param.with_peephole){
w_ci = weight_peephole + 0 * _hidden_size;
w_cf = weight_peephole + 1 * _hidden_size;
w_co = weight_peephole + 2 * _hidden_size;
}
// Iterate over time steps ("emits"); each emit covers the sequences that
// are still alive at this step.
for (int word_id = 0; word_id < emit_length; word_id++) {
int real_word_id = word_id;
int last_word_id = word_id - 1;
// Reverse-direction LSTM only handled for batch_size == 1.
if (param.is_reverse && batch_size == 1) {
real_word_id = emit_length - word_id - 1;
last_word_id = real_word_id + 1;
}
int emit_word_id_start = emit_offset_vec[real_word_id];
int emit_word_id_end = emit_offset_vec[real_word_id + 1];
int emit_word_length = emit_word_id_end - emit_word_id_start;
const OpDataType* hin;
if (word_id == 0) {
hin = h_init;
} else {
hin = inner_h_out + emit_offset_vec[last_word_id] * _hidden_size;
}
// DLOG(INFO) << "word_id = " << word_id << ",emit_start = " << emit_word_id_start << ",emit_end=" <<emit_word_id_end;
OpDataType* hout = nullptr;
hout = emit_offset_vec[real_word_id] * _hidden_size + inner_h_out;
//wh
// Accumulate h_{t-1}*W_h into this emit's slice of the gate buffer
// (beta = 1 keeps the precomputed x*W contribution).
_gemm_wh(emit_word_length, 4 * _hidden_size, _hidden_size, 1.0, hin, 1.f,
weight_h,
temp_wx+emit_word_id_start*4*_hidden_size, _ctx->get_compute_stream());
const int block_dim=512;
const int grid_dim=round_up(emit_word_length*_aligned_hidden_size,block_dim);
// Fast path when the activations are the standard sigmoid/tanh triple.
if (param.gate_activity == Active_sigmoid && param.cell_activity == Active_tanh
&& param.candidate_activity == Active_tanh) {
if (param.with_peephole) {
cal_lstm_kernel_batch_with_peephole << <grid_dim, block_dim , 0
, _ctx->get_compute_stream() >> >
(temp_wx, b_i,b_f,b_c,b_o, w_ci,w_cf,w_co, inner_cell, _hidden_size,_aligned_hidden_size,emit_word_length, emit_word_id_start, hout);
} else {
cal_lstm_kernel_batch_without_peephole << < grid_dim, block_dim , 0
, _ctx->get_compute_stream() >> >
(temp_wx, b_i,b_f,b_c,b_o, inner_cell, _hidden_size, _aligned_hidden_size,emit_word_length,emit_word_id_start, hout);
}
} else {
if (param.with_peephole) {
cal_lstm_kernel_batch_with_peephole_anyactivate << < grid_dim, block_dim , 0
, _ctx->get_compute_stream() >> >
(temp_wx, b_i, b_f, b_c, b_o, w_ci, w_cf, w_co, inner_cell, _hidden_size, _aligned_hidden_size,emit_word_length,emit_word_id_start, param.gate_activity,
param.cell_activity, param.candidate_activity, hout);
} else{
cal_lstm_kernel_batch_without_peephole_anyactivate << < grid_dim, block_dim , 0
, _ctx->get_compute_stream() >> >
(temp_wx, b_i, b_f, b_c, b_o, inner_cell, _hidden_size,_aligned_hidden_size,emit_word_length, emit_word_id_start, param.gate_activity,
param.cell_activity, param.candidate_activity, hout);
}
}
}
if (transform) {
// Undo the length-sorting so outputs line up with the caller's order.
_seq_util.sorted_seq_2_seq((const OpDataType *)_temp_out.data(), (OpDataType *)outputs[0]->mutable_data(), _hidden_size,
_ctx->get_compute_stream());
}
outputs[0]->set_seq_offset(inputs[0]->get_seq_offset());
return SaberSuccess;
};
//TODO:complete dispatch_once
// Single-sequence fast path; currently an unimplemented stub (dispatch()
// always routes through dispatch_batch()).
template<>
SaberStatus
SaberLstm<NV, AK_FLOAT>::dispatch_once(
const std::vector < Tensor<NV>* >& inputs,
std::vector < Tensor<NV>* >& outputs,
LstmParam < NV >& param) {
return SaberSuccess;
};
// Public entry point: validate the supported configuration (exactly one
// input/output tensor, no external initial hidden state, single layer),
// then forward to the batched implementation.
template<>
SaberStatus
SaberLstm<NV, AK_FLOAT>::dispatch(
const std::vector < Tensor<NV>* >& inputs,
std::vector < Tensor<NV>* >& outputs,
LstmParam < NV >& param) {
CHECK_EQ(inputs.size(),1)<<"only support input size = 1";
CHECK_EQ(outputs.size(),1)<<"only support outputs size = 1";
CHECK_EQ(param.init_hidden()==nullptr, true )<<"only support param.init_hidden() == nullptr";
CHECK_EQ(param.num_layers,1)<<"only support param.num_layers==1";
return dispatch_batch(inputs, outputs, param);
}
DEFINE_OP_TEMPLATE(SaberLstm, LstmParam, NV, AK_HALF);
DEFINE_OP_TEMPLATE(SaberLstm, LstmParam, NV, AK_INT8);
}
}
|
5babb293e16c8df5e65053888ed71bc9209fb850.hip
|
// !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
#include <ATen/ATen.h>
#include <ATen/AccumulateType.h>
#include <ATen/hip/HIPApplyUtils.cuh>
#include <ATen/hip/HIPContext.h>
#include <ATen/hip/detail/IndexUtils.cuh>
#include <ATen/hip/detail/KernelUtils.h>
#include <ATen/NativeFunctions.h>
#include <ATen/TensorUtils.h>
#include <ATen/Utils.h>
#include <c10/util/Exception.h>
#include <THH/THHAtomics.cuh>
#include <THH/THHNumerics.cuh>
#include <algorithm>
#include <cfloat>
#include <cmath>
namespace at {
namespace native {
using namespace at::cuda::detail;
namespace {
// Maps an output index to the left edge of its pooling window, jittered by
// a per-plane random sample in [0, 1).  The final window is pinned so it
// ends flush with the input and never reads out of bounds.
template <typename scalar_t, typename accscalar_t>
__device__ inline int get_interval(accscalar_t sample,
  int index, int inputSize, int outputSize, int poolSize) {
  if (index + 1 == outputSize) {
    // Last output: window must end exactly at the input boundary.
    return inputSize - poolSize;
  }
  // Average stride between consecutive window starts.
  const accscalar_t alpha = static_cast<accscalar_t>(inputSize - poolSize) /
    static_cast<accscalar_t>(outputSize - 1);
  return static_cast<int>((index + sample) * alpha) -
    static_cast<int>(sample * alpha);
}
// Forward fractional max pooling: one thread per output element.
// Grid layout: blockIdx.y = plane (channel), blockIdx.z = batch entry,
// blockIdx.x * blockDim.x + threadIdx.x = flat (h, w) output position.
// `samples` supplies the per-(batch, plane) random offsets that place the
// pooling windows; the flat argmax (h * inW + w) is stored in `indices`
// for the backward pass.
template <typename scalar_t>
__global__ void fractional_max_pool2d_out_cuda_frame(
  PackedTensorAccessor<scalar_t, 4> output,
  PackedTensorAccessor<int64_t, 4> indices,
  PackedTensorAccessor<scalar_t, 4> input,
  PackedTensorAccessor<scalar_t, 3> samples,
  int poolSizeH, int poolSizeW) {
  using accscalar_t = at::acc_type<scalar_t, /*is_cuda=*/true>;
  int ourOutputPoint = threadIdx.x + blockIdx.x * blockDim.x;
  int plane = blockIdx.y;
  int batch = blockIdx.z;
  // Each thread generates a specific output point
  if (ourOutputPoint < output.size(2) * output.size(3)) {
    int outputW = ourOutputPoint % output.size(3);
    int outputH = ourOutputPoint / output.size(3);
    // Locate this output's pseudo-random pooling window inside the input.
    int poolW = get_interval<scalar_t, accscalar_t>(
      static_cast<accscalar_t>(samples[batch][plane][0]),
      outputW, input.size(3), output.size(3), poolSizeW);
    int poolH = get_interval<scalar_t, accscalar_t>(
      static_cast<accscalar_t>(samples[batch][plane][1]),
      outputH, input.size(2), output.size(2), poolSizeH);
    scalar_t maxVal = at::numeric_limits<scalar_t>::lower_bound();
    int maxIndex = poolH * input.size(3) + poolW;
    // Scan the window.  Fix: the original special-cased 2 <= poolSizeW <= 7
    // with a second loop whose body was byte-identical to the general one;
    // the dead duplicate branch has been merged into a single loop.
    for (int h = poolH; h < poolH + poolSizeH; ++h) {
      for (int w = poolW; w < poolW + poolSizeW; ++w) {
        scalar_t val = input[batch][plane][h][w];
        // for consistency with THNN, favor the first max
        if (val > maxVal || THCNumerics<scalar_t>::isnan(val)) {
          maxIndex = h * input.size(3) + w;
          maxVal = val;
        }
      }
    }
    indices[batch][plane][outputH][outputW] = maxIndex;
    output[batch][plane][outputH][outputW] = maxVal;
  }
}
// Backward fractional max pooling: one thread per gradOutput element.
// Reads the flat argmax (h * inW + w) recorded by the forward pass and
// atomically accumulates the gradient into gradInput — atomics are needed
// because overlapping pooling windows can select the same input element.
template <typename scalar_t>
__global__ void fractional_max_pool2d_backward_out_cuda_frame(
PackedTensorAccessor<scalar_t, 4> gradInput,
PackedTensorAccessor<scalar_t, 4> gradOutput,
PackedTensorAccessor<int64_t, 4> indices) {
// Output (h, w) point that this thread is responsible for
int ourOutputPoint = threadIdx.x + blockIdx.x * blockDim.x;
int plane = blockIdx.y;
int batch = blockIdx.z;
// Each thread generates a specific output point
if (ourOutputPoint < gradOutput.size(2) *
gradOutput.size(3)) {
int outputW = ourOutputPoint % gradOutput.size(3);
int outputH = ourOutputPoint / gradOutput.size(3);
// Flat index into the input plane stored by the forward kernel.
int index = indices[batch][plane][outputH][outputW];
assert(index >= 0);
int inputW = index % gradInput.size(3);
int inputH = index / gradInput.size(3);
assert(inputH < gradInput.size(2));
gpuAtomicAdd(
&gradInput[batch][plane][inputH][inputW],
gradOutput[batch][plane][outputH][outputW]
);
}
}
// Host-side forward driver (HIP build): validates shapes, resizes output and
// indices, views 3-D (C, H, W) inputs as batch-of-one 4-D tensors, and
// launches the forward kernel with one thread per output element
// (blockIdx.y = plane, blockIdx.z = batch).
void fractional_max_pool2d_out_cuda_template(
Tensor & output,
Tensor& indices,
const Tensor& input,
IntArrayRef pool_size,
IntArrayRef output_size,
const Tensor& randomSamples) {
int planeDim = 0;
int dimh = 1;
int dimw = 2;
int numBatch = 1;
int ndims = input.ndimension();
TORCH_CHECK(input.numel() > 0,
"fractional_max_pool2d(): expected input to have non-empty ",
"spatial dimensions.");
TORCH_CHECK((ndims == 3 || ndims == 4),
"non-empty 3D or 4D (batch mode) tensor expected for input");
if (ndims == 4) {
numBatch = input.size(0);
planeDim++;
dimh++;
dimw++;
}
/* sizes */
int numPlanes = input.size(planeDim);
int inputH = input.size(dimh);
int inputW = input.size(dimw);
int outputH = output_size[0];
int outputW = output_size[1];
int poolSizeH = pool_size[0];
int poolSizeW = pool_size[1];
// Every pooling window (even the last, pinned one) must fit in the input.
TORCH_CHECK(outputH + poolSizeH - 1 <= inputH,
"fractional_max_pool2d(): pool_size height ", poolSizeH,
" too large relative to input height ", inputH);
TORCH_CHECK(outputW + poolSizeW - 1 <= inputW,
"pool_size width ", poolSizeW,
" too large relative to input width ", inputW);
if (ndims == 3) {
/* resize output */
output.resize_({numPlanes, outputH, outputW});
/* indices will contain the locations for each output point */
indices.resize_({numPlanes, outputH, outputW});
} else {
output.resize_({numBatch, numPlanes, outputH, outputW});
indices.resize_({numBatch, numPlanes, outputH, outputW});
}
// 4-D working views; a 3-D input becomes a batch of one.
auto output_ = output;
auto input_ = input;
auto indices_ = indices;
if(ndims == 3) {
output_ = output_.reshape({1, numPlanes, outputH, outputW});
indices_ = indices_.reshape({1, numPlanes, outputH, outputW});
input_ = input_.reshape({1, input.size(0), input.size(1), input.size(2)});
}
// block is limited to 4 warps
// grid handles overflow per each plane
int outputPlaneSize = output_.size(2) *
output_.size(3);
dim3 grid((outputPlaneSize + 127) / 128, // ceil(outputPlaneSize / 128)
input_.size(1),
input_.size(0));
dim3 block(outputPlaneSize > 128 ? 128 : outputPlaneSize);
AT_DISPATCH_FLOATING_TYPES_AND_HALF(input.scalar_type(),
"fractional_max_pool2d_out_cuda_frame",
[&] {
auto devInput = input_.packed_accessor<scalar_t, 4>();
auto devOutput = output_.packed_accessor<scalar_t, 4>();
auto devIndices = indices_.packed_accessor<int64_t, 4>();
auto devSamples = randomSamples.packed_accessor<scalar_t, 3>();
hipLaunchKernelGGL(( fractional_max_pool2d_out_cuda_frame<scalar_t>)
, dim3(grid), dim3(block), 0, at::hip::getCurrentHIPStreamMasqueradingAsCUDA(),
devOutput, devIndices, devInput, devSamples,
poolSizeH, poolSizeW);
C10_HIP_KERNEL_LAUNCH_CHECK();
}
);
}
// Host-side backward driver (HIP build): zeroes gradInput, views 3-D inputs
// as batch-of-one 4-D tensors, and scatter-adds each output gradient back to
// the argmax position recorded in `indices` during the forward pass.
void fractional_max_pool2d_backward_out_cuda_template(
  Tensor& gradInput,
  const Tensor& gradOutput,
  const Tensor& input,
  IntArrayRef pool_size /* unused */,
  IntArrayRef output_size,
  const Tensor& indices)
{
  int dimh = 1;
  int dimw = 2;
  int ndims = input.ndimension();
  if (ndims == 4) {
    dimh++;
    dimw++;
  }
  /* sizes */
  int inputH = input.size(dimh);
  int inputW = input.size(dimw);
  int outputH = output_size[0];
  int outputW = output_size[1];
  TORCH_CHECK(outputH == gradOutput.size(dimh),
    "fractional_max_pool2d(): gradOutput height unexpected");
  TORCH_CHECK(outputW == gradOutput.size(dimw),
    "fractional_max_pool2d(): gradOutput width unexpected");
  /* resize */
  gradInput.resize_as_(input);
  gradInput.zero_();
  // 4-D working views; a 3-D input becomes a batch of one.
  auto gradInput_ = gradInput;
  auto gradOutput_ = gradOutput;
  auto indices_ = indices;
  if(ndims == 3) {
    gradInput_ = gradInput_.reshape({1, input.size(0), inputH, inputW});
    gradOutput_ = gradOutput_.reshape({1, gradOutput.size(0), outputH, outputW});
    indices_ = indices_.reshape({1, indices_.size(0), outputH, outputW});
  }
  /* backprop */
  // block is limited to 4 warps
  // grid handles overflow per each plane
  int outputPlaneSize = gradOutput_.size(2) *
    gradOutput_.size(3);
  dim3 grid((outputPlaneSize + 127) / 128, // ceil(outputPlaneSize / 128)
    gradInput_.size(1),
    gradInput_.size(0));
  dim3 block(outputPlaneSize > 128 ? 128 : outputPlaneSize);
  // Bug fix: build the accessor from the reshaped 4-D view `indices_`, not
  // the raw `indices`, which is still 3-D when ndims == 3 and would fail the
  // accessor's dimensionality check at runtime.
  auto devIndices = indices_.packed_accessor<int64_t, 4>();
  AT_DISPATCH_FLOATING_TYPES_AND_HALF(gradOutput.scalar_type(),
    "fractional_max_pool2d_backward_out_cuda_frame",
    [&] {
      auto devGradInput = gradInput_.packed_accessor<scalar_t, 4>();
      auto devGradOutput = gradOutput_.packed_accessor<scalar_t, 4>();
      hipLaunchKernelGGL(( fractional_max_pool2d_backward_out_cuda_frame<scalar_t>)
        , dim3(grid), dim3(block), 0, at::hip::getCurrentHIPStreamMasqueradingAsCUDA(),
        devGradInput, devGradOutput, devIndices);
      C10_HIP_KERNEL_LAUNCH_CHECK();
    }
  );
}
}// namespace
// Out-variant entry point: delegates to the shared template, which resizes
// and fills `output` and `indices`, then hands the same tensors back.
std::tuple<Tensor&, Tensor&> fractional_max_pool2d_out_cuda(
  at::Tensor& output,
  at::Tensor& indices,
  const at::Tensor& input,
  IntArrayRef pool_size,
  IntArrayRef output_size,
  const at::Tensor& randomSamples)
{
  fractional_max_pool2d_out_cuda_template(
      output, indices, input, pool_size, output_size, randomSamples);
  return std::forward_as_tuple(output, indices);
}
// Allocating entry point: creates empty result tensors (the template resizes
// them) and delegates to the shared implementation.
std::tuple<Tensor, Tensor> fractional_max_pool2d_cuda(
  const at::Tensor& input,
  IntArrayRef pool_size,
  IntArrayRef output_size,
  const at::Tensor& randomSamples)
{
  auto output = at::empty({0}, input.options());
  auto indices = at::empty({0}, input.options().dtype(kLong));
  fractional_max_pool2d_out_cuda_template(
      output, indices, input, pool_size, output_size, randomSamples);
  return std::make_tuple(output, indices);
}
// Out-variant backward entry point; fills and returns `gradInput`.
Tensor& fractional_max_pool2d_backward_out_cuda(
at::Tensor& gradInput,
const at::Tensor& gradOutput_,
const at::Tensor& input,
IntArrayRef pool_size,
IntArrayRef output_size,
const at::Tensor& indices)
{
// See Note [Writing Nondeterministic Operations]
// Nondeterministic because of atomicAdd usage
globalContext().alertNotDeterministic("fractional_max_pool2d_backward_out_cuda");
fractional_max_pool2d_backward_out_cuda_template(
gradInput,
gradOutput_,
input,
pool_size,
output_size,
indices);
return gradInput;
}
// Allocating backward entry point; the template resizes gradInput to match
// `input` before filling it.
Tensor fractional_max_pool2d_backward_cuda(
const at::Tensor& gradOutput_,
const at::Tensor& input,
IntArrayRef pool_size,
IntArrayRef output_size,
const at::Tensor& indices)
{
// See Note [Writing Nondeterministic Operations]
// Nondeterministic because of atomicAdd usage
globalContext().alertNotDeterministic("fractional_max_pool2d_backward_cuda");
Tensor gradInput = at::empty({0}, input.options());
fractional_max_pool2d_backward_out_cuda_template(
gradInput,
gradOutput_,
input,
pool_size,
output_size,
indices);
return gradInput;
}
}// at::native
}// at
|
5babb293e16c8df5e65053888ed71bc9209fb850.cu
|
#include <ATen/ATen.h>
#include <ATen/AccumulateType.h>
#include <ATen/cuda/CUDAApplyUtils.cuh>
#include <ATen/cuda/CUDAContext.h>
#include <ATen/cuda/detail/IndexUtils.cuh>
#include <ATen/cuda/detail/KernelUtils.h>
#include <ATen/NativeFunctions.h>
#include <ATen/TensorUtils.h>
#include <ATen/Utils.h>
#include <c10/util/Exception.h>
#include <THC/THCAtomics.cuh>
#include <THC/THCNumerics.cuh>
#include <algorithm>
#include <cfloat>
#include <cmath>
namespace at {
namespace native {
using namespace at::cuda::detail;
namespace {
// Maps an output index to the left edge of its pooling window, jittered by
// a per-plane random sample in [0, 1).  The final window is pinned so it
// ends flush with the input and never reads out of bounds.
template <typename scalar_t, typename accscalar_t>
__device__ inline int get_interval(accscalar_t sample,
  int index, int inputSize, int outputSize, int poolSize) {
  if (index + 1 == outputSize) {
    // Last output: window must end exactly at the input boundary.
    return inputSize - poolSize;
  }
  // Average stride between consecutive window starts.
  const accscalar_t alpha = static_cast<accscalar_t>(inputSize - poolSize) /
    static_cast<accscalar_t>(outputSize - 1);
  return static_cast<int>((index + sample) * alpha) -
    static_cast<int>(sample * alpha);
}
// Forward fractional max pooling: one thread per output element.
// Grid layout: blockIdx.y = plane (channel), blockIdx.z = batch entry,
// blockIdx.x * blockDim.x + threadIdx.x = flat (h, w) output position.
// `samples` supplies the per-(batch, plane) random offsets that place the
// pooling windows; the flat argmax (h * inW + w) is stored in `indices`
// for the backward pass.
template <typename scalar_t>
__global__ void fractional_max_pool2d_out_cuda_frame(
  PackedTensorAccessor<scalar_t, 4> output,
  PackedTensorAccessor<int64_t, 4> indices,
  PackedTensorAccessor<scalar_t, 4> input,
  PackedTensorAccessor<scalar_t, 3> samples,
  int poolSizeH, int poolSizeW) {
  using accscalar_t = at::acc_type<scalar_t, /*is_cuda=*/true>;
  int ourOutputPoint = threadIdx.x + blockIdx.x * blockDim.x;
  int plane = blockIdx.y;
  int batch = blockIdx.z;
  // Each thread generates a specific output point
  if (ourOutputPoint < output.size(2) * output.size(3)) {
    int outputW = ourOutputPoint % output.size(3);
    int outputH = ourOutputPoint / output.size(3);
    // Locate this output's pseudo-random pooling window inside the input.
    int poolW = get_interval<scalar_t, accscalar_t>(
      static_cast<accscalar_t>(samples[batch][plane][0]),
      outputW, input.size(3), output.size(3), poolSizeW);
    int poolH = get_interval<scalar_t, accscalar_t>(
      static_cast<accscalar_t>(samples[batch][plane][1]),
      outputH, input.size(2), output.size(2), poolSizeH);
    scalar_t maxVal = at::numeric_limits<scalar_t>::lower_bound();
    int maxIndex = poolH * input.size(3) + poolW;
    // Scan the window.  Fix: the original special-cased 2 <= poolSizeW <= 7
    // with a second loop whose body was byte-identical to the general one;
    // the dead duplicate branch has been merged into a single loop.
    for (int h = poolH; h < poolH + poolSizeH; ++h) {
      for (int w = poolW; w < poolW + poolSizeW; ++w) {
        scalar_t val = input[batch][plane][h][w];
        // for consistency with THNN, favor the first max
        if (val > maxVal || THCNumerics<scalar_t>::isnan(val)) {
          maxIndex = h * input.size(3) + w;
          maxVal = val;
        }
      }
    }
    indices[batch][plane][outputH][outputW] = maxIndex;
    output[batch][plane][outputH][outputW] = maxVal;
  }
}
// Backward fractional max pooling: one thread per gradOutput element.
// Reads the flat argmax (h * inW + w) recorded by the forward pass and
// atomically accumulates the gradient into gradInput — atomics are needed
// because overlapping pooling windows can select the same input element.
template <typename scalar_t>
__global__ void fractional_max_pool2d_backward_out_cuda_frame(
PackedTensorAccessor<scalar_t, 4> gradInput,
PackedTensorAccessor<scalar_t, 4> gradOutput,
PackedTensorAccessor<int64_t, 4> indices) {
// Output (h, w) point that this thread is responsible for
int ourOutputPoint = threadIdx.x + blockIdx.x * blockDim.x;
int plane = blockIdx.y;
int batch = blockIdx.z;
// Each thread generates a specific output point
if (ourOutputPoint < gradOutput.size(2) *
gradOutput.size(3)) {
int outputW = ourOutputPoint % gradOutput.size(3);
int outputH = ourOutputPoint / gradOutput.size(3);
// Flat index into the input plane stored by the forward kernel.
int index = indices[batch][plane][outputH][outputW];
assert(index >= 0);
int inputW = index % gradInput.size(3);
int inputH = index / gradInput.size(3);
assert(inputH < gradInput.size(2));
gpuAtomicAdd(
&gradInput[batch][plane][inputH][inputW],
gradOutput[batch][plane][outputH][outputW]
);
}
}
// Host-side forward driver (CUDA build): validates shapes, resizes output
// and indices, views 3-D (C, H, W) inputs as batch-of-one 4-D tensors, and
// launches the forward kernel with one thread per output element
// (blockIdx.y = plane, blockIdx.z = batch).
void fractional_max_pool2d_out_cuda_template(
Tensor & output,
Tensor& indices,
const Tensor& input,
IntArrayRef pool_size,
IntArrayRef output_size,
const Tensor& randomSamples) {
int planeDim = 0;
int dimh = 1;
int dimw = 2;
int numBatch = 1;
int ndims = input.ndimension();
TORCH_CHECK(input.numel() > 0,
"fractional_max_pool2d(): expected input to have non-empty ",
"spatial dimensions.");
TORCH_CHECK((ndims == 3 || ndims == 4),
"non-empty 3D or 4D (batch mode) tensor expected for input");
if (ndims == 4) {
numBatch = input.size(0);
planeDim++;
dimh++;
dimw++;
}
/* sizes */
int numPlanes = input.size(planeDim);
int inputH = input.size(dimh);
int inputW = input.size(dimw);
int outputH = output_size[0];
int outputW = output_size[1];
int poolSizeH = pool_size[0];
int poolSizeW = pool_size[1];
// Every pooling window (even the last, pinned one) must fit in the input.
TORCH_CHECK(outputH + poolSizeH - 1 <= inputH,
"fractional_max_pool2d(): pool_size height ", poolSizeH,
" too large relative to input height ", inputH);
TORCH_CHECK(outputW + poolSizeW - 1 <= inputW,
"pool_size width ", poolSizeW,
" too large relative to input width ", inputW);
if (ndims == 3) {
/* resize output */
output.resize_({numPlanes, outputH, outputW});
/* indices will contain the locations for each output point */
indices.resize_({numPlanes, outputH, outputW});
} else {
output.resize_({numBatch, numPlanes, outputH, outputW});
indices.resize_({numBatch, numPlanes, outputH, outputW});
}
// 4-D working views; a 3-D input becomes a batch of one.
auto output_ = output;
auto input_ = input;
auto indices_ = indices;
if(ndims == 3) {
output_ = output_.reshape({1, numPlanes, outputH, outputW});
indices_ = indices_.reshape({1, numPlanes, outputH, outputW});
input_ = input_.reshape({1, input.size(0), input.size(1), input.size(2)});
}
// block is limited to 4 warps
// grid handles overflow per each plane
int outputPlaneSize = output_.size(2) *
output_.size(3);
dim3 grid((outputPlaneSize + 127) / 128, // ceil(outputPlaneSize / 128)
input_.size(1),
input_.size(0));
dim3 block(outputPlaneSize > 128 ? 128 : outputPlaneSize);
AT_DISPATCH_FLOATING_TYPES_AND_HALF(input.scalar_type(),
"fractional_max_pool2d_out_cuda_frame",
[&] {
auto devInput = input_.packed_accessor<scalar_t, 4>();
auto devOutput = output_.packed_accessor<scalar_t, 4>();
auto devIndices = indices_.packed_accessor<int64_t, 4>();
auto devSamples = randomSamples.packed_accessor<scalar_t, 3>();
fractional_max_pool2d_out_cuda_frame<scalar_t>
<<<grid, block, 0, at::cuda::getCurrentCUDAStream()>>>(
devOutput, devIndices, devInput, devSamples,
poolSizeH, poolSizeW);
C10_CUDA_KERNEL_LAUNCH_CHECK();
}
);
}
// Host-side backward driver (CUDA build): zeroes gradInput, views 3-D
// inputs as batch-of-one 4-D tensors, and scatter-adds each output gradient
// back to the argmax position recorded in `indices` during the forward pass.
void fractional_max_pool2d_backward_out_cuda_template(
  Tensor& gradInput,
  const Tensor& gradOutput,
  const Tensor& input,
  IntArrayRef pool_size /* unused */,
  IntArrayRef output_size,
  const Tensor& indices)
{
  int dimh = 1;
  int dimw = 2;
  int ndims = input.ndimension();
  if (ndims == 4) {
    dimh++;
    dimw++;
  }
  /* sizes */
  int inputH = input.size(dimh);
  int inputW = input.size(dimw);
  int outputH = output_size[0];
  int outputW = output_size[1];
  TORCH_CHECK(outputH == gradOutput.size(dimh),
    "fractional_max_pool2d(): gradOutput height unexpected");
  TORCH_CHECK(outputW == gradOutput.size(dimw),
    "fractional_max_pool2d(): gradOutput width unexpected");
  /* resize */
  gradInput.resize_as_(input);
  gradInput.zero_();
  // 4-D working views; a 3-D input becomes a batch of one.
  auto gradInput_ = gradInput;
  auto gradOutput_ = gradOutput;
  auto indices_ = indices;
  if(ndims == 3) {
    gradInput_ = gradInput_.reshape({1, input.size(0), inputH, inputW});
    gradOutput_ = gradOutput_.reshape({1, gradOutput.size(0), outputH, outputW});
    indices_ = indices_.reshape({1, indices_.size(0), outputH, outputW});
  }
  /* backprop */
  // block is limited to 4 warps
  // grid handles overflow per each plane
  int outputPlaneSize = gradOutput_.size(2) *
    gradOutput_.size(3);
  dim3 grid((outputPlaneSize + 127) / 128, // ceil(outputPlaneSize / 128)
    gradInput_.size(1),
    gradInput_.size(0));
  dim3 block(outputPlaneSize > 128 ? 128 : outputPlaneSize);
  // Bug fix: build the accessor from the reshaped 4-D view `indices_`, not
  // the raw `indices`, which is still 3-D when ndims == 3 and would fail the
  // accessor's dimensionality check at runtime.
  auto devIndices = indices_.packed_accessor<int64_t, 4>();
  AT_DISPATCH_FLOATING_TYPES_AND_HALF(gradOutput.scalar_type(),
    "fractional_max_pool2d_backward_out_cuda_frame",
    [&] {
      auto devGradInput = gradInput_.packed_accessor<scalar_t, 4>();
      auto devGradOutput = gradOutput_.packed_accessor<scalar_t, 4>();
      fractional_max_pool2d_backward_out_cuda_frame<scalar_t>
        <<<grid, block, 0, at::cuda::getCurrentCUDAStream()>>>(
          devGradInput, devGradOutput, devIndices);
      C10_CUDA_KERNEL_LAUNCH_CHECK();
    }
  );
}
}// namespace
// Out-variant entry point: delegates to the shared template, which resizes
// and fills `output` and `indices`, then hands the same tensors back.
std::tuple<Tensor&, Tensor&> fractional_max_pool2d_out_cuda(
  at::Tensor& output,
  at::Tensor& indices,
  const at::Tensor& input,
  IntArrayRef pool_size,
  IntArrayRef output_size,
  const at::Tensor& randomSamples)
{
  fractional_max_pool2d_out_cuda_template(
      output, indices, input, pool_size, output_size, randomSamples);
  return std::forward_as_tuple(output, indices);
}
// Allocating entry point: creates empty result tensors (the template resizes
// them) and delegates to the shared implementation.
std::tuple<Tensor, Tensor> fractional_max_pool2d_cuda(
  const at::Tensor& input,
  IntArrayRef pool_size,
  IntArrayRef output_size,
  const at::Tensor& randomSamples)
{
  auto output = at::empty({0}, input.options());
  auto indices = at::empty({0}, input.options().dtype(kLong));
  fractional_max_pool2d_out_cuda_template(
      output, indices, input, pool_size, output_size, randomSamples);
  return std::make_tuple(output, indices);
}
// Out-variant backward entry point; fills and returns `gradInput`.
Tensor& fractional_max_pool2d_backward_out_cuda(
at::Tensor& gradInput,
const at::Tensor& gradOutput_,
const at::Tensor& input,
IntArrayRef pool_size,
IntArrayRef output_size,
const at::Tensor& indices)
{
// See Note [Writing Nondeterministic Operations]
// Nondeterministic because of atomicAdd usage
globalContext().alertNotDeterministic("fractional_max_pool2d_backward_out_cuda");
fractional_max_pool2d_backward_out_cuda_template(
gradInput,
gradOutput_,
input,
pool_size,
output_size,
indices);
return gradInput;
}
// Allocating backward entry point; the template resizes gradInput to match
// `input` before filling it.
Tensor fractional_max_pool2d_backward_cuda(
const at::Tensor& gradOutput_,
const at::Tensor& input,
IntArrayRef pool_size,
IntArrayRef output_size,
const at::Tensor& indices)
{
// See Note [Writing Nondeterministic Operations]
// Nondeterministic because of atomicAdd usage
globalContext().alertNotDeterministic("fractional_max_pool2d_backward_cuda");
Tensor gradInput = at::empty({0}, input.options());
fractional_max_pool2d_backward_out_cuda_template(
gradInput,
gradOutput_,
input,
pool_size,
output_size,
indices);
return gradInput;
}
}// at::native
}// at
|
e0561dcf854022cd9f27ea3eecaeaf03b742b0f4.hip
|
// !!! This is a file automatically generated by hipify!!!
#include <hip/hip_runtime.h>
#include <hip/hip_runtime.h>
#include "common.h"
#include "naive.h"
namespace StreamCompaction {
namespace Naive {
using StreamCompaction::Common::PerformanceTimer;
PerformanceTimer& timer()
{
static PerformanceTimer timer;
return timer;
}
int* dev_bufferA;
int* dev_bufferB;
int numObjects;
// One step of the naive (Hillis-Steele) inclusive scan.
// Reads B, writes A: A[i] = B[i - temp] + B[i], where temp = 2^(d-1) is the
// current partner offset; elements with i < temp have no partner and are
// copied through unchanged.  A and B must be distinct buffers (ping-pong).
__global__ void kernNaiveScan(int N, int* A, int* B, int temp) {
int index = (blockIdx.x * blockDim.x) + threadIdx.x;
if (index >= N) {
return;
}
if (index < temp) {
// No partner this round: pass the value through.
A[index] = B[index];
return;
}
A[index] = B[index - temp] + B[index];
}
// Allocates the two device ping-pong buffers and uploads the input into
// dev_bufferB (the buffer the first scan step reads from).
// NOTE(review): the hipMalloc/hipMemcpy return codes are not checked — a
// failed allocation would surface later as a kernel fault.
void initSimulation(int N, const int* B) {
numObjects = N;
hipMalloc((void**)&dev_bufferA, N * sizeof(int));
hipMalloc((void**)&dev_bufferB, N * sizeof(int));
hipMemcpy(dev_bufferB, B, N * sizeof(int), hipMemcpyHostToDevice);
}
// Releases the device ping-pong buffers allocated by initSimulation().
void endSimulation() {
    hipFree(dev_bufferB);
    hipFree(dev_bufferA);
}
/**
* Performs an exclusive prefix-sum (scan) on idata, storing the result into
* odata, using the naive Hillis-Steele algorithm (log2(n) ping-pong passes).
* The device-side result is inclusive; shifting it right by one element and
* writing odata[0] = 0 converts it to exclusive (the grand total is dropped).
* Assumes n >= 1.
*/
void scan(int n, int *odata, const int *idata) {
initSimulation(n, idata);
const int blockSize = 256;
dim3 numBoidBlocks((n + blockSize - 1) / blockSize);
// Number of doubling passes needed to cover n elements.
int dmax = ilog2ceil(n);
timer().startGpuTimer();
for (int i = 1; i <= dmax; i++) {
// Offset for this pass is 2^(i-1); after the kernel, swap so bufferB
// always holds the most recent partial sums.
kernNaiveScan << <numBoidBlocks, blockSize >> > (n, dev_bufferA, dev_bufferB, int(powf(2, i - 1)));
std::swap(dev_bufferA, dev_bufferB);
}
timer().endGpuTimer();
// Shift right by one while copying back: inclusive -> exclusive scan.
hipMemcpy(odata + 1, dev_bufferB, (n - 1) * sizeof(int), hipMemcpyDeviceToHost);
odata[0] = 0;
endSimulation();
}
}
}
|
e0561dcf854022cd9f27ea3eecaeaf03b742b0f4.cu
|
#include <cuda.h>
#include <cuda_runtime.h>
#include "common.h"
#include "naive.h"
namespace StreamCompaction {
namespace Naive {
using StreamCompaction::Common::PerformanceTimer;

// Lazily-constructed timer shared by this module.
PerformanceTimer& timer()
{
    static PerformanceTimer timer;
    return timer;
}

// Ping-pong device buffers for the naive scan passes.
int* dev_bufferA;
int* dev_bufferB;
int numObjects;

// One Hillis-Steele pass: element `index` adds in the element `temp` slots to
// its left (or copies itself when there is nothing that far left).
// Reads B, writes A; every index in [0, N) is written each pass, so A never
// needs separate initialization.
__global__ void kernNaiveScan(int N, int* A, int* B, int temp) {
    int index = (blockIdx.x * blockDim.x) + threadIdx.x;
    if (index >= N) {
        return;
    }
    if (index < temp) {
        A[index] = B[index];
        return;
    }
    A[index] = B[index - temp] + B[index];
}

// Allocate both device buffers and upload the input into B.
void initSimulation(int N, const int* B) {
    numObjects = N;
    cudaMalloc((void**)&dev_bufferA, N * sizeof(int));
    cudaMalloc((void**)&dev_bufferB, N * sizeof(int));
    cudaMemcpy(dev_bufferB, B, N * sizeof(int), cudaMemcpyHostToDevice);
}

// Release both device buffers.
void endSimulation() {
    cudaFree(dev_bufferA);
    cudaFree(dev_bufferB);
}

/**
 * Performs an exclusive prefix-sum (aka scan) on idata, storing the result into odata.
 */
void scan(int n, int *odata, const int *idata) {
    // Guard: n <= 0 would write odata[0] out of bounds and pass a negative
    // byte count to cudaMemcpy below.
    if (n <= 0) {
        return;
    }
    initSimulation(n, idata);
    const int blockSize = 256;
    dim3 numBlocks((n + blockSize - 1) / blockSize);
    int dmax = ilog2ceil(n);
    timer().startGpuTimer();
    for (int d = 1; d <= dmax; d++) {
        // The pass offset is an exact power of two; an integer shift avoids
        // the float round-trip of powf(2, d - 1).
        kernNaiveScan<<<numBlocks, blockSize>>>(n, dev_bufferA, dev_bufferB, 1 << (d - 1));
        std::swap(dev_bufferA, dev_bufferB);  // pass result is now in B
    }
    // Kernel launches are asynchronous; synchronize so the timer measures
    // actual execution, not just launch overhead.
    cudaDeviceSynchronize();
    timer().endGpuTimer();
    // Shift right by one to turn the inclusive result into an exclusive scan.
    cudaMemcpy(odata + 1, dev_bufferB, (n - 1) * sizeof(int), cudaMemcpyDeviceToHost);
    odata[0] = 0;
    endSimulation();
}
}
}
|
74e524e4508b2424358fdb140f9811e898912d02.hip
|
// !!! This is a file automatically generated by hipify!!!
#include <iostream>
#include <cstdio>
#include <fstream>
#include <cstdlib>
using namespace std;
#include <hip/hip_runtime.h>
//#include <sdkHelper.h>
#define TIMES 1
#ifdef GEM5_FUSION
#include <stdint.h>
extern "C" {
void m5_work_begin(uint64_t workid, uint64_t threadid);
void m5_work_end(uint64_t workid, uint64_t threadid);
}
#endif
//////////////////////////////////////////////////////////////////////////////////////////////////////////////////
//////////////////////////////////////////////////HELP FUNCTIONS/////////////////////////////////////////////////
// Fill data[0..n-1] with pseudo-random values in [0, 1].
void RandomInit(float* data, int n)
{
    for (int idx = 0; idx < n; ++idx)
        data[idx] = rand() / (float)RAND_MAX;
}
// Convenience macro: checks a HIP runtime call's return code, tagging the
// diagnostic with the call site's file and line.
#define checkCudaErrors(err) __checkCudaErrors (err, __FILE__, __LINE__)
// Print a diagnostic to stderr and terminate the process when a HIP runtime
// call returned an error code.
inline void __checkCudaErrors(hipError_t err, const char *file, const int line )
{
if(hipSuccess != err)
{
fprintf(stderr, "%s(%i) : CUDA Runtime API error %d: %s.\n",file, line, (int)err, hipGetErrorString( err ) );
exit(-1);
}
}
// This will output the proper error string when calling hipGetLastError
// This will output the proper error string when calling hipGetLastError
// Convenience macro: checks the sticky "last error" (e.g. after a kernel
// launch, which returns no status directly), tagging the call site.
#define getLastCudaError(msg) __getLastCudaError (msg, __FILE__, __LINE__)
// Print a diagnostic (including the caller-supplied context message) and
// terminate the process when the runtime recorded an error.
inline void __getLastCudaError(const char *errorMessage, const char *file, const int line )
{
hipError_t err = hipGetLastError();
if (hipSuccess != err)
{
fprintf(stderr, "%s(%i) : getLastCudaError() CUDA error : %s : (%d) %s.\n",
file, line, errorMessage, (int)err, hipGetErrorString( err ) );
exit(-1);
}
}
////////////////////////////////////////////////////////////////////////////////////////////////////////////////////
///////////////////////////////////////////////////////////////////////////////////////////////////////////////////////
//////////////////////////////////////////__NAIVE_MATRIX_MULTIPLICATION_///////////////////////////////////////////////
// Device code
// Compute C = A * B
#define TILEWIDTH_X 32
#define TILEWIDTH_Y 32
#define TILEWIDTH 32
// Naive GEMM kernel: C = A * B with one thread per element of C.
// All matrices are row-major; C is numCRows x numCColumns, A has
// numAColumns columns and B has numBColumns columns.
__global__ void matrixMultiply(float * A, float * B, float * C,
                               int numAColumns,
                               int numBColumns,
                               int numCRows, int numCColumns) {
    int r = blockIdx.y * blockDim.y + threadIdx.y;
    int c = blockIdx.x * blockDim.x + threadIdx.x;
    if (r >= numCRows || c >= numCColumns)
        return;  // guard threads past the matrix edge
    float acc = 0;
    for (int k = 0; k < numAColumns; ++k)
        acc += A[r * numAColumns + k] * B[k * numBColumns + c];
    C[r * numCColumns + c] = acc;
}
// CPU reference implementation: C = A * B with a straightforward triple
// loop. All matrices are row-major; used to validate the GPU result.
void MatrixMulOnHost(float * A, float * B, float * C,
                     int numARows, int numAColumns,
                     int numBRows, int numBColumns,
                     int numCRows, int numCColumns)
{
    for (int r = 0; r < numARows; ++r) {
        for (int c = 0; c < numBColumns; ++c) {
            float acc = 0;
            for (int k = 0; k < numAColumns; ++k)
                acc += A[r * numAColumns + k] * B[k * numBColumns + c];
            C[r * numCColumns + c] = acc;
        }
    }
}
// Allocate and randomly initialize host matrices A (numARows x numAColumns)
// and B (numBRows x numBColumns), launch the naive GPU multiply with a
// blockx x blocky thread block, report timing/GFLOPS, and free the buffers.
// Returns 0; exits the process on dimension mismatch.
// `optimzed`/`define` only select which banner is printed here.
int MatrixMulti(int numARows, int numAColumns, int numBRows, int numBColumns, int blockx, int blocky, bool optimzed, bool define=false)
{
if(!optimzed)
printf("NAIVE MATRIX MULTIPLICATION\n");
else if(define)
printf("Optimzed MATRIX MULTIPLICATION with static shared memory allocation\n");
else
printf("Optimzed MATRIX MULTIPLICATION with static dynamic memory allocation\n");
float * hostA; // The A matrix
float * hostB; // The B matrix
float * hostC; // The output C matrix
float * deviceA;
float * deviceB;
float * deviceC;
int numCRows = numARows;; // number of rows in the matrix C
int numCColumns = numBColumns;; // number of columns in the matrix C
double total_time=0;
//StopWatchInterface* timer;
int sizeA = numARows*numAColumns*sizeof(float);
int sizeB = numBRows*numBColumns*sizeof(float);
int sizeC = numCRows*numCColumns*sizeof(float);
if(numAColumns != numBRows)
{
cout<<"Error in inputs dimension! A columns != B rows"<<endl;
exit(-1);
}
// Allocate input vectors h_A and h_B in host memory
hostA = (float*)malloc(sizeA);
hostB = (float*)malloc(sizeB);
hostC = (float*)malloc(sizeC);
// Initialize input vectors
RandomInit(hostA, numARows*numAColumns);
RandomInit(hostB, numBRows*numBColumns);
cout<<"The dimensions of A are "<<numARows<<" x "<<numAColumns<<endl;
cout<<"The dimensions of B are "<<numBRows<<" x "<<numBColumns<<endl;
//Allocate GPU memory here
// checkCudaErrors(hipMalloc(&deviceA, sizeA));
// checkCudaErrors(hipMalloc(&deviceB, sizeB));
// checkCudaErrors(hipMalloc(&deviceC, sizeC));
//@@ Copy memory to the GPU here
//checkCudaErrors(hipMemcpy(deviceA, hostA, sizeA, hipMemcpyHostToDevice));
// checkCudaErrors(hipMemcpy(deviceB, hostB, sizeB, hipMemcpyHostToDevice));
#ifdef GEM5_FUSION
m5_work_begin(0, 0);
#endif
dim3 dimBlock, dimGrid;
dimBlock = dim3(blockx, blocky);
dimGrid = dim3((numCColumns+blockx-1)/blockx, (numCRows+blocky-1)/blocky);
// NOTE(review): device allocation/copies above are commented out, so the
// kernel is launched directly on the HOST pointers hostA/hostB/hostC. That
// only works in an environment with a unified host/device address space
// (presumably the gem5-fusion simulation this file targets); on real
// hardware it faults — confirm before reusing outside gem5.
hipLaunchKernelGGL(( matrixMultiply), dim3(dimGrid), dim3(dimBlock), 0, 0, hostA, hostB, hostC, numAColumns, numBColumns, numCRows, numCColumns);
getLastCudaError("kernel launch failure");
checkCudaErrors(hipDeviceSynchronize());
#ifdef GEM5_FUSION
m5_work_end(0, 0);
#endif
// NOTE(review): total_time is never accumulated (the timer code above is
// commented out), so dSeconds is 0 and the reported time/GFLOPS are
// meaningless (division by zero).
double dSeconds = total_time/((double)TIMES * 1000);
double dNumOps = 2.0 * (double)numARows * (double)numAColumns * (double)numBColumns;
double gflops = 1.0e-9 * dNumOps/dSeconds;
cout<<"Time = "<<dSeconds*1.0e3<< "msec"<<endl<<"gflops = "<<gflops<<endl;
//@@ Copy the GPU memory back to the CPU here
// checkCudaErrors(hipMemcpy(hostC, deviceC, sizeC, hipMemcpyDeviceToHost));
// Verify result
//float* hostcpu = (float*)malloc(sizeC);
/*MatrixMulOnHost(hostA,hostB,hostcpu,numARows,numAColumns,numBRows,numBColumns,numCRows,numCColumns);
int i;
int j;
for (i = 0; i < numCRows; ++i)
for(j=0; j<numCColumns; j++)
{
if (fabs(hostC[i*numCColumns + j] - hostcpu[i*numCColumns + j]) > 1e-3)
{
break;
}
}*/
//@@ Free the GPU memory here
//checkCudaErrors(hipFree(deviceA));
// checkCudaErrors(hipFree(deviceB));
//checkCudaErrors(hipFree(deviceC));
//hipDeviceReset();
free(hostA);
free(hostB);
free(hostC);
//free(hostcpu);
/*if(i == numCRows && j == numCColumns)
cout<<"SUCCSESS"<<endl;
else
cout<<"FAILED"<<endl; */
return 0;
}
/////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////
/////////////////////////////////////////////////////////////////////////////////////////////////////////
// Entry point. Usage:
//   prog numARows numAColumns numBRows numBColumns blockDimX blockDimY
// Runs the naive (non-optimized) matrix multiplication.
int main(int argc,char *argv[])
{
    // argv[1]..argv[6] are consumed below, so argc must be at least 7.
    // The original guard (argc < 6) let argc == 6 through, making
    // atoi(argv[6]) dereference argv's NULL terminator.
    if(argc < 7)
        printf("Insufficient number of arguments!\n");
    else
    {
        MatrixMulti(atoi(argv[1]), atoi(argv[2]), atoi(argv[3]), atoi(argv[4]), atoi(argv[5]), atoi(argv[6]), false);
    }
}
|
74e524e4508b2424358fdb140f9811e898912d02.cu
|
#include <iostream>
#include <cstdio>
#include <fstream>
#include <cstdlib>
using namespace std;
#include <cuda_runtime.h>
//#include <sdkHelper.h>
#define TIMES 1
#ifdef GEM5_FUSION
#include <stdint.h>
extern "C" {
void m5_work_begin(uint64_t workid, uint64_t threadid);
void m5_work_end(uint64_t workid, uint64_t threadid);
}
#endif
//////////////////////////////////////////////////////////////////////////////////////////////////////////////////
//////////////////////////////////////////////////HELP FUNCTIONS/////////////////////////////////////////////////
// Fill data[0..n-1] with pseudo-random values in [0, 1].
void RandomInit(float* data, int n)
{
    for (int idx = 0; idx < n; ++idx)
        data[idx] = rand() / (float)RAND_MAX;
}
// Convenience macro: checks a CUDA runtime call's return code, tagging the
// diagnostic with the call site's file and line.
#define checkCudaErrors(err) __checkCudaErrors (err, __FILE__, __LINE__)
// Print a diagnostic to stderr and terminate the process when a CUDA
// runtime call returned an error code.
inline void __checkCudaErrors(cudaError err, const char *file, const int line )
{
if(cudaSuccess != err)
{
fprintf(stderr, "%s(%i) : CUDA Runtime API error %d: %s.\n",file, line, (int)err, cudaGetErrorString( err ) );
exit(-1);
}
}
// This will output the proper error string when calling cudaGetLastError
// This will output the proper error string when calling cudaGetLastError
// Convenience macro: checks the sticky "last error" (e.g. after a kernel
// launch, which returns no status directly), tagging the call site.
#define getLastCudaError(msg) __getLastCudaError (msg, __FILE__, __LINE__)
// Print a diagnostic (including the caller-supplied context message) and
// terminate the process when the runtime recorded an error.
inline void __getLastCudaError(const char *errorMessage, const char *file, const int line )
{
cudaError_t err = cudaGetLastError();
if (cudaSuccess != err)
{
fprintf(stderr, "%s(%i) : getLastCudaError() CUDA error : %s : (%d) %s.\n",
file, line, errorMessage, (int)err, cudaGetErrorString( err ) );
exit(-1);
}
}
////////////////////////////////////////////////////////////////////////////////////////////////////////////////////
///////////////////////////////////////////////////////////////////////////////////////////////////////////////////////
//////////////////////////////////////////__NAIVE_MATRIX_MULTIPLICATION_///////////////////////////////////////////////
// Device code
// Compute C = A * B
#define TILEWIDTH_X 32
#define TILEWIDTH_Y 32
#define TILEWIDTH 32
// Naive GEMM kernel: C = A * B with one thread per element of C.
// All matrices are row-major; C is numCRows x numCColumns, A has
// numAColumns columns and B has numBColumns columns.
__global__ void matrixMultiply(float * A, float * B, float * C,
                               int numAColumns,
                               int numBColumns,
                               int numCRows, int numCColumns) {
    int r = blockIdx.y * blockDim.y + threadIdx.y;
    int c = blockIdx.x * blockDim.x + threadIdx.x;
    if (r >= numCRows || c >= numCColumns)
        return;  // guard threads past the matrix edge
    float acc = 0;
    for (int k = 0; k < numAColumns; ++k)
        acc += A[r * numAColumns + k] * B[k * numBColumns + c];
    C[r * numCColumns + c] = acc;
}
// CPU reference implementation: C = A * B with a straightforward triple
// loop. All matrices are row-major; used to validate the GPU result.
void MatrixMulOnHost(float * A, float * B, float * C,
                     int numARows, int numAColumns,
                     int numBRows, int numBColumns,
                     int numCRows, int numCColumns)
{
    for (int r = 0; r < numARows; ++r) {
        for (int c = 0; c < numBColumns; ++c) {
            float acc = 0;
            for (int k = 0; k < numAColumns; ++k)
                acc += A[r * numAColumns + k] * B[k * numBColumns + c];
            C[r * numCColumns + c] = acc;
        }
    }
}
// Allocate and randomly initialize host matrices A (numARows x numAColumns)
// and B (numBRows x numBColumns), launch the naive GPU multiply with a
// blockx x blocky thread block, report timing/GFLOPS, and free the buffers.
// Returns 0; exits the process on dimension mismatch.
// `optimzed`/`define` only select which banner is printed here.
int MatrixMulti(int numARows, int numAColumns, int numBRows, int numBColumns, int blockx, int blocky, bool optimzed, bool define=false)
{
if(!optimzed)
printf("NAIVE MATRIX MULTIPLICATION\n");
else if(define)
printf("Optimzed MATRIX MULTIPLICATION with static shared memory allocation\n");
else
printf("Optimzed MATRIX MULTIPLICATION with static dynamic memory allocation\n");
float * hostA; // The A matrix
float * hostB; // The B matrix
float * hostC; // The output C matrix
float * deviceA;
float * deviceB;
float * deviceC;
int numCRows = numARows;; // number of rows in the matrix C
int numCColumns = numBColumns;; // number of columns in the matrix C
double total_time=0;
//StopWatchInterface* timer;
int sizeA = numARows*numAColumns*sizeof(float);
int sizeB = numBRows*numBColumns*sizeof(float);
int sizeC = numCRows*numCColumns*sizeof(float);
if(numAColumns != numBRows)
{
cout<<"Error in inputs dimension! A columns != B rows"<<endl;
exit(-1);
}
// Allocate input vectors h_A and h_B in host memory
hostA = (float*)malloc(sizeA);
hostB = (float*)malloc(sizeB);
hostC = (float*)malloc(sizeC);
// Initialize input vectors
RandomInit(hostA, numARows*numAColumns);
RandomInit(hostB, numBRows*numBColumns);
cout<<"The dimensions of A are "<<numARows<<" x "<<numAColumns<<endl;
cout<<"The dimensions of B are "<<numBRows<<" x "<<numBColumns<<endl;
//Allocate GPU memory here
// checkCudaErrors(cudaMalloc(&deviceA, sizeA));
// checkCudaErrors(cudaMalloc(&deviceB, sizeB));
// checkCudaErrors(cudaMalloc(&deviceC, sizeC));
//@@ Copy memory to the GPU here
//checkCudaErrors(cudaMemcpy(deviceA, hostA, sizeA, cudaMemcpyHostToDevice));
// checkCudaErrors(cudaMemcpy(deviceB, hostB, sizeB, cudaMemcpyHostToDevice));
#ifdef GEM5_FUSION
m5_work_begin(0, 0);
#endif
dim3 dimBlock, dimGrid;
dimBlock = dim3(blockx, blocky);
dimGrid = dim3((numCColumns+blockx-1)/blockx, (numCRows+blocky-1)/blocky);
// NOTE(review): device allocation/copies above are commented out, so the
// kernel is launched directly on the HOST pointers hostA/hostB/hostC. That
// only works in an environment with a unified host/device address space
// (presumably the gem5-fusion simulation this file targets); on real
// hardware it faults — confirm before reusing outside gem5.
matrixMultiply<<<dimGrid, dimBlock>>>(hostA, hostB, hostC, numAColumns, numBColumns, numCRows, numCColumns);
getLastCudaError("kernel launch failure");
checkCudaErrors(cudaThreadSynchronize());
#ifdef GEM5_FUSION
m5_work_end(0, 0);
#endif
// NOTE(review): total_time is never accumulated (the timer code above is
// commented out), so dSeconds is 0 and the reported time/GFLOPS are
// meaningless (division by zero).
double dSeconds = total_time/((double)TIMES * 1000);
double dNumOps = 2.0 * (double)numARows * (double)numAColumns * (double)numBColumns;
double gflops = 1.0e-9 * dNumOps/dSeconds;
cout<<"Time = "<<dSeconds*1.0e3<< "msec"<<endl<<"gflops = "<<gflops<<endl;
//@@ Copy the GPU memory back to the CPU here
// checkCudaErrors(cudaMemcpy(hostC, deviceC, sizeC, cudaMemcpyDeviceToHost));
// Verify result
//float* hostcpu = (float*)malloc(sizeC);
/*MatrixMulOnHost(hostA,hostB,hostcpu,numARows,numAColumns,numBRows,numBColumns,numCRows,numCColumns);
int i;
int j;
for (i = 0; i < numCRows; ++i)
for(j=0; j<numCColumns; j++)
{
if (fabs(hostC[i*numCColumns + j] - hostcpu[i*numCColumns + j]) > 1e-3)
{
break;
}
}*/
//@@ Free the GPU memory here
//checkCudaErrors(cudaFree(deviceA));
// checkCudaErrors(cudaFree(deviceB));
//checkCudaErrors(cudaFree(deviceC));
//cudaDeviceReset();
free(hostA);
free(hostB);
free(hostC);
//free(hostcpu);
/*if(i == numCRows && j == numCColumns)
cout<<"SUCCSESS"<<endl;
else
cout<<"FAILED"<<endl; */
return 0;
}
/////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////
/////////////////////////////////////////////////////////////////////////////////////////////////////////
// Entry point. Usage:
//   prog numARows numAColumns numBRows numBColumns blockDimX blockDimY
// Runs the naive (non-optimized) matrix multiplication.
int main(int argc,char *argv[])
{
    // argv[1]..argv[6] are consumed below, so argc must be at least 7.
    // The original guard (argc < 6) let argc == 6 through, making
    // atoi(argv[6]) dereference argv's NULL terminator.
    if(argc < 7)
        printf("Insufficient number of arguments!\n");
    else
    {
        MatrixMulti(atoi(argv[1]), atoi(argv[2]), atoi(argv[3]), atoi(argv[4]), atoi(argv[5]), atoi(argv[6]), false);
    }
}
|
ec4360ebfc5b1f3172cef44517354916b0187210.hip
|
// !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
extern "C"
// Element-wise product with offset/stride gating:
// result[i] = dx[i] * dy[i] for every i that is >= both offsets and
// divisible by both increments; all other entries are left untouched.
// Grid-stride loop: correct for any launch configuration.
__global__ void mul_strided_double(int n,int xOffset,int yOffset, double *dx, double *dy,int incx,int incy,double *result) {
    int step = blockDim.x * gridDim.x;
    for (int idx = blockIdx.x * blockDim.x + threadIdx.x; idx < n; idx += step) {
        bool pastOffsets = (idx >= xOffset) && (idx >= yOffset);
        bool onStride = (idx % incx == 0) && (idx % incy == 0);
        if (pastOffsets && onStride)
            result[idx] = dx[idx] * dy[idx];
    }
}
|
ec4360ebfc5b1f3172cef44517354916b0187210.cu
|
extern "C"
// Element-wise product with offset/stride gating:
// result[i] = dx[i] * dy[i] for every i that is >= both offsets and
// divisible by both increments; all other entries are left untouched.
// Grid-stride loop: correct for any launch configuration.
__global__ void mul_strided_double(int n,int xOffset,int yOffset, double *dx, double *dy,int incx,int incy,double *result) {
    int step = blockDim.x * gridDim.x;
    for (int idx = blockIdx.x * blockDim.x + threadIdx.x; idx < n; idx += step) {
        bool pastOffsets = (idx >= xOffset) && (idx >= yOffset);
        bool onStride = (idx % incx == 0) && (idx % incy == 0);
        if (pastOffsets && onStride)
            result[idx] = dx[idx] * dy[idx];
    }
}
|
ffa46c0f799b70e3ac2f01ca998cacb8d9d4df0f.hip
|
// !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
#define NO_HIDDEN_NEURONS 5
extern "C"
// One thread per training batch (indexed by gid). Forward-propagates each of
// the batch's noInputs samples through a one-hidden-layer network with
// NO_HIDDEN_NEURONS step-activation units and a single step-activation
// output, then accumulates perceptron-style weight deltas into this thread's
// section of weightsDeltas. Per call, only hidden neuron
// currentHidden = offsetHidden % NO_HIDDEN_NEURONS gets its input-layer
// deltas updated; output-layer deltas are accumulated for all hidden units.
// Layouts: inputs holds noInputs*inputSize floats per batch; weights packs
// (inputSize+1) values per hidden neuron (bias last) followed by
// NO_HIDDEN_NEURONS+1 output weights; weightsDeltas has one record of the
// same shape as weights per thread.
__global__ void deltasBatch(float *inputs, float *outputs, float *weights, float *weightsDeltas, int noInputs, int inputSize, int offsetHidden){
int gid = blockIdx.x * blockDim.x + threadIdx.x;
float sum=0;
// Per-thread base offsets into the flat buffers.
int offsetDeltas = ((inputSize+1)*NO_HIDDEN_NEURONS+NO_HIDDEN_NEURONS+1)*gid;
int offsetInput = noInputs*inputSize*gid;
int offsetOutputs = noInputs*gid;
float activationHidden[NO_HIDDEN_NEURONS];
float error;
int currentHidden = offsetHidden%NO_HIDDEN_NEURONS;
// Zero this thread's deltas for currentHidden's input weights (incl. bias)
// and its output-layer weight before accumulating over the batch.
for(int imageIndex=0;imageIndex<=inputSize;imageIndex++){
weightsDeltas[offsetDeltas+(inputSize+1)*currentHidden+imageIndex]=0;
}
weightsDeltas[offsetDeltas+(inputSize+1)*NO_HIDDEN_NEURONS+currentHidden]=0;
for (int i=0;i<noInputs;i++){
// Forward pass, hidden layer: weighted sum + bias, step activation.
for(int hidden=0;hidden<NO_HIDDEN_NEURONS;hidden++){
sum=0;
for(int imageIndex=0;imageIndex<inputSize;imageIndex++){
sum+=inputs[offsetInput+i*inputSize+imageIndex]*weights[(inputSize+1)*hidden+imageIndex];
}
sum+=weights[(inputSize+1)*hidden+inputSize];
if(sum>0) activationHidden[hidden]=1;
else activationHidden[hidden]=0;
//activationHidden[hidden]=sum/(1+abs(sum));
}
// Forward pass, output neuron: weighted sum of hidden activations + bias,
// step activation.
sum=0;
for(int hidden=0;hidden<NO_HIDDEN_NEURONS;hidden++){
sum+=activationHidden[hidden]*weights[(inputSize+1)*NO_HIDDEN_NEURONS+hidden];
}
sum+=weights[(inputSize+1)*NO_HIDDEN_NEURONS+NO_HIDDEN_NEURONS];
if(sum>0)sum=1;
else sum=0;
// Output error in {-1, 0, 1}; only misclassified samples contribute.
sum=outputs[offsetOutputs+i]-sum;
if(sum!=0){
// Output-layer deltas for every hidden unit plus the output bias.
for(int hidden=0;hidden<NO_HIDDEN_NEURONS;hidden++){
weightsDeltas[offsetDeltas+(inputSize+1)*NO_HIDDEN_NEURONS+hidden]+=sum*activationHidden[hidden];
}
weightsDeltas[offsetDeltas+(inputSize+1)*NO_HIDDEN_NEURONS+NO_HIDDEN_NEURONS]+=sum;
// Back-propagated error for currentHidden, thresholded to {0, 1} and
// compared with its activation.
error=sum*weights[(inputSize+1)*NO_HIDDEN_NEURONS+currentHidden];
if(error>0)error=1;
else error=0;
error=error-activationHidden[currentHidden];
if(error!=0){
// Input-layer deltas for currentHidden's weights and bias.
for(int imageIndex=0;imageIndex<inputSize;imageIndex++){
weightsDeltas[offsetDeltas+(inputSize+1)*currentHidden+imageIndex]+=error*inputs[offsetInput+i*inputSize+imageIndex];
}
weightsDeltas[offsetDeltas+(inputSize+1)*currentHidden+inputSize]+=error;
}
}
}
}
|
ffa46c0f799b70e3ac2f01ca998cacb8d9d4df0f.cu
|
#define NO_HIDDEN_NEURONS 5
extern "C"
// One thread per training batch (indexed by gid). Forward-propagates each of
// the batch's noInputs samples through a one-hidden-layer network with
// NO_HIDDEN_NEURONS step-activation units and a single step-activation
// output, then accumulates perceptron-style weight deltas into this thread's
// section of weightsDeltas. Per call, only hidden neuron
// currentHidden = offsetHidden % NO_HIDDEN_NEURONS gets its input-layer
// deltas updated; output-layer deltas are accumulated for all hidden units.
// Layouts: inputs holds noInputs*inputSize floats per batch; weights packs
// (inputSize+1) values per hidden neuron (bias last) followed by
// NO_HIDDEN_NEURONS+1 output weights; weightsDeltas has one record of the
// same shape as weights per thread.
__global__ void deltasBatch(float *inputs, float *outputs, float *weights, float *weightsDeltas, int noInputs, int inputSize, int offsetHidden){
int gid = blockIdx.x * blockDim.x + threadIdx.x;
float sum=0;
// Per-thread base offsets into the flat buffers.
int offsetDeltas = ((inputSize+1)*NO_HIDDEN_NEURONS+NO_HIDDEN_NEURONS+1)*gid;
int offsetInput = noInputs*inputSize*gid;
int offsetOutputs = noInputs*gid;
float activationHidden[NO_HIDDEN_NEURONS];
float error;
int currentHidden = offsetHidden%NO_HIDDEN_NEURONS;
// Zero this thread's deltas for currentHidden's input weights (incl. bias)
// and its output-layer weight before accumulating over the batch.
for(int imageIndex=0;imageIndex<=inputSize;imageIndex++){
weightsDeltas[offsetDeltas+(inputSize+1)*currentHidden+imageIndex]=0;
}
weightsDeltas[offsetDeltas+(inputSize+1)*NO_HIDDEN_NEURONS+currentHidden]=0;
for (int i=0;i<noInputs;i++){
// Forward pass, hidden layer: weighted sum + bias, step activation.
for(int hidden=0;hidden<NO_HIDDEN_NEURONS;hidden++){
sum=0;
for(int imageIndex=0;imageIndex<inputSize;imageIndex++){
sum+=inputs[offsetInput+i*inputSize+imageIndex]*weights[(inputSize+1)*hidden+imageIndex];
}
sum+=weights[(inputSize+1)*hidden+inputSize];
if(sum>0) activationHidden[hidden]=1;
else activationHidden[hidden]=0;
//activationHidden[hidden]=sum/(1+abs(sum));
}
// Forward pass, output neuron: weighted sum of hidden activations + bias,
// step activation.
sum=0;
for(int hidden=0;hidden<NO_HIDDEN_NEURONS;hidden++){
sum+=activationHidden[hidden]*weights[(inputSize+1)*NO_HIDDEN_NEURONS+hidden];
}
sum+=weights[(inputSize+1)*NO_HIDDEN_NEURONS+NO_HIDDEN_NEURONS];
if(sum>0)sum=1;
else sum=0;
// Output error in {-1, 0, 1}; only misclassified samples contribute.
sum=outputs[offsetOutputs+i]-sum;
if(sum!=0){
// Output-layer deltas for every hidden unit plus the output bias.
for(int hidden=0;hidden<NO_HIDDEN_NEURONS;hidden++){
weightsDeltas[offsetDeltas+(inputSize+1)*NO_HIDDEN_NEURONS+hidden]+=sum*activationHidden[hidden];
}
weightsDeltas[offsetDeltas+(inputSize+1)*NO_HIDDEN_NEURONS+NO_HIDDEN_NEURONS]+=sum;
// Back-propagated error for currentHidden, thresholded to {0, 1} and
// compared with its activation.
error=sum*weights[(inputSize+1)*NO_HIDDEN_NEURONS+currentHidden];
if(error>0)error=1;
else error=0;
error=error-activationHidden[currentHidden];
if(error!=0){
// Input-layer deltas for currentHidden's weights and bias.
for(int imageIndex=0;imageIndex<inputSize;imageIndex++){
weightsDeltas[offsetDeltas+(inputSize+1)*currentHidden+imageIndex]+=error*inputs[offsetInput+i*inputSize+imageIndex];
}
weightsDeltas[offsetDeltas+(inputSize+1)*currentHidden+inputSize]+=error;
}
}
}
}
|
2fa0341cd463ba5be885c1c07edde1853cbf2cbb.hip
|
// !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
#include "le-simulation.cuh"
#include <cmath>
#include <fstream>
#include <iostream>
#include <random>
// Needed to move this to .cu
std::default_random_engine random_engine{(uint_fast32_t)time(0)}; // seeded random number generator
std::uniform_real_distribution<double> uniform_dist{0.0, 1.0}; // uniform distribution, pass {lowerbound, upperbound}
std::normal_distribution<double> normal_dist{PARAM_MEAN, PARAM_SIGMA}; // normal distribribution, pass {mean, stddev}
////////// GRID INITIALIZATION //////////
// Zero out the N x N dose grid (element (i, j) stored at doses[i + j * N]).
__host__ void initialize_doses(double* doses, int N)
{
    int total = N * N;
    for (int idx = 0; idx < total; idx++)
    {
        doses[idx] = 0;
    }
    return;
}
// Fill the N x N density grid with draws from the module's uniform [0, 1)
// distribution (element (i, j) stored at densities[i + j * N]).
__host__ void initialize_densities_random(double* densities, int N)
{
    for (int row = 0; row < N; row++)
    {
        for (int col = 0; col < N; col++)
        {
            densities[row + col * N] = uniform_dist(random_engine);
        }
    }
    return;
}
// Fill every cell of the N x N density grid with the same density value.
__host__ void initialize_densities_constant(double* densities, int N, double density)
{
    int total = N * N;
    for (int idx = 0; idx < total; idx++)
    {
        densities[idx] = density;
    }
    return;
}
// Fill the N x N grid with a single Gaussian blob centered on the middle
// pixel: peak value max_density, standard deviation spread * N.
__host__ void initialize_densities_centered_gaussian(double* densities, int N, double max_density, double spread)
{
    const int mid = N / 2;               // middle pixel
    const double std_dev = spread * N;
    const double denom = 2 * std_dev * std_dev;
    for (int i = 0; i < N; i++)
    {
        for (int j = 0; j < N; j++)
        {
            int x = i - mid;
            int y = j - mid;
            densities[i + j * N] = max_density * exp(-(x * x + y * y) / denom);
        }
    }
    return;
}
// Superimpose n_gaussians randomly-centered Gaussian blobs (peak
// max_density, std dev spread * N) on the N x N density grid, then rescale
// so the highest resulting density equals max_density.
__host__ void
initialize_densities_random_gaussians(double* densities, int N, int n_gaussians, double max_density, double spread)
{
    double std_dev = spread * N;
    double highest = 0;
    // Add the Gaussians
    for (int k = 0; k < n_gaussians; k++)
    {
        int mid_x = floor(uniform_dist(random_engine) * N);
        int mid_y = floor(uniform_dist(random_engine) * N);
        for (int i = 0; i < N; i++)
        {
            for (int j = 0; j < N; j++)
            {
                int x = i - mid_x;
                int y = j - mid_y;
                densities[i + j * N] += max_density * exp(-(x * x + y * y) / (2 * std_dev * std_dev));
                // Track the maximum of the cell just updated. The original
                // read densities[i * N + j] — the TRANSPOSED cell — here and
                // in the normalization below, which produced a wrong maximum
                // and double-scaled cells during normalization.
                highest = fmax(highest, densities[i + j * N]);
            }
        }
    }
    // Normalize the resulting density distribution (guard against division
    // by zero when the grid is empty or max_density is 0).
    if (highest > 0)
    {
        for (int i = 0; i < N; i++)
        {
            for (int j = 0; j < N; j++)
            {
                densities[i + j * N] = max_density * densities[i + j * N] / highest;
            }
        }
    }
    return;
}
////////// OUTPUT //////////
// Dump an N x N grid to a CSV file: one line per j, values separated by
// commas, each line ended with a newline.
__host__ void write_to_csv_file(double* grid_data, int N, const std::string& filename)
{
    std::ofstream output;
    output.open(filename);
    for (int j = 0; j < N; j++)
    {
        for (int i = 0; i < N; i++)
        {
            output << grid_data[i + j * N];
            output << ((i == N - 1) ? "\n" : ",");
        }
    }
    output.close();
    return;
}
////////// RANDOMIZATION //////////
// Seed a per-thread curand state from the clock and the global thread id.
__device__ void init_curand_state(hiprandState_t* state)
{
// Initialize random kernel
int tId = threadIdx.x + (blockIdx.x * blockDim.x);
// NOTE(review): clock()-based seeding makes device-side draws
// non-reproducible between runs; a fixed seed would allow deterministic
// simulations.
hiprand_init((unsigned long long)clock(), tId, 0, state);
return;
}
// Uniformly random angle obtained by scaling a hiprand uniform draw by 2*pi.
__device__ double uniform_angle_dist(hiprandState_t* state) { return 2 * M_PI * hiprand_uniform_double(state); }
// Normally distributed angle with the given mean and standard deviation.
__device__ double normal_angle_dist(hiprandState_t* state, double mean, double std_dev)
{
return mean + std_dev * hiprand_normal_double(state);
}
// Draw a uniformly random source angle, collapsing values within
// PARAM_EPSILON of 2*pi to exactly 0.
__device__ double random_source_angle_uniform()
{
hiprandState_t state;
init_curand_state(&state);
double angle = uniform_angle_dist(&state);
// Angle of 2 pi goes to 0
if (abs(angle - 2 * M_PI) < PARAM_EPSILON)
{
angle = 0.0;
}
return angle;
}
// Draw a source angle from the module's normal distribution and wrap it
// into [0, 2*pi).
__host__ double random_source_angle_normal()
{
    double angle = normal_dist(random_engine);
    const double two_pi = 2 * M_PI;
    // Normalize angle into [0, 2*pi)
    while (angle < 0.0)
    {
        angle += two_pi;
    }
    while (angle >= two_pi)
    {
        angle -= two_pi;
    }
    return angle;
}
////////// RAY LOCATION CHECKING //////////
// Map a pixel to its (region_x, region_y) coordinates, where the N x N grid
// is partitioned into regions of M x M pixels each.
__host__ __device__ Region get_region(Pixel position, int N, int M)
{
int px = position.first;
int py = position.second;
Region region;
region.first = px / M;
region.second = py / M;
return region;
}
// Flatten a pixel's region coordinates into a single index,
// row-major over the L x L region grid with L = N / M regions per side.
__host__ __device__ int get_region_index(Pixel position, int N, int M)
{
Region region = get_region(position, N, M);
int L = N / M; // number of regions per side
return region.first + region.second * L;
}
// True when the pixel lies outside the N x N grid.
__device__ bool out_of_bounds(Pixel current_pixel, int N)
{
return (current_pixel.first < 0 || current_pixel.first >= N || current_pixel.second < 0 ||
current_pixel.second >= N);
}
////////// RAY CREATION //////////
// Spawn num_primary_rays primary rays from a source above the grid. Each ray
// gets a normally-distributed angle and enters through the top edge; rays
// that miss the grid horizontally or point away from it are skipped. Every
// spawned ray is wrapped in its own RayGroup (capacity
// max_rays_per_ray_group, my_rays heap-allocated with malloc) and appended
// to the region group of the region containing its entry pixel.
__host__ void spawn_primary_rays(
std::vector<RegionGroup>& region_groups, int num_primary_rays, int max_rays_per_ray_group, int N, int M)
{
int L = N / M; // number of regions per side
for (int i = 0; i < num_primary_rays; i++)
{
// Randomly select source angle from normal distribution
double source_angle = random_source_angle_normal();
// Calculate initial ray position
double horiz_dist_from_center = PARAM_D * N * tan(source_angle); // horizontal distance from center of top edge
int middle_pixel = N / 2;
double horiz_dist_from_left = middle_pixel + horiz_dist_from_center;
// Check if ray missed the grid entirely, or points back upward
// (angles in [pi/2, 3*pi/2]).
if (horiz_dist_from_left < 0 || horiz_dist_from_left >= N ||
(source_angle >= M_PI / 2 && source_angle <= 3 * M_PI / 2))
{
continue;
}
// If not, spawn it
double horiz_dist_from_left_rounded = floor(horiz_dist_from_left);
Pixel spawn_pixel;
spawn_pixel.first = horiz_dist_from_left_rounded;
spawn_pixel.second = 0; // always starts from top of grid
// Fractional offset along the entry pixel's top edge.
double edge_dist = horiz_dist_from_left - horiz_dist_from_left_rounded;
Ray r = Ray::primary(source_angle, spawn_pixel, PIXEL_EDGE::TOP, edge_dist);
// Create new ray group for primary ray and add it
RayGroup primary_ray_group;
primary_ray_group.my_rays = (Ray*)malloc(max_rays_per_ray_group * sizeof(Ray));
primary_ray_group.max_size = max_rays_per_ray_group;
primary_ray_group.my_rays[0] = r; // add the new ray
primary_ray_group.my_size = 1;
// Add the new ray group to the appropriate region
Region region = get_region(spawn_pixel, N, M);
int region_index = region.first + region.second * L; // index of region within vector of region groups
region_groups[region_index].push_back(primary_ray_group);
}
return;
}
// Split total_energy into PARAM_KS equal parts and spawn that many secondary
// rays from the center of spawn_pixel at uniformly random angles, appending
// each in-bounds ray to `group`.
// NOTE(review): rays are appended at group->my_size without checking
// group->max_size — confirm callers size the group so the my_rays array
// cannot overflow.
__device__ void spawn_secondary_rays(RayGroup* group, Pixel spawn_pixel, double total_energy, int N)
{
// int thread_index = blockIdx.x * blockDim.x + threadIdx.x;
for (int i = 0; i < PARAM_KS; i++)
{
double source_angle = random_source_angle_uniform(); // uniform random source angle
double partial_energy = total_energy / PARAM_KS;
Ray new_ray = Ray::secondary_from_center(source_angle, spawn_pixel, partial_energy);
Pixel current_pixel = new_ray.get_current_pixel();
// Secondaries that immediately leave the grid are dropped.
if (out_of_bounds(current_pixel, N))
{
continue;
}
group->my_rays[group->my_size] = new_ray;
group->my_size++;
}
return;
}
////////// THREAD GROUP EVOLUTION //////////
// Stochastic interaction test: the ray interacts in target_pixel with
// probability exp(-PARAM_A / l_ep), where l_ep = density * distance is the
// effective path length inside the pixel.
// NOTE(review): with this formula the probability *increases* toward 1 as
// l_ep grows; confirm this is the intended model rather than the usual
// exp(-l_ep / A) attenuation form.
__device__ bool random_interact(Pixel target_pixel, double distance, double* densities, int N)
{
int i = target_pixel.first, j = target_pixel.second;
double density = densities[i + j * N];
double l_ep = density * distance; // effective path length travelled in pixel
double probability = ::exp(-PARAM_A / l_ep);
hiprandState_t state;
init_curand_state(&state);
double rand = hiprand_uniform_double(&state);
return (rand < probability);
}
// Move energy from the ray into the dose grid at target_pixel. The amount
// actually transferred is unscaled_energy * pixel density, capped at the
// ray's remaining energy.
__device__ void
transfer_energy(Ray* ray, Pixel target_pixel, double unscaled_energy, double* densities, double* doses, int N)
{
int i = target_pixel.first, j = target_pixel.second;
double density = densities[i + j * N];
double energy_to_transfer = unscaled_energy * density; // scale energy by pixel density
double current_ray_energy = ray->get_current_energy();
// Ray cannot transfer more energy than it has
energy_to_transfer = fmin(energy_to_transfer, current_ray_energy);
// Remove energy from ray and add it to pixel dose
ray->set_current_energy(current_ray_energy - energy_to_transfer);
doses[i + j * N] += energy_to_transfer;
return;
}
/*
__device__ void print_buffer(RegroupBuffer*& g_buffer_cuda, int num_ray_groups)
{
int section_size = g_buffer_cuda->section_size;
////printf("section size on buffer: %d\n", section_size);
for (int i = 0; i < num_ray_groups; i++)
{
for (int j = 0; j < section_size; j++)
{
Pixel pixel = g_buffer_cuda->rays[i + j].get_current_pixel();
// printf("%d, %d\t", pixel.first, pixel.second);
}
// printf("\n");
}
// printf("\n");
for (int i = 0; i < num_ray_groups; i++)
{
for (int j = 0; j < section_size; j++)
{
int region_index = g_buffer_cuda->region_indices[i + j];
// printf("%d\t", region_index);
}
// printf("\n");
}
// printf("\n");
for (int i = 0; i < num_ray_groups; i++)
{
int ray_count = g_buffer_cuda->ray_counts[i];
// printf("%d\t", ray_count);
}
// printf("\n***************************\n");
}
*/
// Advance every active ray in `group` by one trace step. Primary rays may
// stochastically interact (deposit dose, spawn secondaries into the same
// group, and lose their energy); secondary rays deposit dose continuously.
// Rays that run out of energy or leave the grid are deactivated; rays that
// cross into another region are deactivated and appended to this thread's
// section of the regroup buffer. Returns the number of rays advanced this
// call (0 when the group has no active rays left).
__device__ int evolve_rays(RayGroup* group,
int region_index,
double* densities,
double* doses,
int N,
int M,
RegroupBuffer* g_buffer_cuda,
int num_ray_groups)
{
int rays_evolved = 0;
int thread_index = blockIdx.x * blockDim.x + threadIdx.x;
for (int i = 0; i < group->my_size; i++)
{
Ray* r = &group->my_rays[i];
// Only evolve active rays
if (r->is_active())
{
// Trace ray
TraceHistory rtrace = r->trace();
Pixel visited_pixel = rtrace.visited;
double travel_distance = rtrace.distance; // distance traveled in visited pixel
rays_evolved++;
if (r->is_primary()) // primary ray
{
if (random_interact(visited_pixel, travel_distance, densities, N))
{
// Interaction: deposit a PARAM_F-scaled fraction, hand the
// remainder to freshly spawned secondaries, then zero out.
double energy_to_deposit = PARAM_F * travel_distance * r->get_current_energy();
transfer_energy(r, visited_pixel, energy_to_deposit, densities, doses, N);
spawn_secondary_rays(group, visited_pixel, r->get_current_energy(), N);
r->set_current_energy(0);
}
}
else // secondary ray
{
// Continuous deposition proportional to the distance traveled.
double energy_to_deposit = PARAM_G * travel_distance;
transfer_energy(r, visited_pixel, energy_to_deposit, densities, doses, N);
}
// Check if the ray is still in the region
int new_region_index = get_region_index(r->get_current_pixel(), N, M);
// Deactivate ray if out of energy or outside of the grid bounds
if (r->get_current_energy() < PARAM_EPSILON || out_of_bounds(r->get_current_pixel(), N))
{
r->deactivate();
}
else if (new_region_index != region_index)
{
// printf("thread index: %d, Ray is out of region %d, deactivating and adding to buffer\n",
r->deactivate();
int buffer_index = thread_index * g_buffer_cuda->section_size +
g_buffer_cuda->ray_counts[thread_index]; // this thread's next index in buffer
g_buffer_cuda->rays[buffer_index] = *r; // add ray to buffer
g_buffer_cuda->region_indices[buffer_index] =
new_region_index; // add destination region index to buffer
g_buffer_cuda->ray_counts[thread_index]++; // update buffer size
}
}
}
return rays_evolved;
}
// Repeatedly advance the group's rays until no active rays remain.
__device__ void evolve_to_completion(RayGroup* group,
int region_index,
double* densities,
double* doses,
int N,
int M,
RegroupBuffer* g_buffer_cuda,
int num_ray_groups)
{
// int thread_index = blockIdx.x * blockDim.x + threadIdx.x;
int rays_evolved = group->my_size;
while (rays_evolved > 0)
{
rays_evolved = evolve_rays(group, region_index, densities, doses, N, M, g_buffer_cuda, num_ray_groups);
// printf("thread %d evolved %d rays\n", thread_index, rays_evolved);
}
// printf("thread %d finished evolving\n", thread_index);
return;
}
// Kernel: one thread per RayGroup in this region. Each thread evolves its
// group's rays to completion, depositing dose into `doses` and appending
// rays that leave the region to its section of g_buffer_cuda.
__global__ void run_rays(RayGroup* region_group_arr,
                         int region_group_arr_size,
                         int region_index,
                         double* densities,
                         double* doses,
                         int N,
                         int M,
                         RegroupBuffer* g_buffer_cuda)
{
    int thread_index = blockIdx.x * blockDim.x + threadIdx.x;
    if (thread_index < region_group_arr_size)
    {
        // Fix: the source contained the mis-encoded token
        // "®ion_group_arr[...]" (a garbled "&region_group_arr[...]"),
        // which does not compile.
        RayGroup* cur_ray_group = &region_group_arr[thread_index];
        // printf("thread index %d: evolving %d rays\n", thread_index, cur_ray_group->my_size);
        evolve_to_completion(cur_ray_group, region_index, densities, doses, N, M, g_buffer_cuda, region_group_arr_size);
    }
    __syncthreads();
    return;
}
////////// REGION GROUP RUNNING AND PROCESSING //////////
// add rays from g_buffer to new regions
//
// Redistribute rays that crossed region boundaries (collected per ray group
// in g_buffer by the device pass) into ray groups of their destination
// regions, reactivating each ray. Primary rays always get a fresh group;
// secondaries are packed into the destination region's last group unless it
// is full or holds a lone primary. Finally frees the buffer's host arrays.
__host__ void regroup(std::vector<RegionGroup>& region_groups, RegroupBuffer* g_buffer, int num_ray_groups)
{
int max_num_rays = g_buffer->section_size;
DEBUG(DB_HOST, std::cout << "Starting regrouping. Num_ray_groups is " << num_ray_groups << std::endl);
for (int i = 0; i < num_ray_groups; i++)
{
int buffer_index = i * g_buffer->section_size; // start of ray group i's section
Ray* rays = &g_buffer->rays[buffer_index]; // array of rays to be regrouped
int* region_indices =
&g_buffer->region_indices[buffer_index]; // array of regions corresponding to array of rays
int num_rays = g_buffer->ray_counts[i]; // number of rays to be regrouped
DEBUG(DB_HOST, std::cout << "Regrouping " << num_rays << " rays." << std::endl);
for (int j = 0; j < num_rays; j++)
{
Ray cur_ray = rays[j]; // current ray to be regrouped
cur_ray.reactivate();
int new_region = region_indices[j]; // region ray is entering
int region_group_size = region_groups[new_region].size(); // number of ray groups already in the region
// A primary ray always starts its own group; an empty region also
// needs a fresh group to hold the incoming ray.
if (cur_ray.is_primary() || region_group_size == 0)
{
RayGroup new_group;
new_group.my_rays = (Ray*)malloc(max_num_rays * sizeof(Ray));
new_group.my_rays[0] = cur_ray;
new_group.my_size = 1;
new_group.max_size = max_num_rays;
region_groups[new_region].push_back(new_group);
}
else
{
RayGroup last_ray_group =
region_groups[new_region][region_group_size - 1]; // last ray group in the region group
DEBUG(DB_HOST, std::cout << "regrouping a ray into region " << new_region << std::endl);
int ray_group_size = last_ray_group.my_size;
// Start a fresh group when the last one holds a single primary
// ray or is already at capacity.
if ((ray_group_size == 1 && last_ray_group.my_rays[0].is_primary()) ||
ray_group_size == max_num_rays)
{
RayGroup new_group;
new_group.my_rays = (Ray*)malloc(max_num_rays * sizeof(Ray));
new_group.my_rays[0] = cur_ray;
new_group.my_size = 1;
new_group.max_size = max_num_rays;
region_groups[new_region].push_back(new_group);
}
else if (ray_group_size < max_num_rays) // if last ray group is not full, add ray to ray group
{
last_ray_group.my_rays[ray_group_size] = cur_ray;
last_ray_group.my_size++;
region_groups[new_region][region_group_size - 1] = last_ray_group;
}
}
}
}
// Free memory of RegroupBuffer data members on host
// NOTE(review): the my_rays arrays malloc'd above are not freed here;
// confirm their ownership passes to whoever consumes region_groups.
free(g_buffer->rays);
free(g_buffer->region_indices);
free(g_buffer->ray_counts);
return;
}
// allocate a regroup buffer on device
// Allocate and zero-initialize a RegroupBuffer on the DEVICE. The buffer has
// one section of max_num_rays rays per ray group (one section per device
// thread), plus matching destination region indices and per-section counts.
// On return, g_buffer_cuda points to device memory.
__host__ void init_regroup_buffer_cuda(RegroupBuffer*& g_buffer_cuda, int max_num_rays, int num_ray_groups)
{
    DEBUG(DB_HOST, std::cout << "Initializing regroup buffer on device" << std::endl);
    // Need to allocate device memory for the members of struct BEFORE copying struct
    // Host can't modify device memory
    RegroupBuffer temp; // temporary buffer on host
    temp.section_size = max_num_rays;
    Ray* rays_cuda;
    // Allocations/memsets wrapped in error_check for consistency with the rest of the file
    error_check(hipMalloc(&rays_cuda, num_ray_groups * max_num_rays * sizeof(Ray)));
    error_check(hipMemset(rays_cuda, 0, num_ray_groups * max_num_rays * sizeof(Ray)));
    temp.rays = rays_cuda;
    int* region_indices_cuda;
    // BUGFIX: was "hipMalloc(®ion_indices_cuda, ...)" — mojibake for "&region_indices_cuda"
    error_check(hipMalloc(&region_indices_cuda, num_ray_groups * max_num_rays * sizeof(int)));
    error_check(hipMemset(region_indices_cuda, 0, num_ray_groups * max_num_rays * sizeof(int)));
    temp.region_indices = region_indices_cuda;
    int* ray_counts_cuda;
    error_check(hipMalloc(&ray_counts_cuda, num_ray_groups * sizeof(int)));
    error_check(hipMemset(ray_counts_cuda, 0, num_ray_groups * sizeof(int))); // initialize all ray counts to zero
    temp.ray_counts = ray_counts_cuda;
    // Now allocate device memory for buffer and copy from host to device
    error_check(hipMalloc(&g_buffer_cuda, sizeof(RegroupBuffer)));
    error_check(hipMemcpy(g_buffer_cuda, &temp, sizeof(RegroupBuffer), hipMemcpyHostToDevice));
    DEBUG(DB_HOST, std::cout << "Done" << std::endl);
    return;
}
// allocate a regroup buffer on host and copy the contents of device's regroup buffer to it
// Allocate a RegroupBuffer on the host (output parameter g_buffer) and copy
// the device buffer's contents into it. The device-side data arrays and the
// temporary host struct are freed here; the caller still owns (and frees)
// g_buffer_cuda itself and the returned g_buffer.
__host__ void
copy_regroup_buffer_host(RegroupBuffer*& g_buffer, RegroupBuffer*& g_buffer_cuda, int max_num_rays, int num_ray_groups)
{
DEBUG(DB_HOST, std::cout << "Allocating memory for buffer on host" << std::endl);
g_buffer = (RegroupBuffer*)malloc(sizeof(RegroupBuffer));
g_buffer->rays = (Ray*)malloc(num_ray_groups * max_num_rays * sizeof(Ray));
g_buffer->region_indices = (int*)malloc(num_ray_groups * max_num_rays * sizeof(int));
g_buffer->ray_counts = (int*)malloc(num_ray_groups * sizeof(int));
// temp_buffer receives the device struct so its device pointers can be read on the host
RegroupBuffer* temp_buffer = (RegroupBuffer*)malloc(sizeof(RegroupBuffer));
error_check(hipMemcpy(temp_buffer, g_buffer_cuda, sizeof(RegroupBuffer), hipMemcpyDeviceToHost));
g_buffer->section_size = temp_buffer->section_size;
DEBUG(DB_HOST, std::cout << "Copying buffer from device to host" << std::endl);
error_check(hipMemcpy(g_buffer->rays, temp_buffer->rays, num_ray_groups * max_num_rays * sizeof(Ray),
hipMemcpyDeviceToHost));
error_check(hipMemcpy(g_buffer->region_indices, temp_buffer->region_indices,
num_ray_groups * max_num_rays * sizeof(int), hipMemcpyDeviceToHost));
error_check(hipMemcpy(g_buffer->ray_counts, temp_buffer->ray_counts, num_ray_groups * sizeof(int),
hipMemcpyDeviceToHost));
// Free memory of RegroupBuffer data members on device, and temp buffer on host
hipFree(temp_buffer->rays);
hipFree(temp_buffer->region_indices);
hipFree(temp_buffer->ray_counts);
free(temp_buffer);
DEBUG(DB_HOST, std::cout << "Done" << std::endl);
return;
}
// Build the forward sweep schedule: linear region indices enumerated along
// the anti-diagonals of the L x L region grid, top-left diagonal first.
__host__ std::vector<int> get_forward_schedule(int L)
{
    std::vector<int> schedule;
    // Each value of diag selects one anti-diagonal of the grid
    for (int diag = 0; diag <= 2 * (L - 1); diag++)
    {
        // Walk the diagonal from its bottom-left cell up and to the right
        int row = min(diag, L - 1);
        int col = max(0, diag + 1 - L);
        for (; row >= 0 && col < L; row--, col++)
        {
            schedule.push_back(row + col * L);
        }
    }
    return schedule;
}
// Top-level host driver: sweep the regions in anti-diagonal order (forward
// then reverse), running each non-empty region's ray groups on the GPU and
// regrouping escaped rays into their destination regions. Repeats whole
// passes until a pass finds no region with rays left.
__host__ void run_region_groups(std::vector<RegionGroup>& region_groups, double* densities, double* doses, int N, int M)
{
// Get the schedule
int L = N / M; // number of regions per side
std::vector<int> forward_sched = get_forward_schedule(L); // linear indices of regions in diagonal order
std::vector<int> full_sched = forward_sched;
full_sched.insert(full_sched.end(), forward_sched.rbegin(), forward_sched.rend()); // concat forward and reverse
// Go through schedule in passes until complete
bool active_rays = true;
while (active_rays) // keep going until no rays are left
{
DEBUG(DB_HOST, std::cout << "Pass" << std::endl);
active_rays = false;
for (std::vector<int>::iterator it = full_sched.begin(); it != full_sched.end(); it++)
{
int region_index = *it;
RegionGroup& cur_region_group = region_groups[region_index]; // current region group REFERENCE
DEBUG(DB_HOST, std::cout << "Running region group " << region_index << std::endl);
DEBUG(DB_HOST, std::cout << "It has " << region_groups[region_index].size() << " ray groups" << std::endl);
// Only do things if the region has ray groups
int cur_region_group_size = cur_region_group.size();
if (cur_region_group_size > 0)
{
active_rays = true;
RegroupBuffer* g_buffer; // empty host regroup buffer. Will be filled in by run_region_group
run_region_group(cur_region_group, region_index, densities, doses, N, M, g_buffer); // run group
regroup(region_groups, g_buffer, cur_region_group_size); // regroup
free(g_buffer); // free host buffer (regroup already freed its data arrays)
}
}
}
return;
}
// Run every ray group of one region group on the GPU, then return (via the
// g_buffer output parameter) a HOST RegroupBuffer holding the rays that
// escaped the region. One GPU thread is launched per ray group. The region
// group's host-side ray storage is consumed: rays are moved to the device,
// run, and the vector is cleared before returning.
__host__ void run_region_group(RegionGroup& region_group,
                               int region_index,
                               double* densities,
                               double* doses,
                               int N,
                               int M,
                               RegroupBuffer*& g_buffer)
{
    // Set device memory limits
    hipDeviceSetLimit(hipLimitMallocHeapSize, GPU_HEAP_LIMIT);
    // First copy rays to ray groups on device, done by just replacing host pointers with device pointers
    int num_ray_groups = region_group.size(); // number of ray groups in current region group
    DEBUG(DB_HOST, std::cout << "Copying rays from host to device" << std::endl);
    int max_num_rays = region_group[0].max_size; // all ray groups have same max size so just get any max size
    for (int g = 0; g < num_ray_groups; g++)
    {
        Ray* rays_cuda;
        hipMalloc(&rays_cuda, max_num_rays * sizeof(Ray)); // allocated memory on device
        hipMemcpy(rays_cuda, region_group[g].my_rays, max_num_rays * sizeof(Ray),
                  hipMemcpyHostToDevice); // copy from host to device
        Ray* old_host_rays_ptr = region_group[g].my_rays; // pointer to rays on host
        region_group[g].my_rays = rays_cuda; // this is now a device pointer NOT a host pointer
        free(old_host_rays_ptr); // free host memory
    }
    DEBUG(DB_HOST, std::cout << "Copying ray groups from host to device" << std::endl);
    // Copy region group to GPU (std::vector on host to array on device)
    RayGroup* region_group_cuda_arr;
    // BUGFIX: was "hipMalloc(®ion_group_cuda_arr, ...)" — mojibake for "&region_group_cuda_arr"
    hipMalloc(&region_group_cuda_arr, num_ray_groups * sizeof(RayGroup)); // allocated memory on device
    hipMemcpy(region_group_cuda_arr, region_group.data(), num_ray_groups * sizeof(RayGroup),
              hipMemcpyHostToDevice); // copy from host to device
    // Allocate regroup buffer on DEVICE
    RegroupBuffer* g_buffer_cuda; // empty device regroup buffer
    init_regroup_buffer_cuda(g_buffer_cuda, max_num_rays, num_ray_groups); // allocate device regroup buffer
    // Run thread groups in parallel: one thread per ray group
    int grid_size = 1 + num_ray_groups / GPU_BLOCK_SIZE;
    int block_size = GPU_BLOCK_SIZE;
    DEBUG(DB_HOST,
          std::cout << "Calling run_rays with grid_size,block_size " << grid_size << "," << block_size << std::endl);
    hipLaunchKernelGGL(( run_rays), dim3(grid_size), dim3(block_size), 0, 0, region_group_cuda_arr, num_ray_groups, region_index, densities, doses, N, M,
        g_buffer_cuda);
    // Wait for GPU computation to finish
    error_check(hipDeviceSynchronize());
    // Copy g_buffer back to host buffer
    copy_regroup_buffer_host(g_buffer, g_buffer_cuda, max_num_rays,
                             num_ray_groups); // copy g_buffer back to host buffer
    // Free device memory
    // First free ray group pointers on device, which are still stored on host
    for (int g = 0; g < num_ray_groups; g++)
    {
        hipFree(region_group[g].my_rays);
    }
    hipFree(g_buffer_cuda);
    hipFree(region_group_cuda_arr);
    // Clear region group vector because we messed with its memory, and its rays are all going to be run
    region_group.clear();
    return;
}
|
2fa0341cd463ba5be885c1c07edde1853cbf2cbb.cu
|
#include "le-simulation.cuh"
#include <cmath>
#include <fstream>
#include <iostream>
#include <random>
// Needed to move this to .cu
// Host-side RNG state shared by the density-initialization and ray-spawning routines
std::default_random_engine random_engine{(uint_fast32_t)time(0)}; // seeded random number generator
std::uniform_real_distribution<double> uniform_dist{0.0, 1.0}; // uniform distribution, pass {lowerbound, upperbound}
std::normal_distribution<double> normal_dist{PARAM_MEAN, PARAM_SIGMA}; // normal distribution, pass {mean, stddev}
////////// GRID INITIALIZATION //////////
// Zero out the N x N dose grid (element (i, j) stored at i + j * N).
__host__ void initialize_doses(double* doses, int N)
{
    for (int col = 0; col < N; col++)
    {
        for (int row = 0; row < N; row++)
        {
            doses[row + col * N] = 0;
        }
    }
    return;
}
// Fill the N x N density grid with uniform random values drawn from the
// module-level uniform_dist / random_engine.
__host__ void initialize_densities_random(double* densities, int N)
{
    for (int x = 0; x < N; x++)
    {
        for (int y = 0; y < N; y++)
        {
            densities[x + y * N] = uniform_dist(random_engine);
        }
    }
    return;
}
// Set every cell of the N x N density grid to the given constant density.
__host__ void initialize_densities_constant(double* densities, int N, double density)
{
    // A single flat pass covers all N * N cells
    int total = N * N;
    for (int k = 0; k < total; k++)
    {
        densities[k] = density;
    }
    return;
}
// Fill the N x N density grid with a Gaussian bump centered on the grid.
// max_density is the peak value (at the center pixel); spread scales the
// standard deviation as a fraction of N.
__host__ void initialize_densities_centered_gaussian(double* densities, int N, double max_density, double spread)
{
    // Loop-invariant values hoisted out of the inner loops
    int mid = N / 2; // middle pixel
    double std_dev = spread * N;
    double denom = 2 * std_dev * std_dev;
    for (int i = 0; i < N; i++)
    {
        for (int j = 0; j < N; j++)
        {
            int x = i - mid; // offset from center
            int y = j - mid;
            densities[i + j * N] = max_density * exp(-(x * x + y * y) / denom);
        }
    }
    return;
}
// Fill the N x N density grid with n_gaussians Gaussian bumps at uniformly
// random centers, then normalize so the highest cell equals max_density.
// spread scales each Gaussian's standard deviation as a fraction of N.
// NOTE(review): accumulates with "+=", so this assumes densities[] was
// zero-initialized by the caller — confirm.
__host__ void
initialize_densities_random_gaussians(double* densities, int N, int n_gaussians, double max_density, double spread)
{
    double std_dev = spread * N;
    double highest = 0;
    // Add the Gaussians
    for (int k = 0; k < n_gaussians; k++)
    {
        int mid_x = floor(uniform_dist(random_engine) * N);
        int mid_y = floor(uniform_dist(random_engine) * N);
        for (int i = 0; i < N; i++)
        {
            for (int j = 0; j < N; j++)
            {
                int x = i - mid_x;
                int y = j - mid_y;
                densities[i + j * N] += max_density * exp(-(x * x + y * y) / (2 * std_dev * std_dev));
                // BUGFIX: was densities[i * N + j] — transposed indexing
                // relative to the i + j * N writes above
                highest = fmax(highest, densities[i + j * N]);
            }
        }
    }
    // Normalize the resulting density distribution
    for (int i = 0; i < N; i++)
    {
        for (int j = 0; j < N; j++)
        {
            // BUGFIX: was densities[i * N + j] — read the transposed cell,
            // normalizing some cells twice and others not at all
            densities[i + j * N] = max_density * densities[i + j * N] / highest;
        }
    }
    return;
}
////////// OUTPUT //////////
// Write an N x N grid (element (i, j) stored at i + j * N) to a CSV file:
// one text row per j, values comma-separated, newline-terminated.
__host__ void write_to_csv_file(double* grid_data, int N, const std::string& filename)
{
    std::ofstream output(filename);
    for (int j = 0; j < N; j++)
    {
        for (int i = 0; i < N; i++)
        {
            output << grid_data[i + j * N];
            // Comma between values, newline after the last column
            output << ((i == N - 1) ? "\n" : ",");
        }
    }
    output.close();
    return;
}
////////// RANDOMIZATION //////////
// Initialize a per-thread cuRAND state, seeded from the device clock with the
// global thread index as the sequence number.
// NOTE(review): clock()-based seeding makes runs non-reproducible — confirm
// that is acceptable.
__device__ void init_curand_state(curandState_t* state)
{
// Initialize random kernel
int tId = threadIdx.x + (blockIdx.x * blockDim.x);
curand_init((unsigned long long)clock(), tId, 0, state);
return;
}
// Random angle: 2 * pi times a curand uniform double.
__device__ double uniform_angle_dist(curandState_t* state) { return 2 * M_PI * curand_uniform_double(state); }
// Normally distributed random angle with the given mean and standard deviation.
__device__ double normal_angle_dist(curandState_t* state, double mean, double std_dev)
{
return mean + std_dev * curand_normal_double(state);
}
// Device-side random source angle drawn uniformly over a full revolution;
// a draw within PARAM_EPSILON of 2 * pi is snapped to exactly 0.
__device__ double random_source_angle_uniform()
{
curandState state;
init_curand_state(&state);
double angle = uniform_angle_dist(&state);
// Angle of 2 pi goes to 0
if (abs(angle - 2 * M_PI) < PARAM_EPSILON)
{
angle = 0.0;
}
return angle;
}
// Draw a source angle from the host-side normal distribution (normal_dist /
// random_engine globals) and wrap it into the interval [0, 2 * pi).
__host__ double random_source_angle_normal()
{
    const double two_pi = 2 * M_PI;
    double angle = normal_dist(random_engine);
    // Wrap into range one revolution at a time
    while (angle < 0.0)
    {
        angle += two_pi;
    }
    while (angle >= two_pi)
    {
        angle -= two_pi;
    }
    return angle;
}
////////// RAY LOCATION CHECKING //////////
// Map a pixel to the coordinates of the region containing it; each region is
// an M x M tile of the grid. (N is unused here; kept for a uniform signature
// with get_region_index.)
__host__ __device__ Region get_region(Pixel position, int N, int M)
{
int px = position.first;
int py = position.second;
Region region;
region.first = px / M;
region.second = py / M;
return region;
}
// Linear index of the region containing `position`, computed as
// first + second * L over the L x L grid of regions.
__host__ __device__ int get_region_index(Pixel position, int N, int M)
{
Region region = get_region(position, N, M);
int L = N / M; // number of regions per side
return region.first + region.second * L;
}
// True if the pixel lies outside the N x N grid in either coordinate.
__device__ bool out_of_bounds(Pixel current_pixel, int N)
{
return (current_pixel.first < 0 || current_pixel.first >= N || current_pixel.second < 0 ||
current_pixel.second >= N);
}
////////// RAY CREATION //////////
// Spawn up to num_primary_rays primary rays from a source above the grid.
// Each ray's angle is drawn from the normal distribution; its entry point on
// the top edge is PARAM_D * N * tan(angle) pixels from the edge's center.
// Rays that miss the grid (or point away from it) are discarded. Each spawned
// ray is wrapped in its own single-ray RayGroup (capacity
// max_rays_per_ray_group) and appended to the region group of the region
// containing its entry pixel.
__host__ void spawn_primary_rays(
std::vector<RegionGroup>& region_groups, int num_primary_rays, int max_rays_per_ray_group, int N, int M)
{
int L = N / M; // number of regions per side
for (int i = 0; i < num_primary_rays; i++)
{
// Randomly select source angle from normal distribution
double source_angle = random_source_angle_normal();
// Calculate initial ray position
double horiz_dist_from_center = PARAM_D * N * tan(source_angle); // horizontal distance from center of top edge
int middle_pixel = N / 2;
double horiz_dist_from_left = middle_pixel + horiz_dist_from_center;
// Check if ray missed the grid entirely, or points away from it
if (horiz_dist_from_left < 0 || horiz_dist_from_left >= N ||
(source_angle >= M_PI / 2 && source_angle <= 3 * M_PI / 2))
{
continue;
}
// If not, spawn it
double horiz_dist_from_left_rounded = floor(horiz_dist_from_left);
Pixel spawn_pixel;
spawn_pixel.first = horiz_dist_from_left_rounded;
spawn_pixel.second = 0; // always starts from top of grid
double edge_dist = horiz_dist_from_left - horiz_dist_from_left_rounded; // fractional offset within the pixel
Ray r = Ray::primary(source_angle, spawn_pixel, PIXEL_EDGE::TOP, edge_dist);
// Create new ray group for primary ray and add it
RayGroup primary_ray_group;
primary_ray_group.my_rays = (Ray*)malloc(max_rays_per_ray_group * sizeof(Ray));
primary_ray_group.max_size = max_rays_per_ray_group;
primary_ray_group.my_rays[0] = r; // add the new ray
primary_ray_group.my_size = 1;
// Add the new ray group to the appropriate region
Region region = get_region(spawn_pixel, N, M);
int region_index = region.first + region.second * L; // index of region within vector of region groups
region_groups[region_index].push_back(primary_ray_group);
}
return;
}
// Spawn PARAM_KS secondary rays from the center of spawn_pixel, splitting
// total_energy evenly among them, and append the in-bounds ones to group.
// Secondary rays that would start outside the grid are skipped.
__device__ void spawn_secondary_rays(RayGroup* group, Pixel spawn_pixel, double total_energy, int N)
{
    double partial_energy = total_energy / PARAM_KS; // even energy split (loop-invariant)
    for (int i = 0; i < PARAM_KS; i++)
    {
        // BUGFIX: guard against overflowing the group's fixed-capacity ray
        // array (the original wrote past my_rays once my_size hit max_size)
        if (group->my_size >= group->max_size)
        {
            break;
        }
        double source_angle = random_source_angle_uniform(); // uniform random source angle
        Ray new_ray = Ray::secondary_from_center(source_angle, spawn_pixel, partial_energy);
        Pixel current_pixel = new_ray.get_current_pixel();
        if (out_of_bounds(current_pixel, N))
        {
            continue;
        }
        group->my_rays[group->my_size] = new_ray;
        group->my_size++;
    }
    return;
}
////////// THREAD GROUP EVOLUTION //////////
// Stochastic interaction test for a ray crossing target_pixel. The
// interaction probability exp(-PARAM_A / l_ep) rises toward 1 as the
// effective path length l_ep = density * distance grows.
// NOTE(review): l_ep == 0 gives a -inf exponent (probability 0) — confirm
// this limiting behavior is intended for zero-density pixels.
__device__ bool random_interact(Pixel target_pixel, double distance, double* densities, int N)
{
int i = target_pixel.first, j = target_pixel.second;
double density = densities[i + j * N];
double l_ep = density * distance; // effective path length travelled in pixel
double probability = std::exp(-PARAM_A / l_ep);
curandState state;
init_curand_state(&state);
double rand = curand_uniform_double(&state);
return (rand < probability);
}
// Move energy from a ray into the dose grid at target_pixel. The unscaled
// amount is multiplied by the pixel's density, then clamped so the ray's
// remaining energy never goes negative.
__device__ void
transfer_energy(Ray* ray, Pixel target_pixel, double unscaled_energy, double* densities, double* doses, int N)
{
int i = target_pixel.first, j = target_pixel.second;
double density = densities[i + j * N];
double energy_to_transfer = unscaled_energy * density; // scale energy by pixel density
double current_ray_energy = ray->get_current_energy();
// Ray cannot transfer more energy than it has
energy_to_transfer = fmin(energy_to_transfer, current_ray_energy);
// Remove energy from ray and add it to pixel dose
ray->set_current_energy(current_ray_energy - energy_to_transfer);
doses[i + j * N] += energy_to_transfer;
return;
}
/*
__device__ void print_buffer(RegroupBuffer*& g_buffer_cuda, int num_ray_groups)
{
int section_size = g_buffer_cuda->section_size;
////printf("section size on buffer: %d\n", section_size);
for (int i = 0; i < num_ray_groups; i++)
{
for (int j = 0; j < section_size; j++)
{
Pixel pixel = g_buffer_cuda->rays[i + j].get_current_pixel();
// printf("%d, %d\t", pixel.first, pixel.second);
}
// printf("\n");
}
// printf("\n");
for (int i = 0; i < num_ray_groups; i++)
{
for (int j = 0; j < section_size; j++)
{
int region_index = g_buffer_cuda->region_indices[i + j];
// printf("%d\t", region_index);
}
// printf("\n");
}
// printf("\n");
for (int i = 0; i < num_ray_groups; i++)
{
int ray_count = g_buffer_cuda->ray_counts[i];
// printf("%d\t", ray_count);
}
// printf("\n***************************\n");
}
*/
// Advance every active ray in the group by one trace step. Primary rays may
// randomly interact: they deposit energy, spawn secondary rays into this same
// group, and lose all energy. Secondary rays deposit a distance-proportional
// amount each step. Rays that run out of energy or leave the grid are
// deactivated; rays that cross into another region are deactivated and
// recorded in this thread's section of g_buffer_cuda for host regrouping.
// Returns the number of rays advanced this call.
// NOTE: spawn_secondary_rays grows group->my_size mid-loop, so newly spawned
// secondaries are picked up later in this same pass.
__device__ int evolve_rays(RayGroup* group,
int region_index,
double* densities,
double* doses,
int N,
int M,
RegroupBuffer* g_buffer_cuda,
int num_ray_groups)
{
int rays_evolved = 0;
int thread_index = blockIdx.x * blockDim.x + threadIdx.x;
for (int i = 0; i < group->my_size; i++)
{
Ray* r = &group->my_rays[i];
// Only evolve active rays
if (r->is_active())
{
// Trace ray
TraceHistory rtrace = r->trace();
Pixel visited_pixel = rtrace.visited;
double travel_distance = rtrace.distance; // distance traveled in visited pixel
rays_evolved++;
if (r->is_primary()) // primary ray
{
if (random_interact(visited_pixel, travel_distance, densities, N))
{
// Interaction: deposit a fraction of energy, then hand the rest to secondaries
double energy_to_deposit = PARAM_F * travel_distance * r->get_current_energy();
transfer_energy(r, visited_pixel, energy_to_deposit, densities, doses, N);
spawn_secondary_rays(group, visited_pixel, r->get_current_energy(), N);
r->set_current_energy(0);
}
}
else // secondary ray
{
double energy_to_deposit = PARAM_G * travel_distance;
transfer_energy(r, visited_pixel, energy_to_deposit, densities, doses, N);
}
// Check if the ray is still in the region
int new_region_index = get_region_index(r->get_current_pixel(), N, M);
// Deactivate ray if out of energy or outside of the grid bounds
if (r->get_current_energy() < PARAM_EPSILON || out_of_bounds(r->get_current_pixel(), N))
{
r->deactivate();
}
else if (new_region_index != region_index)
{
// Ray left the region: deactivate it here and queue it for regrouping
r->deactivate();
int buffer_index = thread_index * g_buffer_cuda->section_size +
g_buffer_cuda->ray_counts[thread_index]; // this thread's next index in buffer
g_buffer_cuda->rays[buffer_index] = *r; // add ray to buffer
g_buffer_cuda->region_indices[buffer_index] =
new_region_index; // add destination region index to buffer
g_buffer_cuda->ray_counts[thread_index]++; // update buffer size
}
}
}
return rays_evolved;
}
// Repeatedly step all rays in the group until none remain active in this
// region (each evolve_rays pass returns how many rays it advanced).
__device__ void evolve_to_completion(RayGroup* group,
int region_index,
double* densities,
double* doses,
int N,
int M,
RegroupBuffer* g_buffer_cuda,
int num_ray_groups)
{
// Seed with my_size so the loop body runs at least once for non-empty groups
int rays_evolved = group->my_size;
while (rays_evolved > 0)
{
rays_evolved = evolve_rays(group, region_index, densities, doses, N, M, g_buffer_cuda, num_ray_groups);
}
return;
}
// Kernel: one thread per RayGroup in region_group_arr. Each in-bounds thread
// evolves its group's rays to completion within region_index; rays that leave
// the region are recorded in g_buffer_cuda for host-side regrouping.
__global__ void run_rays(RayGroup* region_group_arr,
                         int region_group_arr_size,
                         int region_index,
                         double* densities,
                         double* doses,
                         int N,
                         int M,
                         RegroupBuffer* g_buffer_cuda)
{
    int thread_index = blockIdx.x * blockDim.x + threadIdx.x;
    if (thread_index < region_group_arr_size) // guard the grid tail
    {
        // BUGFIX: was "®ion_group_arr" — mojibake for "&region_group_arr"
        RayGroup* cur_ray_group = &region_group_arr[thread_index];
        evolve_to_completion(cur_ray_group, region_index, densities, doses, N, M, g_buffer_cuda, region_group_arr_size);
    }
    __syncthreads();
    return;
}
////////// REGION GROUP RUNNING AND PROCESSING //////////
// add rays from g_buffer to new regions
//
// Distribute the escaped rays collected in the host regroup buffer into the
// ray groups of the regions they are entering. Buffer section i holds the
// rays recorded by device thread i. A primary ray always gets a fresh
// single-ray group; other rays are packed into the destination region's last
// ray group when it has room. Frees the buffer's data arrays on return (the
// caller frees the RegroupBuffer struct itself).
__host__ void regroup(std::vector<RegionGroup>& region_groups, RegroupBuffer* g_buffer, int num_ray_groups)
{
int max_num_rays = g_buffer->section_size;
DEBUG(DB_HOST, std::cout << "Starting regrouping. Num_ray_groups is " << num_ray_groups << std::endl);
for (int i = 0; i < num_ray_groups; i++)
{
int buffer_index = i * g_buffer->section_size; // index into single ray groups data
Ray* rays = &g_buffer->rays[buffer_index]; // array of rays to be regrouped
int* region_indices =
&g_buffer->region_indices[buffer_index]; // array of regions corresponding to array of rays
int num_rays = g_buffer->ray_counts[i]; // number of rays to be regrouped
DEBUG(DB_HOST, std::cout << "Regrouping " << num_rays << " rays." << std::endl);
for (int j = 0; j < num_rays; j++)
{
Ray cur_ray = rays[j]; // current ray to be regrouped
cur_ray.reactivate(); // rays were deactivated on the device when they left their region
int new_region = region_indices[j]; // region ray is entering
int region_group_size = region_groups[new_region].size(); // number of ray groups already in the region
if (cur_ray.is_primary() || region_group_size == 0)
{
// A primary ray (or the first ray of an empty region) starts a new group
RayGroup new_group;
new_group.my_rays = (Ray*)malloc(max_num_rays * sizeof(Ray));
new_group.my_rays[0] = cur_ray;
new_group.my_size = 1;
new_group.max_size = max_num_rays;
region_groups[new_region].push_back(new_group);
}
else
{
RayGroup last_ray_group =
region_groups[new_region][region_group_size - 1]; // last ray group in the region group
DEBUG(DB_HOST, std::cout << "regrouping a ray into region " << new_region << std::endl);
int ray_group_size = last_ray_group.my_size;
if ((ray_group_size == 1 && last_ray_group.my_rays[0].is_primary()) ||
ray_group_size == max_num_rays) // last group holds a lone primary ray, or is already full
{
RayGroup new_group;
new_group.my_rays = (Ray*)malloc(max_num_rays * sizeof(Ray));
new_group.my_rays[0] = cur_ray;
new_group.my_size = 1;
new_group.max_size = max_num_rays;
region_groups[new_region].push_back(new_group);
}
else if (ray_group_size < max_num_rays) // if last ray group is not full, add ray to ray group
{
last_ray_group.my_rays[ray_group_size] = cur_ray;
last_ray_group.my_size++;
region_groups[new_region][region_group_size - 1] = last_ray_group;
}
}
}
}
// Free memory of RegroupBuffer data members on host
free(g_buffer->rays);
free(g_buffer->region_indices);
free(g_buffer->ray_counts);
return;
}
// allocate a regroup buffer on device
__host__ void init_regroup_buffer_cuda(RegroupBuffer*& g_buffer_cuda, int max_num_rays, int num_ray_groups)
{
DEBUG(DB_HOST, std::cout << "Initializing regroup buffer on device" << std::endl);
// Need to allocate device memory for the members of struct BEFORE copying struct
// Host can't modify device memory
RegroupBuffer temp; // temporary buffer on host
temp.section_size = max_num_rays;
Ray* rays_cuda;
cudaMalloc(&rays_cuda, num_ray_groups * max_num_rays * sizeof(Ray));
cudaMemset(rays_cuda, 0, num_ray_groups * max_num_rays * sizeof(Ray));
temp.rays = rays_cuda;
int* region_indices_cuda;
cudaMalloc(®ion_indices_cuda, num_ray_groups * max_num_rays * sizeof(int));
cudaMemset(region_indices_cuda, 0, num_ray_groups * max_num_rays * sizeof(int));
temp.region_indices = region_indices_cuda;
int* ray_counts_cuda;
cudaMalloc(&ray_counts_cuda, num_ray_groups * sizeof(int));
cudaMemset(ray_counts_cuda, 0, num_ray_groups * sizeof(int)); // initialize all ray counts to zero
temp.ray_counts = ray_counts_cuda;
// Now allocate device memory for buffer and copy from host to device
cudaMalloc(&g_buffer_cuda, sizeof(RegroupBuffer));
error_check(cudaMemcpy(g_buffer_cuda, &temp, sizeof(RegroupBuffer), cudaMemcpyHostToDevice));
DEBUG(DB_HOST, std::cout << "Done" << std::endl);
return;
}
// allocate a regroup buffer on host and copy the contents of device's regroup buffer to it
// The device-side data arrays and the temporary host struct are freed here;
// the caller still owns (and frees) g_buffer_cuda itself and the returned g_buffer.
__host__ void
copy_regroup_buffer_host(RegroupBuffer*& g_buffer, RegroupBuffer*& g_buffer_cuda, int max_num_rays, int num_ray_groups)
{
DEBUG(DB_HOST, std::cout << "Allocating memory for buffer on host" << std::endl);
g_buffer = (RegroupBuffer*)malloc(sizeof(RegroupBuffer));
g_buffer->rays = (Ray*)malloc(num_ray_groups * max_num_rays * sizeof(Ray));
g_buffer->region_indices = (int*)malloc(num_ray_groups * max_num_rays * sizeof(int));
g_buffer->ray_counts = (int*)malloc(num_ray_groups * sizeof(int));
// temp_buffer receives the device struct so its device pointers can be read on the host
RegroupBuffer* temp_buffer = (RegroupBuffer*)malloc(sizeof(RegroupBuffer));
error_check(cudaMemcpy(temp_buffer, g_buffer_cuda, sizeof(RegroupBuffer), cudaMemcpyDeviceToHost));
g_buffer->section_size = temp_buffer->section_size;
DEBUG(DB_HOST, std::cout << "Copying buffer from device to host" << std::endl);
error_check(cudaMemcpy(g_buffer->rays, temp_buffer->rays, num_ray_groups * max_num_rays * sizeof(Ray),
cudaMemcpyDeviceToHost));
error_check(cudaMemcpy(g_buffer->region_indices, temp_buffer->region_indices,
num_ray_groups * max_num_rays * sizeof(int), cudaMemcpyDeviceToHost));
error_check(cudaMemcpy(g_buffer->ray_counts, temp_buffer->ray_counts, num_ray_groups * sizeof(int),
cudaMemcpyDeviceToHost));
// Free memory of RegroupBuffer data members on device, and temp buffer on host
cudaFree(temp_buffer->rays);
cudaFree(temp_buffer->region_indices);
cudaFree(temp_buffer->ray_counts);
free(temp_buffer);
DEBUG(DB_HOST, std::cout << "Done" << std::endl);
return;
}
// Enumerate the L x L region grid's linear indices in anti-diagonal order
// (the "forward" sweep used when scheduling region execution).
__host__ std::vector<int> get_forward_schedule(int L)
{
    std::vector<int> schedule;
    int num_diagonals = 2 * L - 1;
    for (int d = 0; d < num_diagonals; d++)
    {
        // Start at the bottom-left cell of diagonal d, walk up and right
        int r = min(d, L - 1);
        int c = max(0, d + 1 - L);
        while (r >= 0 && c < L)
        {
            schedule.push_back(r + c * L);
            r--;
            c++;
        }
    }
    return schedule;
}
// Top-level host driver: sweep the regions in anti-diagonal order (forward
// then reverse), running each non-empty region's ray groups on the GPU and
// regrouping escaped rays into their destination regions. Repeats whole
// passes until a pass finds no region with rays left.
__host__ void run_region_groups(std::vector<RegionGroup>& region_groups, double* densities, double* doses, int N, int M)
{
// Get the schedule
int L = N / M; // number of regions per side
std::vector<int> forward_sched = get_forward_schedule(L); // linear indices of regions in diagonal order
std::vector<int> full_sched = forward_sched;
full_sched.insert(full_sched.end(), forward_sched.rbegin(), forward_sched.rend()); // concat forward and reverse
// Go through schedule in passes until complete
bool active_rays = true;
while (active_rays) // keep going until no rays are left
{
DEBUG(DB_HOST, std::cout << "Pass" << std::endl);
active_rays = false;
for (std::vector<int>::iterator it = full_sched.begin(); it != full_sched.end(); it++)
{
int region_index = *it;
RegionGroup& cur_region_group = region_groups[region_index]; // current region group REFERENCE
DEBUG(DB_HOST, std::cout << "Running region group " << region_index << std::endl);
DEBUG(DB_HOST, std::cout << "It has " << region_groups[region_index].size() << " ray groups" << std::endl);
// Only do things if the region has ray groups
int cur_region_group_size = cur_region_group.size();
if (cur_region_group_size > 0)
{
active_rays = true;
RegroupBuffer* g_buffer; // empty host regroup buffer. Will be filled in by run_region_group
run_region_group(cur_region_group, region_index, densities, doses, N, M, g_buffer); // run group
regroup(region_groups, g_buffer, cur_region_group_size); // regroup
free(g_buffer); // free host buffer (regroup already freed its data arrays)
}
}
}
return;
}
// Run every ray group of one region group on the GPU, then return (via the
// g_buffer output parameter) a HOST RegroupBuffer holding the rays that
// escaped the region. One GPU thread is launched per ray group. The region
// group's host-side ray storage is consumed: rays are moved to the device,
// run, and the vector is cleared before returning.
__host__ void run_region_group(RegionGroup& region_group,
                               int region_index,
                               double* densities,
                               double* doses,
                               int N,
                               int M,
                               RegroupBuffer*& g_buffer)
{
    // Set device memory limits
    cudaDeviceSetLimit(cudaLimitMallocHeapSize, GPU_HEAP_LIMIT);
    // First copy rays to ray groups on device, done by just replacing host pointers with device pointers
    int num_ray_groups = region_group.size(); // number of ray groups in current region group
    DEBUG(DB_HOST, std::cout << "Copying rays from host to device" << std::endl);
    int max_num_rays = region_group[0].max_size; // all ray groups have same max size so just get any max size
    for (int g = 0; g < num_ray_groups; g++)
    {
        Ray* rays_cuda;
        cudaMalloc(&rays_cuda, max_num_rays * sizeof(Ray)); // allocated memory on device
        cudaMemcpy(rays_cuda, region_group[g].my_rays, max_num_rays * sizeof(Ray),
                   cudaMemcpyHostToDevice); // copy from host to device
        Ray* old_host_rays_ptr = region_group[g].my_rays; // pointer to rays on host
        region_group[g].my_rays = rays_cuda; // this is now a device pointer NOT a host pointer
        free(old_host_rays_ptr); // free host memory
    }
    DEBUG(DB_HOST, std::cout << "Copying ray groups from host to device" << std::endl);
    // Copy region group to GPU (std::vector on host to array on device)
    RayGroup* region_group_cuda_arr;
    // BUGFIX: was "cudaMalloc(®ion_group_cuda_arr, ...)" — mojibake for "&region_group_cuda_arr"
    cudaMalloc(&region_group_cuda_arr, num_ray_groups * sizeof(RayGroup)); // allocated memory on device
    cudaMemcpy(region_group_cuda_arr, region_group.data(), num_ray_groups * sizeof(RayGroup),
               cudaMemcpyHostToDevice); // copy from host to device
    // Allocate regroup buffer on DEVICE
    RegroupBuffer* g_buffer_cuda; // empty device regroup buffer
    init_regroup_buffer_cuda(g_buffer_cuda, max_num_rays, num_ray_groups); // allocate device regroup buffer
    // Run thread groups in parallel: one thread per ray group
    int grid_size = 1 + num_ray_groups / GPU_BLOCK_SIZE;
    int block_size = GPU_BLOCK_SIZE;
    DEBUG(DB_HOST,
          std::cout << "Calling run_rays with grid_size,block_size " << grid_size << "," << block_size << std::endl);
    run_rays<<<grid_size, block_size>>>(region_group_cuda_arr, num_ray_groups, region_index, densities, doses, N, M,
                                        g_buffer_cuda);
    // Wait for GPU computation to finish
    error_check(cudaDeviceSynchronize());
    // Copy g_buffer back to host buffer
    copy_regroup_buffer_host(g_buffer, g_buffer_cuda, max_num_rays,
                             num_ray_groups); // copy g_buffer back to host buffer
    // Free device memory
    // First free ray group pointers on device, which are still stored on host
    for (int g = 0; g < num_ray_groups; g++)
    {
        cudaFree(region_group[g].my_rays);
    }
    cudaFree(g_buffer_cuda);
    cudaFree(region_group_cuda_arr);
    // Clear region group vector because we messed with its memory, and its rays are all going to be run
    region_group.clear();
    return;
}
|
2d8623a3c2ece4332267c03dd1e4dcea015a43df.hip
|
// !!! This is a file automatically generated by hipify!!!
#ifdef _WIN32
#include "Windows.h"
#endif
#include <iostream>
#include "Common.h"
// Cuda Kernel
#include "KernelCPU.h"
#include "ConvolutionalEDUCNN.h"
// Convolutional layer (EDUCNN variant): `depth` square filters of side
// filterWidth applied with the given stride and no input padding. Output
// width/height are computed later in defineCuda from the previous layer.
ConvolutionalEDUCNN::ConvolutionalEDUCNN(const int &filterWidth, const int &depth, const int &stride)
: LayerDefinition(0, 0, depth, CONVOLUTIONAL_EDUCNN, NONE) {
this->_filterWidth = filterWidth;
this->_filterDim = filterWidth * filterWidth; // weights per filter slice
this->_depth = depth;
this->_stride = stride;
this->_padding = 0; // no zero padding on the input
}
// Destructor. NOTE(review): the device buffers allocated in defineCuda
// (weight, bias, output, ...) are not freed here — confirm they are released
// elsewhere.
ConvolutionalEDUCNN::~ConvolutionalEDUCNN() {
}
// Copy the layer's weight matrix (_wDim doubles) from device to host.
std::vector<double> ConvolutionalEDUCNN::getWeights(void) {
std::vector<double> wCPU(_wDim);
CHECK(hipMemcpy(&wCPU[0], weight, _wBytes, hipMemcpyDeviceToHost));
return wCPU;
}
// Copy the layer's bias vector (one double per output node) from device to host.
std::vector<double> ConvolutionalEDUCNN::getBias(void) {
std::vector<double> bCPU(_nodes);
CHECK(hipMemcpy(&bCPU[0], bias, _nodes * sizeof(double), hipMemcpyDeviceToHost));
return bCPU;
}
// Return the 0-based index of the output node selected by Idamax.
int ConvolutionalEDUCNN::getPredictionIndex(void) {
int maxIndex;
// Find the index (class) corresponding to the maximum output value
// (hipblasIdamax selects by absolute value and returns a 1-based index, hence the "- 1")
CHECK_CUBLAS(hipblasIdamax(handle, _nodes, output, 1, &maxIndex));
return maxIndex - 1;
}
// Allocates every device buffer and initializes weights/bias for this layer,
// given the previous layer's dimensions.
// NOTE(review): assumes a square previous layer -- prevLayerWidth is used for
// both dimensions and prevLayerHeight is only printed; confirm with callers.
void ConvolutionalEDUCNN::defineCuda(const int &prevLayerWidth, const int &prevLayerHeight, const int &prevLayerDepth) {
_prevLayerWidth = prevLayerWidth;
_prevLayerDepth = prevLayerDepth;
// The node count depends on the filter and on the previous layer's nodes.
//width
_width = _calcOutput(false);
//height
_height = _calcOutput(false);
// depth = number of filters
this->_nodes = _width * _height * _depth;
_alignedNodes = ALIGN_UP(_nodes, THREADS);
_uniqueNodes = _width * _height;
#ifdef RELEASE
std::cout << "******** CONV ********\n";
std::cout << "dimensioni input del livello: " << prevLayerWidth << " - " << prevLayerHeight << " - " << prevLayerDepth << std::endl;
std::cout << "dimensioni output del livello: " << _width << " - " << _height << " - " << _depth << std::endl;
std::cout << "\n\n";
#endif
// Create the cuBLAS/hipBLAS handle.
CHECK_CUBLAS(hipblasCreate(&handle));
// Cache configuration: prefer shared memory over L1.
hipDeviceSetCacheConfig(hipFuncCachePreferShared);
// Weight matrix size: one filter per (output depth, input channel) pair.
_wDim = _filterDim * prevLayerDepth * _depth;
// Weight matrix size in bytes.
_wBytes = _wDim * sizeof(double);
// Size of bias, output, error buffers.
const unsigned int Bytes = _nodes * sizeof(double);
#ifdef DEBUG
// Enlarge the buffer backing in-kernel printf.
size_t sz = 1048576 * 1000;
hipDeviceSetLimit(hipLimitPrintfFifoSize, sz);
#endif
// Allocate the device matrices.
CHECK(hipMalloc((void**)&weight, _wBytes));
CHECK(hipMalloc((void**)&weightRot, _wBytes));
CHECK(hipMalloc((void**)&bias, Bytes));
CHECK(hipMalloc((void**)&output, Bytes));
//CHECK(hipMalloc((void**)&error, Bytes));
CHECK(hipMalloc((void**)&prevError, _prevLayerWidth * _prevLayerWidth * _prevLayerDepth * sizeof(double)));
CHECK(hipMalloc((void**)&tempWeight, _wBytes));
// Submatrix set size in bytes: one submatrix per node of an output block,
// times the previous layer's depth, each as large as a filter.
unsigned int subBytes = _uniqueNodes * _prevLayerDepth * _filterDim * sizeof(double);
CHECK(hipMalloc((void**)&subForward, subBytes));
const int prevUniqueNodes = _prevLayerWidth * _prevLayerWidth;
subBytes = prevUniqueNodes * _depth * _filterDim * sizeof(double);
CHECK(hipMalloc((void**)&subCalcError, subBytes));
// Submatrix set for backprop: one submatrix per filter node
// (before: submatrices of size _filterDim, one per uniqueNode;
// now: submatrices of size uniqueNodes, one per _filterDim).
subBytes = _uniqueNodes * _prevLayerDepth * _filterDim * sizeof(double);
CHECK(hipMalloc((void**)&subBack, subBytes));
// Temporary matrix, zero-initialized, used for zero padding.
paddingWidth = (_filterWidth - 1) * 2 + _width;
const int uniquePadding = paddingWidth * paddingWidth;
paddingSize = uniquePadding * _depth; // same layout as output
CHECK(hipMalloc((void**)&padding, paddingSize * sizeof(double)));
CHECK(hipMemset(padding, 0, paddingSize * sizeof(double)));
#ifdef DEBUG
std::cout << "Memoria allocata \n" << std::endl;
#endif
// Round the block size up to a multiple of THREADS.
const int aligned = ALIGN_UP(_filterDim, THREADS);
// One block per (filter, previous-layer-depth) pair.
dim3 numBlocks(_depth, prevLayerDepth, 1);
// One thread per filter element (rounded up).
dim3 threadBlocks(aligned, 1, 1);
// RNG state array for weight/bias initialization.
hiprandStateXORWOW_t *devStates;
// Number of distinct random sequences.
const int numRand = _nodes * prevLayerDepth * aligned;
// Allocate the RNG states.
CHECK(hipMalloc((void **)&devStates, numRand * sizeof(hiprandStateXORWOW_t)));
// Initialize the layer's weights.
Kernel::initWeightK(numBlocks, threadBlocks, weight, _wDim, devStates);
// Initialize the layer's biases.
Kernel::initBiasK((_alignedNodes / THREADS), THREADS, bias, _nodes, devStates);
#ifdef DEBUG
CHECK(hipDeviceSynchronize());
std::cout << "\n\nValore dei pesi\n\n";
printFromCudaFormatted(weight, _wDim, _filterWidth);
std::cout << "\n\nValore dei bias\n\n";
printFromCudaFormatted(bias, _nodes, _width);
std::cout << "\n\n\n\n";
#endif
// Release the RNG states.
CHECK(hipFree(devStates));
}
// Forward pass: unrolls (im2col) the previous layer's output into subForward,
// then performs one gemv per (filter i, input channel j) pair, accumulating
// over input channels into `output`.
void ConvolutionalEDUCNN::forward_propagation(const double * prevOutput) {
#ifdef DEBUG
std::cout << "\n\nValore dell'input\n\n";
printFromCudaFormatted(prevOutput, _prevLayerWidth * _prevLayerWidth * _prevLayerDepth, _prevLayerWidth);
#endif
// 3D thread blocks: one thread per filter element.
dim3 threadBlocks(_filterWidth, _filterWidth, 1);
// One block per output node and per previous-layer depth slice.
dim3 numBlocks(_width, _height, _prevLayerDepth);
Kernel::createSubmatrixBisK(numBlocks, threadBlocks, subForward, prevOutput, _prevLayerWidth, _filterWidth, _stride, _uniqueNodes);
#ifdef DEBUG_SUB
CHECK(hipDeviceSynchronize());
std::cout << "\n\nValore submatrici\n\n";
printFromCudaFormatted(subForward, _uniqueNodes * _prevLayerDepth * _filterDim, _filterWidth);
#endif
// From here the computation is analogous to a fully connected layer.
for (int i = 0; i < _depth; i++) {
for (int j = 0; j < _prevLayerDepth; j++) {
// beta = 0 on the first input channel (overwrite), 1 afterwards (accumulate).
(j == 0) ? beta = 0.0 : beta = 1.0;
CHECK_CUBLAS(hipblasDgemv(handle, HIPBLAS_OP_T, _filterDim, _uniqueNodes, &alpha, subForward + (j * _uniqueNodes * _filterDim), _filterDim, weight + (i * _filterDim * _prevLayerDepth) + (j * _filterDim), 1, &beta, output + (i * _uniqueNodes), 1));
//CHECK_CUBLAS(hipblasDgemm(handle, HIPBLAS_OP_N, HIPBLAS_OP_N, 1, _uniqueNodes, _filterDim, &alpha, weight + (i * _filterDim * _prevLayerDepth) + (j * _filterDim), 1, subForward + (j * _uniqueNodes), _filterDim, &beta, output + (i * _uniqueNodes), 1));
}
}
#ifdef DEBUG
CHECK(hipDeviceSynchronize());
std::cout << "\n\nValore output senza bias\n\n";
printFromCudaFormatted(output, _nodes, _width);
#endif
// Bias addition (currently disabled).
//CHECK_CUBLAS(
//hipblasDaxpy(handle, _nodes, &alpha, bias, 1, output, 1));
//CHECK_CUBLAS(
//hipblasDgeam(handle, HIPBLAS_OP_N, HIPBLAS_OP_N, 1, _nodes, &alpha, bias, 1, &alpha, output, 1, output, 1));
#ifdef DEBUG
CHECK(hipDeviceSynchronize());
std::cout << "\n\nValore output *************************************************\n\n";
printFromCudaFormatted(output, _nodes, _width);
#endif
}
// Propagates this layer's error back to the previous layer (prevError):
// zero-pads `error`, unrolls it (im2col), rotates the filters by 180 degrees
// and performs the "full" convolution as a series of gemv calls.
void ConvolutionalEDUCNN::calcError() {
	// prevError is the previous layer's error buffer to fill;
	// error is the (un-rotated) error used on the previous pass through this layer.
#ifdef DEBUG
	// Fix: the original literal "\n\error" contained the non-standard escape
	// \e (ESC) instead of the intended second newline.
	std::cout << "\n\nerror in calc error\n\n";
	printFromCudaFormatted(error, _nodes, _width);
#endif
	// 2D thread blocks: one thread per output node.
	dim3 threadBlocks(_height, _width, 1);
	// One block per filter.
	dim3 numBlocks(1, 1, _depth);
	Kernel::zeroPaddingBisK(numBlocks, threadBlocks, padding, error, _width, _filterWidth);
#ifdef DEBUG
	std::cout << "\n\nerror con zero padding\n\n";
	printFromCudaFormatted(padding, paddingSize, paddingWidth);
#endif
	// One submatrix per output node of layer L-1.
	const int prevUniqueNodes = _prevLayerWidth * _prevLayerWidth;
	// 3D thread blocks: one thread per filter element.
	threadBlocks = dim3(_filterWidth, _filterWidth, 1);
	// One block per input node and per filter.
	numBlocks = dim3(_prevLayerWidth, _prevLayerWidth, _depth);
	Kernel::createSubmatrixBisK(numBlocks, threadBlocks, subCalcError, padding, paddingWidth, _filterWidth, _stride, prevUniqueNodes);
#ifdef DEBUG_SUB
	CHECK(hipDeviceSynchronize());
	std::cout << "\n\nValore submatrici zero padding\n\n";
	printFromCudaFormatted(subCalcError, prevUniqueNodes * _depth * _filterDim, _filterWidth);
#endif
	// Rotate the weights now so they can be used in layer L-1's backpropagation.
	// 2D thread blocks: one thread per (filter, input-channel) pair.
	threadBlocks = dim3(_depth, _prevLayerDepth, 1);
	// One block per row/column of the filter.
	numBlocks = dim3(_filterWidth, _filterWidth, 1);
	Kernel::rot180BisK(numBlocks, threadBlocks, weight, weightRot, _filterDim);
#ifdef DEBUG
	CHECK(hipDeviceSynchronize());
	std::cout << "\n\nValore dei pesi ruotati\n\n";
	printFromCudaFormatted(weightRot, _wDim, _filterWidth);
#endif
	// From here the computation is analogous to a convolution.
	for (int i = 0; i < _depth; i++) {
		for (int j = 0; j < _prevLayerDepth; j++) {
			// Overwrite on the first filter (beta = 0), accumulate afterwards.
			(i == 0) ? beta = 0.0 : beta = 1.0;
			CHECK_CUBLAS(hipblasDgemv(handle, HIPBLAS_OP_T, _filterDim, prevUniqueNodes, &alpha, subCalcError + (i * prevUniqueNodes * _filterDim), _filterDim, weightRot + ((i + j * _depth) * _filterDim), 1, &beta, prevError + (j * prevUniqueNodes), 1));
			//CHECK_CUBLAS(hipblasDgemm(handle, HIPBLAS_OP_N, HIPBLAS_OP_N, 1, prevUniqueNodes, _filterDim, &alpha, weightRot + ((i + j * _depth) * _filterDim), 1, subCalcError + (i * prevUniqueNodes), _filterDim, &beta, prevError + (j * prevUniqueNodes), 1));
		}
	}
#ifdef DEBUG
	CHECK(hipDeviceSynchronize());
	std::cout << "\n\nErrore commesso sui nodi back propagation\n\n";
	printFromCudaFormatted(prevError, prevUniqueNodes * _prevLayerDepth, _prevLayerWidth);
#endif
}
// Backward pass when this layer is the network output: computes the per-node
// error from the target label, backpropagates it and updates the weights.
void ConvolutionalEDUCNN::back_propagation_output(const double * prevOutput, const uint8_t * labels, const int & target, const double & learningRate) {
// Compute the error for every output node.
Kernel::outputErrorK((_alignedNodes / THREADS), THREADS, output, error, labels, target, _nodes);
#ifdef DEBUG
CHECK(hipDeviceSynchronize());
std::cout << "\n\nErrore commesso sui nodi back propagation output\n\n";
printFromCudaFormatted(error, _nodes, _width);
#endif
calcError();
// Weight update.
updateWeights(prevOutput, learningRate);
}
// Backward pass for a hidden layer: adopts the error buffer handed over by
// the next layer, propagates it to the previous layer (skipped when this is
// the first layer) and updates the weights.
// NOTE(review): `error` becomes an alias of the caller-owned prevError buffer;
// this layer must not free it (see deleteCuda).
void ConvolutionalEDUCNN::back_propagation(const double *prevOutput, double *prevError, const double &learningRate, const bool notFirst) {
error = prevError;
if (notFirst)
calcError();
updateWeights(prevOutput, learningRate);
}
// Gradient step: unrolls the previous layer's output into subBack, computes
// the weight gradient as error x input (averaged over _uniqueNodes through
// backAlpha) into tempWeight, then applies it to the weights with axpy.
void ConvolutionalEDUCNN::updateWeights(const double *prevOutput, const double &learningRate) {
// 3D thread blocks: one thread per output node.
dim3 threadBlocks(_width, _height, 1);
// One block per filter element and previous-layer depth slice.
dim3 numBlocks(_filterWidth, _filterWidth, _prevLayerDepth);
Kernel::createSubmatrixBisK(numBlocks, threadBlocks, subBack, prevOutput, _prevLayerWidth, _width, _stride, _filterDim);
#ifdef DEBUG_SUB
CHECK(hipDeviceSynchronize());
std::cout << "\n\nValore submatrici backpropagation\n\n";
printFromCudaFormatted(subBack, _uniqueNodes * _filterDim, _width);
#endif
// From here the computation is analogous to a fully connected layer.
// Average the gradient over the output nodes.
double backAlpha = 1.0 / _uniqueNodes;
beta = 0.0;
for (int i = 0; i < _depth; i++) {
for (int j = 0; j < _prevLayerDepth; j++) {
CHECK_CUBLAS(hipblasDgemv(handle, HIPBLAS_OP_T, _uniqueNodes, _filterDim, &backAlpha, subBack + (j * _uniqueNodes * _filterDim), _uniqueNodes, error + (i * _uniqueNodes), 1, &beta, tempWeight + ((i + j * _depth) * _filterDim), 1));
//CHECK_CUBLAS(hipblasDgemm(handle, HIPBLAS_OP_N, HIPBLAS_OP_N, 1, _filterDim, _uniqueNodes, &backAlpha, error + (i * _uniqueNodes), 1, subBack + (j * _filterDim), _uniqueNodes, &beta, tempWeight + ((i + j * _depth) * _filterDim), 1));
}
}
#ifdef DEBUG
CHECK(hipDeviceSynchronize());
std::cout << "\n\nMatrice temporanea per aggiornamento pesi\n\n";
printFromCudaFormatted(tempWeight, _wDim, _filterWidth);
#endif
// Actual weight update: weight += learningRate * tempWeight.
// NOTE(review): the gradient is added, not subtracted; the sign convention
// must match the error produced by outputErrorK -- confirm.
CHECK_CUBLAS(
hipblasDaxpy(handle, _wDim, &learningRate, tempWeight, 1, weight, 1));
//CHECK_CUBLAS(
//hipblasDgeam(handle, HIPBLAS_OP_N, HIPBLAS_OP_N, _wDim, _depth, &learningRate, tempWeight, _wDim, &alpha, weight, _wDim, weight, _wDim));
#ifdef DEBUG
CHECK(hipDeviceSynchronize());
std::cout << "\n\nMatrice dei pesi aggiornata\n\n";
printFromCudaFormatted(weight, _wDim, _filterWidth);
#endif
// Bias update (currently disabled).
//CHECK_CUBLAS(
//hipblasDaxpy(handle, _nodes, &learningRate, error, 1, bias, 1));
//CHECK_CUBLAS(
//hipblasDgeam(handle, HIPBLAS_OP_N, HIPBLAS_OP_N, 1, _nodes, &learningRate, errorRot, 1, &alpha, bias, 1, bias, 1));
#ifdef DEBUG
CHECK(hipDeviceSynchronize());
std::cout << "\n\nVettore del bias aggiornato\n\n";
printFromCudaFormatted(bias, _nodes, _width);
#endif
}
// Releases the BLAS handle and every device buffer allocated in defineCuda().
// NOTE(review): `error` is deliberately not freed -- in back_propagation it
// aliases a caller-provided buffer, so this layer does not own it; confirm.
void ConvolutionalEDUCNN::deleteCuda() {
CHECK_CUBLAS(hipblasDestroy(handle));
CHECK(hipFree(weight));
CHECK(hipFree(weightRot));
CHECK(hipFree(bias));
CHECK(hipFree(output));
//CHECK(hipFree(error));
CHECK(hipFree(prevError));
CHECK(hipFree(tempWeight));
CHECK(hipFree(subForward));
CHECK(hipFree(subCalcError));
CHECK(hipFree(subBack));
CHECK(hipFree(padding));
}
// Debug helper: dumps the device-side weight matrix to stdout.
void ConvolutionalEDUCNN::printW() {
printFromCudaFormatted(weight, _wDim, _filterWidth);
//printFromCudaFormatted(bias, _nodes, _width);
}
// Computes the layer's output side length from the previous layer's width,
// the filter size and the stride. With padding enabled the input size is
// preserved ("same" convolution) and _padding is recorded as a side effect.
// Error cases (fractional padding, mismatched stride, ...) are not handled yet.
int ConvolutionalEDUCNN::_calcOutput(bool withPadding) {
	if (_filterWidth > _prevLayerWidth) {
		std::cerr << "Le dimensioni del filtro superano le dimensioni del livello precedente!!" << std::endl;
		exit(1);
	}
	if (!withPadding) {
		// The + (_stride - 1) term makes the integer division round up.
		const int span = _prevLayerWidth - _filterWidth;
		return (span + (_stride - 1)) / _stride + 1;
	}
	// "Same" convolution: remember the padding amount, keep the input size.
	_padding = (_filterWidth - 1) / 2;
	return _prevLayerWidth;
}
|
2d8623a3c2ece4332267c03dd1e4dcea015a43df.cu
|
#ifdef _WIN32
#include "Windows.h"
#endif
#include <iostream>
#include "Common.h"
// Cuda Kernel
#include "KernelCPU.h"
#include "ConvolutionalEDUCNN.h"
// Convolutional layer (CUDA build): records the filter geometry.
// filterWidth: side of the square filter; depth: number of filters;
// stride: step between filter applications. Padding defaults to 0.
ConvolutionalEDUCNN::ConvolutionalEDUCNN(const int &filterWidth, const int &depth, const int &stride)
	: LayerDefinition(0, 0, depth, CONVOLUTIONAL_EDUCNN, NONE) {
	_filterWidth = filterWidth;
	_filterDim = filterWidth * filterWidth;   // filter elements per channel
	_depth = depth;
	_stride = stride;
	_padding = 0;
}
// Destructor is intentionally empty: device resources are released explicitly
// via deleteCuda(), not on destruction.
ConvolutionalEDUCNN::~ConvolutionalEDUCNN() {
}
// Copies the layer's weight matrix from device to host.
// Returns a host vector of _wDim doubles.
std::vector<double> ConvolutionalEDUCNN::getWeights(void) {
	std::vector<double> hostWeights(_wDim);
	CHECK(cudaMemcpy(hostWeights.data(), weight, _wBytes, cudaMemcpyDeviceToHost));
	return hostWeights;
}
// Copies the layer's bias vector from device to host.
// Returns a host vector of _nodes doubles.
std::vector<double> ConvolutionalEDUCNN::getBias(void) {
	std::vector<double> hostBias(_nodes);
	CHECK(cudaMemcpy(hostBias.data(), bias, _nodes * sizeof(double), cudaMemcpyDeviceToHost));
	return hostBias;
}
// Returns the 0-based index of the output node with the largest value,
// i.e. the predicted class.
// NOTE(review): cublasIdamax selects the maximum *absolute* value; with
// possibly-negative raw outputs this may differ from the true argmax -- confirm.
int ConvolutionalEDUCNN::getPredictionIndex(void) {
	int maxIndex = 0;
	// Idamax reports a 1-based position, hence the -1 below.
	CHECK_CUBLAS(cublasIdamax(handle, _nodes, output, 1, &maxIndex));
	return maxIndex - 1;
}
// Allocates every device buffer and initializes weights/bias for this layer,
// given the previous layer's dimensions.
// NOTE(review): assumes a square previous layer -- prevLayerWidth is used for
// both dimensions and prevLayerHeight is only printed; confirm with callers.
void ConvolutionalEDUCNN::defineCuda(const int &prevLayerWidth, const int &prevLayerHeight, const int &prevLayerDepth) {
_prevLayerWidth = prevLayerWidth;
_prevLayerDepth = prevLayerDepth;
// The node count depends on the filter and on the previous layer's nodes.
//width
_width = _calcOutput(false);
//height
_height = _calcOutput(false);
// depth = number of filters
this->_nodes = _width * _height * _depth;
_alignedNodes = ALIGN_UP(_nodes, THREADS);
_uniqueNodes = _width * _height;
#ifdef RELEASE
std::cout << "******** CONV ********\n";
std::cout << "dimensioni input del livello: " << prevLayerWidth << " - " << prevLayerHeight << " - " << prevLayerDepth << std::endl;
std::cout << "dimensioni output del livello: " << _width << " - " << _height << " - " << _depth << std::endl;
std::cout << "\n\n";
#endif
// Create the cuBLAS handle.
CHECK_CUBLAS(cublasCreate(&handle));
// Cache configuration: prefer shared memory over L1.
cudaDeviceSetCacheConfig(cudaFuncCachePreferShared);
// Weight matrix size: one filter per (output depth, input channel) pair.
_wDim = _filterDim * prevLayerDepth * _depth;
// Weight matrix size in bytes.
_wBytes = _wDim * sizeof(double);
// Size of bias, output, error buffers.
const unsigned int Bytes = _nodes * sizeof(double);
#ifdef DEBUG
// Enlarge the buffer backing in-kernel printf.
size_t sz = 1048576 * 1000;
cudaDeviceSetLimit(cudaLimitPrintfFifoSize, sz);
#endif
// Allocate the device matrices.
CHECK(cudaMalloc((void**)&weight, _wBytes));
CHECK(cudaMalloc((void**)&weightRot, _wBytes));
CHECK(cudaMalloc((void**)&bias, Bytes));
CHECK(cudaMalloc((void**)&output, Bytes));
//CHECK(cudaMalloc((void**)&error, Bytes));
CHECK(cudaMalloc((void**)&prevError, _prevLayerWidth * _prevLayerWidth * _prevLayerDepth * sizeof(double)));
CHECK(cudaMalloc((void**)&tempWeight, _wBytes));
// Submatrix set size in bytes: one submatrix per node of an output block,
// times the previous layer's depth, each as large as a filter.
unsigned int subBytes = _uniqueNodes * _prevLayerDepth * _filterDim * sizeof(double);
CHECK(cudaMalloc((void**)&subForward, subBytes));
const int prevUniqueNodes = _prevLayerWidth * _prevLayerWidth;
subBytes = prevUniqueNodes * _depth * _filterDim * sizeof(double);
CHECK(cudaMalloc((void**)&subCalcError, subBytes));
// Submatrix set for backprop: one submatrix per filter node
// (before: submatrices of size _filterDim, one per uniqueNode;
// now: submatrices of size uniqueNodes, one per _filterDim).
subBytes = _uniqueNodes * _prevLayerDepth * _filterDim * sizeof(double);
CHECK(cudaMalloc((void**)&subBack, subBytes));
// Temporary matrix, zero-initialized, used for zero padding.
paddingWidth = (_filterWidth - 1) * 2 + _width;
const int uniquePadding = paddingWidth * paddingWidth;
paddingSize = uniquePadding * _depth; // same layout as output
CHECK(cudaMalloc((void**)&padding, paddingSize * sizeof(double)));
CHECK(cudaMemset(padding, 0, paddingSize * sizeof(double)));
#ifdef DEBUG
std::cout << "Memoria allocata \n" << std::endl;
#endif
// Round the block size up to a multiple of THREADS.
const int aligned = ALIGN_UP(_filterDim, THREADS);
// One block per (filter, previous-layer-depth) pair.
dim3 numBlocks(_depth, prevLayerDepth, 1);
// One thread per filter element (rounded up).
dim3 threadBlocks(aligned, 1, 1);
// RNG state array for weight/bias initialization.
curandStateXORWOW_t *devStates;
// Number of distinct random sequences.
const int numRand = _nodes * prevLayerDepth * aligned;
// Allocate the RNG states.
CHECK(cudaMalloc((void **)&devStates, numRand * sizeof(curandStateXORWOW_t)));
// Initialize the layer's weights.
Kernel::initWeightK(numBlocks, threadBlocks, weight, _wDim, devStates);
// Initialize the layer's biases.
Kernel::initBiasK((_alignedNodes / THREADS), THREADS, bias, _nodes, devStates);
#ifdef DEBUG
CHECK(cudaDeviceSynchronize());
std::cout << "\n\nValore dei pesi\n\n";
printFromCudaFormatted(weight, _wDim, _filterWidth);
std::cout << "\n\nValore dei bias\n\n";
printFromCudaFormatted(bias, _nodes, _width);
std::cout << "\n\n\n\n";
#endif
// Release the RNG states.
CHECK(cudaFree(devStates));
}
// Forward pass: unrolls (im2col) the previous layer's output into subForward,
// then performs one gemv per (filter i, input channel j) pair, accumulating
// over input channels into `output`.
void ConvolutionalEDUCNN::forward_propagation(const double * prevOutput) {
#ifdef DEBUG
std::cout << "\n\nValore dell'input\n\n";
printFromCudaFormatted(prevOutput, _prevLayerWidth * _prevLayerWidth * _prevLayerDepth, _prevLayerWidth);
#endif
// 3D thread blocks: one thread per filter element.
dim3 threadBlocks(_filterWidth, _filterWidth, 1);
// One block per output node and per previous-layer depth slice.
dim3 numBlocks(_width, _height, _prevLayerDepth);
Kernel::createSubmatrixBisK(numBlocks, threadBlocks, subForward, prevOutput, _prevLayerWidth, _filterWidth, _stride, _uniqueNodes);
#ifdef DEBUG_SUB
CHECK(cudaDeviceSynchronize());
std::cout << "\n\nValore submatrici\n\n";
printFromCudaFormatted(subForward, _uniqueNodes * _prevLayerDepth * _filterDim, _filterWidth);
#endif
// From here the computation is analogous to a fully connected layer.
for (int i = 0; i < _depth; i++) {
for (int j = 0; j < _prevLayerDepth; j++) {
// beta = 0 on the first input channel (overwrite), 1 afterwards (accumulate).
(j == 0) ? beta = 0.0 : beta = 1.0;
CHECK_CUBLAS(cublasDgemv(handle, CUBLAS_OP_T, _filterDim, _uniqueNodes, &alpha, subForward + (j * _uniqueNodes * _filterDim), _filterDim, weight + (i * _filterDim * _prevLayerDepth) + (j * _filterDim), 1, &beta, output + (i * _uniqueNodes), 1));
//CHECK_CUBLAS(cublasDgemm(handle, CUBLAS_OP_N, CUBLAS_OP_N, 1, _uniqueNodes, _filterDim, &alpha, weight + (i * _filterDim * _prevLayerDepth) + (j * _filterDim), 1, subForward + (j * _uniqueNodes), _filterDim, &beta, output + (i * _uniqueNodes), 1));
}
}
#ifdef DEBUG
CHECK(cudaDeviceSynchronize());
std::cout << "\n\nValore output senza bias\n\n";
printFromCudaFormatted(output, _nodes, _width);
#endif
// Bias addition (currently disabled).
//CHECK_CUBLAS(
//cublasDaxpy(handle, _nodes, &alpha, bias, 1, output, 1));
//CHECK_CUBLAS(
//cublasDgeam(handle, CUBLAS_OP_N, CUBLAS_OP_N, 1, _nodes, &alpha, bias, 1, &alpha, output, 1, output, 1));
#ifdef DEBUG
CHECK(cudaDeviceSynchronize());
std::cout << "\n\nValore output *************************************************\n\n";
printFromCudaFormatted(output, _nodes, _width);
#endif
}
// Propagates this layer's error back to the previous layer (prevError):
// zero-pads `error`, unrolls it (im2col), rotates the filters by 180 degrees
// and performs the "full" convolution as a series of gemv calls.
void ConvolutionalEDUCNN::calcError() {
	// prevError is the previous layer's error buffer to fill;
	// error is the (un-rotated) error used on the previous pass through this layer.
#ifdef DEBUG
	// Fix: the original literal "\n\error" contained the non-standard escape
	// \e (ESC) instead of the intended second newline.
	std::cout << "\n\nerror in calc error\n\n";
	printFromCudaFormatted(error, _nodes, _width);
#endif
	// 2D thread blocks: one thread per output node.
	dim3 threadBlocks(_height, _width, 1);
	// One block per filter.
	dim3 numBlocks(1, 1, _depth);
	Kernel::zeroPaddingBisK(numBlocks, threadBlocks, padding, error, _width, _filterWidth);
#ifdef DEBUG
	std::cout << "\n\nerror con zero padding\n\n";
	printFromCudaFormatted(padding, paddingSize, paddingWidth);
#endif
	// One submatrix per output node of layer L-1.
	const int prevUniqueNodes = _prevLayerWidth * _prevLayerWidth;
	// 3D thread blocks: one thread per filter element.
	threadBlocks = dim3(_filterWidth, _filterWidth, 1);
	// One block per input node and per filter.
	numBlocks = dim3(_prevLayerWidth, _prevLayerWidth, _depth);
	Kernel::createSubmatrixBisK(numBlocks, threadBlocks, subCalcError, padding, paddingWidth, _filterWidth, _stride, prevUniqueNodes);
#ifdef DEBUG_SUB
	CHECK(cudaDeviceSynchronize());
	std::cout << "\n\nValore submatrici zero padding\n\n";
	printFromCudaFormatted(subCalcError, prevUniqueNodes * _depth * _filterDim, _filterWidth);
#endif
	// Rotate the weights now so they can be used in layer L-1's backpropagation.
	// 2D thread blocks: one thread per (filter, input-channel) pair.
	threadBlocks = dim3(_depth, _prevLayerDepth, 1);
	// One block per row/column of the filter.
	numBlocks = dim3(_filterWidth, _filterWidth, 1);
	Kernel::rot180BisK(numBlocks, threadBlocks, weight, weightRot, _filterDim);
#ifdef DEBUG
	CHECK(cudaDeviceSynchronize());
	std::cout << "\n\nValore dei pesi ruotati\n\n";
	printFromCudaFormatted(weightRot, _wDim, _filterWidth);
#endif
	// From here the computation is analogous to a convolution.
	for (int i = 0; i < _depth; i++) {
		for (int j = 0; j < _prevLayerDepth; j++) {
			// Overwrite on the first filter (beta = 0), accumulate afterwards.
			(i == 0) ? beta = 0.0 : beta = 1.0;
			CHECK_CUBLAS(cublasDgemv(handle, CUBLAS_OP_T, _filterDim, prevUniqueNodes, &alpha, subCalcError + (i * prevUniqueNodes * _filterDim), _filterDim, weightRot + ((i + j * _depth) * _filterDim), 1, &beta, prevError + (j * prevUniqueNodes), 1));
			//CHECK_CUBLAS(cublasDgemm(handle, CUBLAS_OP_N, CUBLAS_OP_N, 1, prevUniqueNodes, _filterDim, &alpha, weightRot + ((i + j * _depth) * _filterDim), 1, subCalcError + (i * prevUniqueNodes), _filterDim, &beta, prevError + (j * prevUniqueNodes), 1));
		}
	}
#ifdef DEBUG
	CHECK(cudaDeviceSynchronize());
	std::cout << "\n\nErrore commesso sui nodi back propagation\n\n";
	printFromCudaFormatted(prevError, prevUniqueNodes * _prevLayerDepth, _prevLayerWidth);
#endif
}
// Backward pass when this layer is the network output: computes the per-node
// error from the target label, backpropagates it and updates the weights.
void ConvolutionalEDUCNN::back_propagation_output(const double * prevOutput, const uint8_t * labels, const int & target, const double & learningRate) {
// Compute the error for every output node.
Kernel::outputErrorK((_alignedNodes / THREADS), THREADS, output, error, labels, target, _nodes);
#ifdef DEBUG
CHECK(cudaDeviceSynchronize());
std::cout << "\n\nErrore commesso sui nodi back propagation output\n\n";
printFromCudaFormatted(error, _nodes, _width);
#endif
calcError();
// Weight update.
updateWeights(prevOutput, learningRate);
}
// Backward pass for a hidden layer: adopts the error buffer handed over by
// the next layer, propagates it to the previous layer (skipped when this is
// the first layer) and updates the weights.
// NOTE(review): `error` becomes an alias of the caller-owned prevError buffer;
// this layer must not free it (see deleteCuda).
void ConvolutionalEDUCNN::back_propagation(const double *prevOutput, double *prevError, const double &learningRate, const bool notFirst) {
error = prevError;
if (notFirst)
calcError();
updateWeights(prevOutput, learningRate);
}
// Gradient step: unrolls the previous layer's output into subBack, computes
// the weight gradient as error x input (averaged over _uniqueNodes through
// backAlpha) into tempWeight, then applies it to the weights with axpy.
void ConvolutionalEDUCNN::updateWeights(const double *prevOutput, const double &learningRate) {
// 3D thread blocks: one thread per output node.
dim3 threadBlocks(_width, _height, 1);
// One block per filter element and previous-layer depth slice.
dim3 numBlocks(_filterWidth, _filterWidth, _prevLayerDepth);
Kernel::createSubmatrixBisK(numBlocks, threadBlocks, subBack, prevOutput, _prevLayerWidth, _width, _stride, _filterDim);
#ifdef DEBUG_SUB
CHECK(cudaDeviceSynchronize());
std::cout << "\n\nValore submatrici backpropagation\n\n";
printFromCudaFormatted(subBack, _uniqueNodes * _filterDim, _width);
#endif
// From here the computation is analogous to a fully connected layer.
// Average the gradient over the output nodes.
double backAlpha = 1.0 / _uniqueNodes;
beta = 0.0;
for (int i = 0; i < _depth; i++) {
for (int j = 0; j < _prevLayerDepth; j++) {
CHECK_CUBLAS(cublasDgemv(handle, CUBLAS_OP_T, _uniqueNodes, _filterDim, &backAlpha, subBack + (j * _uniqueNodes * _filterDim), _uniqueNodes, error + (i * _uniqueNodes), 1, &beta, tempWeight + ((i + j * _depth) * _filterDim), 1));
//CHECK_CUBLAS(cublasDgemm(handle, CUBLAS_OP_N, CUBLAS_OP_N, 1, _filterDim, _uniqueNodes, &backAlpha, error + (i * _uniqueNodes), 1, subBack + (j * _filterDim), _uniqueNodes, &beta, tempWeight + ((i + j * _depth) * _filterDim), 1));
}
}
#ifdef DEBUG
CHECK(cudaDeviceSynchronize());
std::cout << "\n\nMatrice temporanea per aggiornamento pesi\n\n";
printFromCudaFormatted(tempWeight, _wDim, _filterWidth);
#endif
// Actual weight update: weight += learningRate * tempWeight.
// NOTE(review): the gradient is added, not subtracted; the sign convention
// must match the error produced by outputErrorK -- confirm.
CHECK_CUBLAS(
cublasDaxpy(handle, _wDim, &learningRate, tempWeight, 1, weight, 1));
//CHECK_CUBLAS(
//cublasDgeam(handle, CUBLAS_OP_N, CUBLAS_OP_N, _wDim, _depth, &learningRate, tempWeight, _wDim, &alpha, weight, _wDim, weight, _wDim));
#ifdef DEBUG
CHECK(cudaDeviceSynchronize());
std::cout << "\n\nMatrice dei pesi aggiornata\n\n";
printFromCudaFormatted(weight, _wDim, _filterWidth);
#endif
// Bias update (currently disabled).
//CHECK_CUBLAS(
//cublasDaxpy(handle, _nodes, &learningRate, error, 1, bias, 1));
//CHECK_CUBLAS(
//cublasDgeam(handle, CUBLAS_OP_N, CUBLAS_OP_N, 1, _nodes, &learningRate, errorRot, 1, &alpha, bias, 1, bias, 1));
#ifdef DEBUG
CHECK(cudaDeviceSynchronize());
std::cout << "\n\nVettore del bias aggiornato\n\n";
printFromCudaFormatted(bias, _nodes, _width);
#endif
}
// Releases the cuBLAS handle and every device buffer allocated in defineCuda().
// NOTE(review): `error` is deliberately not freed -- in back_propagation it
// aliases a caller-provided buffer, so this layer does not own it; confirm.
void ConvolutionalEDUCNN::deleteCuda() {
CHECK_CUBLAS(cublasDestroy(handle));
CHECK(cudaFree(weight));
CHECK(cudaFree(weightRot));
CHECK(cudaFree(bias));
CHECK(cudaFree(output));
//CHECK(cudaFree(error));
CHECK(cudaFree(prevError));
CHECK(cudaFree(tempWeight));
CHECK(cudaFree(subForward));
CHECK(cudaFree(subCalcError));
CHECK(cudaFree(subBack));
CHECK(cudaFree(padding));
}
// Debug helper: dumps the device-side weight matrix to stdout.
void ConvolutionalEDUCNN::printW() {
printFromCudaFormatted(weight, _wDim, _filterWidth);
//printFromCudaFormatted(bias, _nodes, _width);
}
// Computes the layer's output side length from the previous layer's width,
// the filter size and the stride. With padding enabled the input size is
// preserved ("same" convolution) and _padding is recorded as a side effect.
// Error cases (fractional padding, mismatched stride, ...) are not handled yet.
int ConvolutionalEDUCNN::_calcOutput(bool withPadding) {
	if (_filterWidth > _prevLayerWidth) {
		std::cerr << "Le dimensioni del filtro superano le dimensioni del livello precedente!!" << std::endl;
		exit(1);
	}
	if (!withPadding) {
		// The + (_stride - 1) term makes the integer division round up.
		const int span = _prevLayerWidth - _filterWidth;
		return (span + (_stride - 1)) / _stride + 1;
	}
	// "Same" convolution: remember the padding amount, keep the input size.
	_padding = (_filterWidth - 1) / 2;
	return _prevLayerWidth;
}
|
8465b40dd6b0d7beeede3bd1d2dd064c0c6c3e90.hip
|
// !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
/* Copyright (c) 2021 PaddlePaddle Authors. All Rights Reserved.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License. */
#include "paddle/fluid/operators/collective/c_softmax_with_cross_entropy_op.h"
#include "paddle/fluid/operators/math/cross_entropy.h"
#include "paddle/fluid/operators/math/softmax_impl.h"
#include "paddle/fluid/platform/collective_helper.h"
#include "paddle/fluid/platform/device/gpu/nccl_helper.h"
#include "paddle/fluid/string/string_helper.h"
#include "paddle/phi/backends/gpu/gpu_context.h"
#include "paddle/phi/kernels/funcs/axis_utils.h"
namespace paddle {
namespace operators {
using Tensor = framework::Tensor;
static constexpr int kNumCUDAThreads = 512;
static constexpr int kNumMaxinumNumBlocks = 4096;
// Ceil-divides N by the per-block thread count, capping the grid size at
// kNumMaxinumNumBlocks (kernels use a loop macro to cover the remainder).
static inline int NumBlocks(const int N) {
  const int needed = (N + kNumCUDAThreads - 1) / kNumCUDAThreads;
  return needed < kNumMaxinumNumBlocks ? needed : kNumMaxinumNumBlocks;
}
// For each sample i in [0, N), writes the logit of its label into
// predicted_logits[i] when the global label falls inside this rank's class
// partition [start_index, end_index); other entries are left untouched
// (the caller zero-initializes them). D is the per-rank class count.
template <typename T, typename IndexT>
__global__ void MaskLabelByIndex(T* predicted_logits, const T* logit,
const IndexT* label, const int start_index,
const int end_index, const int64_t N,
const int64_t D, const int nranks) {
CUDA_KERNEL_LOOP(i, N) {
auto real_label = label[i];
// Labels must be valid global class indices across all ranks.
PADDLE_ENFORCE((real_label < D * nranks) && (real_label >= 0),
"The index is out of bounds, "
"please check whether the value of label and "
"input meet the class number. It should "
"be less than [%d], but received [%d]",
D * nranks, real_label);
if (real_label >= start_index && real_label < end_index) {
// Convert the global label to this rank's local column offset.
predicted_logits[i] = logit[i * D + real_label - start_index];
}
}
}
// Backward of softmax-with-cross-entropy over the flattened (N x D) logits:
// every gradient entry is scaled by its row's loss gradient; the entry whose
// (local column + start_index) matches the row's global label first has 1
// subtracted (the softmax - one_hot term of the gradient).
template <typename T, typename IndexT>
__global__ void MaskLabelByIndexGrad(T* logits_grad, const T* loss_grad,
const IndexT* labels,
const int start_index, const int end_index,
const int64_t N, const int64_t D) {
CUDA_KERNEL_LOOP(i, N * D) {
auto row = i / D;
auto col = i % D;
if ((col + start_index) == labels[row]) {
logits_grad[i] = (logits_grad[i] - static_cast<T>(1.0)) * loss_grad[row];
} else {
logits_grad[i] *= loss_grad[row];
}
}
}
// Op kernel entry point: dispatches the model-parallel softmax-with-cross-
// entropy computation. When a process group is registered for this ring_id,
// the process-group functor is used; otherwise the raw-communicator functor.
template <typename T>
class CSoftmaxWithCrossEntropyOpCUDAKernel : public framework::OpKernel<T> {
public:
void Compute(const framework::ExecutionContext& ctx) const override {
const int rid = ctx.Attr<int>("ring_id");
auto map = distributed::ProcessGroupMapFromGid::getInstance();
if (map->has(rid)) {
CSoftmaxWithCrossEntropyProcessGroupFunctor<phi::GPUContext, T> functor_;
functor_(ctx);
} else {
CSoftmaxWithCrossEntropyFunctor<phi::GPUContext, T> functor_;
functor_(ctx);
}
}
};
template <typename T>
struct CSoftmaxWithCrossEntropyFunctor<phi::GPUContext, T> {
void operator()(const framework::ExecutionContext& ctx) {
const Tensor* logits = ctx.Input<Tensor>("Logits");
const Tensor* labels = ctx.Input<Tensor>("Label");
Tensor* softmax = ctx.Output<Tensor>("Softmax");
Tensor* loss = ctx.Output<Tensor>("Loss");
const int rid = ctx.Attr<int>("ring_id");
const int nranks = ctx.Attr<int>("nranks");
const int rank = ctx.Attr<int>("rank");
const auto& place = ctx.GetPlace();
const auto& comm = platform::NCCLCommContext::Instance().Get(rid, place);
auto& dev_ctx = ctx.template device_context<platform::CUDADeviceContext>();
// use global calculate stream
const auto stream = static_cast<platform::CUDADeviceContext*>(
platform::DeviceContextPool::Instance().Get(place))
->stream();
// allocate memory on device.
softmax->mutable_data<T>(place);
loss->mutable_data<T>(place);
const auto& logits_dims = logits->dims();
const auto& labels_dims = labels->dims();
const int axis = logits_dims.size() - 1;
const int N = phi::funcs::SizeToAxis(axis, logits_dims);
const int D = phi::funcs::SizeFromAxis(axis, logits_dims);
Tensor logits_2d, softmax_2d, loss_2d;
logits_2d.ShareDataWith(*logits).Resize({N, D});
softmax_2d.ShareDataWith(*softmax).Resize({N, D});
loss_2d.ShareDataWith(*loss).Resize({N, 1});
auto eigen_logits = math::EigenMatrix<T>::From(logits_2d);
auto eigen_softmax = math::EigenMatrix<T>::From(softmax_2d);
// step 1, obtain logit_max
Tensor logits_max;
logits_max =
ctx.AllocateTmpTensor<T, platform::CUDADeviceContext>({N, 1}, dev_ctx);
void* logits_max_buff = logits_max.mutable_data<T>(place);
auto eigen_logits_max = math::EigenMatrix<T>::From(logits_max);
Eigen::DSizes<int, 1> along_axis(1);
eigen_logits_max.device(*dev_ctx.eigen_device()) =
eigen_logits.maximum(along_axis);
PADDLE_ENFORCE_GPU_SUCCESS(platform::dynload::ncclAllReduce(
logits_max_buff, logits_max_buff, logits_max.numel(),
platform::ToNCCLDataType(
framework::TransToProtoVarType(logits_max.dtype())),
ncclMax, comm->comm(), stream));
// step 2, obtain logit - logit_max
Eigen::DSizes<int, 2> batch_by_one(N, 1);
Eigen::DSizes<int, 2> one_by_class(1, D);
eigen_softmax.device(*dev_ctx.eigen_device()) =
(eigen_logits -
eigen_logits_max.reshape(batch_by_one).broadcast(one_by_class))
.unaryExpr(math::ValueClip<T>());
// step 3, obtain predict target
Tensor predicted_logits;
predicted_logits =
ctx.AllocateTmpTensor<T, platform::CUDADeviceContext>({N, 1}, dev_ctx);
predicted_logits.mutable_data<T>(place);
auto t = framework::EigenVector<T>::Flatten(predicted_logits);
t.device(*dev_ctx.eigen_device()) = t.constant(static_cast<T>(0));
const int start_index = rank * D;
const int end_index = start_index + D;
int blocks = NumBlocks(N);
int threads = kNumCUDAThreads;
const auto& label_type = framework::TransToProtoVarType(labels->dtype());
if (label_type == framework::proto::VarType::INT32) {
hipLaunchKernelGGL(( MaskLabelByIndex<T, int32_t>), dim3(blocks), dim3(threads), 0, dev_ctx.stream(),
predicted_logits.data<T>(), softmax_2d.data<T>(),
labels->data<int32_t>(), start_index, end_index, N, D, nranks);
} else if (label_type == framework::proto::VarType::INT64) {
hipLaunchKernelGGL(( MaskLabelByIndex<T, int64_t>), dim3(blocks), dim3(threads), 0, dev_ctx.stream(),
predicted_logits.data<T>(), softmax_2d.data<T>(),
labels->data<int64_t>(), start_index, end_index, N, D, nranks);
}
void* predict_logits_buff = predicted_logits.mutable_data<T>(place);
PADDLE_ENFORCE_GPU_SUCCESS(platform::dynload::ncclAllReduce(
predict_logits_buff, predict_logits_buff, predicted_logits.numel(),
platform::ToNCCLDataType(
framework::TransToProtoVarType(predicted_logits.dtype())),
ncclSum, comm->comm(), stream));
// step 4, obtain exp(logit)
eigen_softmax.device(*dev_ctx.eigen_device()) = eigen_softmax.exp();
// step 5, obtain sum_exp_logits
Tensor sum_exp_logits;
sum_exp_logits =
ctx.AllocateTmpTensor<T, platform::CUDADeviceContext>({N, 1}, dev_ctx);
void* sum_exp_logits_buff = sum_exp_logits.mutable_data<T>(place);
auto eigen_sum_exp_logits = math::EigenMatrix<T>::From(sum_exp_logits);
eigen_sum_exp_logits.device(*dev_ctx.eigen_device()) =
eigen_softmax.sum(along_axis);
PADDLE_ENFORCE_GPU_SUCCESS(platform::dynload::ncclAllReduce(
sum_exp_logits_buff, sum_exp_logits_buff, sum_exp_logits.numel(),
platform::ToNCCLDataType(
framework::TransToProtoVarType(sum_exp_logits.dtype())),
ncclSum, comm->comm(), stream));
auto eigen_loss = math::EigenMatrix<T>::From(loss_2d);
auto eigen_predicted_logits = math::EigenMatrix<T>::From(predicted_logits);
eigen_loss.device(*dev_ctx.eigen_device()) =
(eigen_sum_exp_logits.log().unaryExpr(math::TolerableValue<T>()) -
eigen_predicted_logits)
.unaryExpr(math::TolerableValue<T>());
eigen_softmax.device(*dev_ctx.eigen_device()) =
(eigen_softmax *
eigen_sum_exp_logits.inverse().broadcast(one_by_class));
}
};
template <typename T>
struct CSoftmaxWithCrossEntropyProcessGroupFunctor<phi::GPUContext, T> {
void operator()(const framework::ExecutionContext& ctx) {
const Tensor* logits = ctx.Input<Tensor>("Logits");
const Tensor* labels = ctx.Input<Tensor>("Label");
Tensor* softmax = ctx.Output<Tensor>("Softmax");
Tensor* loss = ctx.Output<Tensor>("Loss");
const int rid = ctx.Attr<int>("ring_id");
const int nranks = ctx.Attr<int>("nranks");
const int rank = ctx.Attr<int>("rank");
const auto& place = ctx.GetPlace();
auto& dev_ctx = ctx.template device_context<platform::CUDADeviceContext>();
auto map = distributed::ProcessGroupMapFromGid::getInstance();
distributed::ProcessGroup* pg = map->get(rid);
distributed::AllreduceOptions opts;
opts.reduce_op = distributed::ReduceOp::SUM;
// allocate memory on device.
softmax->mutable_data<T>(place);
loss->mutable_data<T>(place);
const auto& logits_dims = logits->dims();
const auto& labels_dims = labels->dims();
const int axis = logits_dims.size() - 1;
const int N = phi::funcs::SizeToAxis(axis, logits_dims);
const int D = phi::funcs::SizeFromAxis(axis, logits_dims);
Tensor logits_2d, softmax_2d, loss_2d;
logits_2d.ShareDataWith(*logits).Resize({N, D});
softmax_2d.ShareDataWith(*softmax).Resize({N, D});
loss_2d.ShareDataWith(*loss).Resize({N, 1});
auto eigen_logits = math::EigenMatrix<T>::From(logits_2d);
auto eigen_softmax = math::EigenMatrix<T>::From(softmax_2d);
// step 1, obtain logit_max
Tensor logits_max;
logits_max =
ctx.AllocateTmpTensor<T, platform::CUDADeviceContext>({N, 1}, dev_ctx);
auto eigen_logits_max = math::EigenMatrix<T>::From(logits_max);
Eigen::DSizes<int, 1> along_axis(1);
eigen_logits_max.device(*dev_ctx.eigen_device()) =
eigen_logits.maximum(along_axis);
std::vector<phi::DenseTensor> in_out;
in_out.push_back(logits_max);
pg->AllReduce(in_out, in_out, opts)->Synchronize();
// step 2, obtain logit - logit_max
Eigen::DSizes<int, 2> batch_by_one(N, 1);
Eigen::DSizes<int, 2> one_by_class(1, D);
eigen_softmax.device(*dev_ctx.eigen_device()) =
(eigen_logits -
eigen_logits_max.reshape(batch_by_one).broadcast(one_by_class))
.unaryExpr(math::ValueClip<T>());
// step 3, obtain predict target
Tensor predicted_logits;
predicted_logits =
ctx.AllocateTmpTensor<T, platform::CUDADeviceContext>({N, 1}, dev_ctx);
predicted_logits.mutable_data<T>(place);
auto t = framework::EigenVector<T>::Flatten(predicted_logits);
t.device(*dev_ctx.eigen_device()) = t.constant(static_cast<T>(0));
const int start_index = rank * D;
const int end_index = start_index + D;
int blocks = NumBlocks(N);
int threads = kNumCUDAThreads;
const auto& label_type = framework::TransToProtoVarType(labels->dtype());
if (label_type == framework::proto::VarType::INT32) {
hipLaunchKernelGGL(( MaskLabelByIndex<T, int32_t>), dim3(blocks), dim3(threads), 0, dev_ctx.stream(),
predicted_logits.data<T>(), softmax_2d.data<T>(),
labels->data<int32_t>(), start_index, end_index, N, D, nranks);
} else if (label_type == framework::proto::VarType::INT64) {
hipLaunchKernelGGL(( MaskLabelByIndex<T, int64_t>), dim3(blocks), dim3(threads), 0, dev_ctx.stream(),
predicted_logits.data<T>(), softmax_2d.data<T>(),
labels->data<int64_t>(), start_index, end_index, N, D, nranks);
}
in_out.clear();
in_out.push_back(predicted_logits);
pg->AllReduce(in_out, in_out, opts)->Synchronize();
// step 4, obtain exp(logit)
eigen_softmax.device(*dev_ctx.eigen_device()) = eigen_softmax.exp();
// step 5, obtain sum_exp_logits
Tensor sum_exp_logits;
sum_exp_logits =
ctx.AllocateTmpTensor<T, platform::CUDADeviceContext>({N, 1}, dev_ctx);
void* sum_exp_logits_buff = sum_exp_logits.mutable_data<T>(place);
auto eigen_sum_exp_logits = math::EigenMatrix<T>::From(sum_exp_logits);
eigen_sum_exp_logits.device(*dev_ctx.eigen_device()) =
eigen_softmax.sum(along_axis);
in_out.clear();
in_out.push_back(sum_exp_logits);
pg->AllReduce(in_out, in_out, opts)->Synchronize();
auto eigen_loss = math::EigenMatrix<T>::From(loss_2d);
auto eigen_predicted_logits = math::EigenMatrix<T>::From(predicted_logits);
eigen_loss.device(*dev_ctx.eigen_device()) =
(eigen_sum_exp_logits.log().unaryExpr(math::TolerableValue<T>()) -
eigen_predicted_logits)
.unaryExpr(math::TolerableValue<T>());
eigen_softmax.device(*dev_ctx.eigen_device()) =
(eigen_softmax *
eigen_sum_exp_logits.inverse().broadcast(one_by_class));
}
};
template <typename T>
class CSoftmaxWithCrossEntropyGradCUDAKernel : public framework::OpKernel<T> {
public:
void Compute(const framework::ExecutionContext& context) const override {
const Tensor* labels = context.Input<Tensor>("Label");
const Tensor* loss_grad =
context.Input<Tensor>(framework::GradVarName("Loss"));
Tensor* logit_grad =
context.Output<Tensor>(framework::GradVarName("Logits"));
const Tensor* softmax = context.Input<Tensor>("Softmax");
const int rank = context.Attr<int>("rank");
auto& dev_ctx =
context.template device_context<platform::CUDADeviceContext>();
if (logit_grad != softmax) {
framework::TensorCopy(*softmax, context.GetPlace(),
context.device_context(), logit_grad);
}
const auto sofrmax_dims = softmax->dims();
const int axis = sofrmax_dims.size() - 1;
const int N = phi::funcs::SizeToAxis(axis, sofrmax_dims);
const int D = phi::funcs::SizeFromAxis(axis, sofrmax_dims);
Tensor logit_grad_2d;
logit_grad_2d.ShareDataWith(*logit_grad).Resize({N, D});
int blocks = NumBlocks(N * D);
int threads = kNumCUDAThreads;
const auto& label_type = framework::TransToProtoVarType(labels->dtype());
const int start_index = rank * D;
const int end_index = start_index + D;
if (label_type == framework::proto::VarType::INT32) {
hipLaunchKernelGGL(( MaskLabelByIndexGrad<T,
int32_t>), dim3(blocks), dim3(threads), 0, dev_ctx.stream(),
logit_grad_2d.data<T>(), loss_grad->data<T>(),
labels->data<int32_t>(), start_index, end_index, N, D);
} else if (label_type == framework::proto::VarType::INT64) {
hipLaunchKernelGGL(( MaskLabelByIndexGrad<T,
int64_t>), dim3(blocks), dim3(threads), 0, dev_ctx.stream(),
logit_grad_2d.data<T>(), loss_grad->data<T>(),
labels->data<int64_t>(), start_index, end_index, N, D);
}
}
};
} // namespace operators
} // namespace paddle
namespace ops = paddle::operators;
namespace plat = paddle::platform;
REGISTER_OP_CUDA_KERNEL(
c_softmax_with_cross_entropy,
ops::CSoftmaxWithCrossEntropyOpCUDAKernel<float>,
ops::CSoftmaxWithCrossEntropyOpCUDAKernel<double>,
ops::CSoftmaxWithCrossEntropyOpCUDAKernel<plat::float16>);
REGISTER_OP_CUDA_KERNEL(
c_softmax_with_cross_entropy_grad,
ops::CSoftmaxWithCrossEntropyGradCUDAKernel<float>,
ops::CSoftmaxWithCrossEntropyGradCUDAKernel<paddle::platform::float16>,
ops::CSoftmaxWithCrossEntropyGradCUDAKernel<double>);
|
8465b40dd6b0d7beeede3bd1d2dd064c0c6c3e90.cu
|
/* Copyright (c) 2021 PaddlePaddle Authors. All Rights Reserved.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License. */
#include "paddle/fluid/operators/collective/c_softmax_with_cross_entropy_op.h"
#include "paddle/fluid/operators/math/cross_entropy.h"
#include "paddle/fluid/operators/math/softmax_impl.h"
#include "paddle/fluid/platform/collective_helper.h"
#include "paddle/fluid/platform/device/gpu/nccl_helper.h"
#include "paddle/fluid/string/string_helper.h"
#include "paddle/phi/backends/gpu/gpu_context.h"
#include "paddle/phi/kernels/funcs/axis_utils.h"
namespace paddle {
namespace operators {
using Tensor = framework::Tensor;
static constexpr int kNumCUDAThreads = 512;
static constexpr int kNumMaxinumNumBlocks = 4096;
static inline int NumBlocks(const int N) {
return std::min((N + kNumCUDAThreads - 1) / kNumCUDAThreads,
kNumMaxinumNumBlocks);
}
template <typename T, typename IndexT>
__global__ void MaskLabelByIndex(T* predicted_logits, const T* logit,
const IndexT* label, const int start_index,
const int end_index, const int64_t N,
const int64_t D, const int nranks) {
CUDA_KERNEL_LOOP(i, N) {
auto real_label = label[i];
PADDLE_ENFORCE((real_label < D * nranks) && (real_label >= 0),
"The index is out of bounds, "
"please check whether the value of label and "
"input meet the class number. It should "
"be less than [%d], but received [%d]",
D * nranks, real_label);
if (real_label >= start_index && real_label < end_index) {
predicted_logits[i] = logit[i * D + real_label - start_index];
}
}
}
template <typename T, typename IndexT>
__global__ void MaskLabelByIndexGrad(T* logits_grad, const T* loss_grad,
const IndexT* labels,
const int start_index, const int end_index,
const int64_t N, const int64_t D) {
CUDA_KERNEL_LOOP(i, N * D) {
auto row = i / D;
auto col = i % D;
if ((col + start_index) == labels[row]) {
logits_grad[i] = (logits_grad[i] - static_cast<T>(1.0)) * loss_grad[row];
} else {
logits_grad[i] *= loss_grad[row];
}
}
}
template <typename T>
class CSoftmaxWithCrossEntropyOpCUDAKernel : public framework::OpKernel<T> {
public:
void Compute(const framework::ExecutionContext& ctx) const override {
const int rid = ctx.Attr<int>("ring_id");
auto map = distributed::ProcessGroupMapFromGid::getInstance();
if (map->has(rid)) {
CSoftmaxWithCrossEntropyProcessGroupFunctor<phi::GPUContext, T> functor_;
functor_(ctx);
} else {
CSoftmaxWithCrossEntropyFunctor<phi::GPUContext, T> functor_;
functor_(ctx);
}
}
};
template <typename T>
struct CSoftmaxWithCrossEntropyFunctor<phi::GPUContext, T> {
void operator()(const framework::ExecutionContext& ctx) {
const Tensor* logits = ctx.Input<Tensor>("Logits");
const Tensor* labels = ctx.Input<Tensor>("Label");
Tensor* softmax = ctx.Output<Tensor>("Softmax");
Tensor* loss = ctx.Output<Tensor>("Loss");
const int rid = ctx.Attr<int>("ring_id");
const int nranks = ctx.Attr<int>("nranks");
const int rank = ctx.Attr<int>("rank");
const auto& place = ctx.GetPlace();
const auto& comm = platform::NCCLCommContext::Instance().Get(rid, place);
auto& dev_ctx = ctx.template device_context<platform::CUDADeviceContext>();
// use global calculate stream
const auto stream = static_cast<platform::CUDADeviceContext*>(
platform::DeviceContextPool::Instance().Get(place))
->stream();
// allocate memory on device.
softmax->mutable_data<T>(place);
loss->mutable_data<T>(place);
const auto& logits_dims = logits->dims();
const auto& labels_dims = labels->dims();
const int axis = logits_dims.size() - 1;
const int N = phi::funcs::SizeToAxis(axis, logits_dims);
const int D = phi::funcs::SizeFromAxis(axis, logits_dims);
Tensor logits_2d, softmax_2d, loss_2d;
logits_2d.ShareDataWith(*logits).Resize({N, D});
softmax_2d.ShareDataWith(*softmax).Resize({N, D});
loss_2d.ShareDataWith(*loss).Resize({N, 1});
auto eigen_logits = math::EigenMatrix<T>::From(logits_2d);
auto eigen_softmax = math::EigenMatrix<T>::From(softmax_2d);
// step 1, obtain logit_max
Tensor logits_max;
logits_max =
ctx.AllocateTmpTensor<T, platform::CUDADeviceContext>({N, 1}, dev_ctx);
void* logits_max_buff = logits_max.mutable_data<T>(place);
auto eigen_logits_max = math::EigenMatrix<T>::From(logits_max);
Eigen::DSizes<int, 1> along_axis(1);
eigen_logits_max.device(*dev_ctx.eigen_device()) =
eigen_logits.maximum(along_axis);
PADDLE_ENFORCE_GPU_SUCCESS(platform::dynload::ncclAllReduce(
logits_max_buff, logits_max_buff, logits_max.numel(),
platform::ToNCCLDataType(
framework::TransToProtoVarType(logits_max.dtype())),
ncclMax, comm->comm(), stream));
// step 2, obtain logit - logit_max
Eigen::DSizes<int, 2> batch_by_one(N, 1);
Eigen::DSizes<int, 2> one_by_class(1, D);
eigen_softmax.device(*dev_ctx.eigen_device()) =
(eigen_logits -
eigen_logits_max.reshape(batch_by_one).broadcast(one_by_class))
.unaryExpr(math::ValueClip<T>());
// step 3, obtain predict target
Tensor predicted_logits;
predicted_logits =
ctx.AllocateTmpTensor<T, platform::CUDADeviceContext>({N, 1}, dev_ctx);
predicted_logits.mutable_data<T>(place);
auto t = framework::EigenVector<T>::Flatten(predicted_logits);
t.device(*dev_ctx.eigen_device()) = t.constant(static_cast<T>(0));
const int start_index = rank * D;
const int end_index = start_index + D;
int blocks = NumBlocks(N);
int threads = kNumCUDAThreads;
const auto& label_type = framework::TransToProtoVarType(labels->dtype());
if (label_type == framework::proto::VarType::INT32) {
MaskLabelByIndex<T, int32_t><<<blocks, threads, 0, dev_ctx.stream()>>>(
predicted_logits.data<T>(), softmax_2d.data<T>(),
labels->data<int32_t>(), start_index, end_index, N, D, nranks);
} else if (label_type == framework::proto::VarType::INT64) {
MaskLabelByIndex<T, int64_t><<<blocks, threads, 0, dev_ctx.stream()>>>(
predicted_logits.data<T>(), softmax_2d.data<T>(),
labels->data<int64_t>(), start_index, end_index, N, D, nranks);
}
void* predict_logits_buff = predicted_logits.mutable_data<T>(place);
PADDLE_ENFORCE_GPU_SUCCESS(platform::dynload::ncclAllReduce(
predict_logits_buff, predict_logits_buff, predicted_logits.numel(),
platform::ToNCCLDataType(
framework::TransToProtoVarType(predicted_logits.dtype())),
ncclSum, comm->comm(), stream));
// step 4, obtain exp(logit)
eigen_softmax.device(*dev_ctx.eigen_device()) = eigen_softmax.exp();
// step 5, obtain sum_exp_logits
Tensor sum_exp_logits;
sum_exp_logits =
ctx.AllocateTmpTensor<T, platform::CUDADeviceContext>({N, 1}, dev_ctx);
void* sum_exp_logits_buff = sum_exp_logits.mutable_data<T>(place);
auto eigen_sum_exp_logits = math::EigenMatrix<T>::From(sum_exp_logits);
eigen_sum_exp_logits.device(*dev_ctx.eigen_device()) =
eigen_softmax.sum(along_axis);
PADDLE_ENFORCE_GPU_SUCCESS(platform::dynload::ncclAllReduce(
sum_exp_logits_buff, sum_exp_logits_buff, sum_exp_logits.numel(),
platform::ToNCCLDataType(
framework::TransToProtoVarType(sum_exp_logits.dtype())),
ncclSum, comm->comm(), stream));
auto eigen_loss = math::EigenMatrix<T>::From(loss_2d);
auto eigen_predicted_logits = math::EigenMatrix<T>::From(predicted_logits);
eigen_loss.device(*dev_ctx.eigen_device()) =
(eigen_sum_exp_logits.log().unaryExpr(math::TolerableValue<T>()) -
eigen_predicted_logits)
.unaryExpr(math::TolerableValue<T>());
eigen_softmax.device(*dev_ctx.eigen_device()) =
(eigen_softmax *
eigen_sum_exp_logits.inverse().broadcast(one_by_class));
}
};
template <typename T>
struct CSoftmaxWithCrossEntropyProcessGroupFunctor<phi::GPUContext, T> {
void operator()(const framework::ExecutionContext& ctx) {
const Tensor* logits = ctx.Input<Tensor>("Logits");
const Tensor* labels = ctx.Input<Tensor>("Label");
Tensor* softmax = ctx.Output<Tensor>("Softmax");
Tensor* loss = ctx.Output<Tensor>("Loss");
const int rid = ctx.Attr<int>("ring_id");
const int nranks = ctx.Attr<int>("nranks");
const int rank = ctx.Attr<int>("rank");
const auto& place = ctx.GetPlace();
auto& dev_ctx = ctx.template device_context<platform::CUDADeviceContext>();
auto map = distributed::ProcessGroupMapFromGid::getInstance();
distributed::ProcessGroup* pg = map->get(rid);
distributed::AllreduceOptions opts;
opts.reduce_op = distributed::ReduceOp::SUM;
// allocate memory on device.
softmax->mutable_data<T>(place);
loss->mutable_data<T>(place);
const auto& logits_dims = logits->dims();
const auto& labels_dims = labels->dims();
const int axis = logits_dims.size() - 1;
const int N = phi::funcs::SizeToAxis(axis, logits_dims);
const int D = phi::funcs::SizeFromAxis(axis, logits_dims);
Tensor logits_2d, softmax_2d, loss_2d;
logits_2d.ShareDataWith(*logits).Resize({N, D});
softmax_2d.ShareDataWith(*softmax).Resize({N, D});
loss_2d.ShareDataWith(*loss).Resize({N, 1});
auto eigen_logits = math::EigenMatrix<T>::From(logits_2d);
auto eigen_softmax = math::EigenMatrix<T>::From(softmax_2d);
// step 1, obtain logit_max
Tensor logits_max;
logits_max =
ctx.AllocateTmpTensor<T, platform::CUDADeviceContext>({N, 1}, dev_ctx);
auto eigen_logits_max = math::EigenMatrix<T>::From(logits_max);
Eigen::DSizes<int, 1> along_axis(1);
eigen_logits_max.device(*dev_ctx.eigen_device()) =
eigen_logits.maximum(along_axis);
std::vector<phi::DenseTensor> in_out;
in_out.push_back(logits_max);
pg->AllReduce(in_out, in_out, opts)->Synchronize();
// step 2, obtain logit - logit_max
Eigen::DSizes<int, 2> batch_by_one(N, 1);
Eigen::DSizes<int, 2> one_by_class(1, D);
eigen_softmax.device(*dev_ctx.eigen_device()) =
(eigen_logits -
eigen_logits_max.reshape(batch_by_one).broadcast(one_by_class))
.unaryExpr(math::ValueClip<T>());
// step 3, obtain predict target
Tensor predicted_logits;
predicted_logits =
ctx.AllocateTmpTensor<T, platform::CUDADeviceContext>({N, 1}, dev_ctx);
predicted_logits.mutable_data<T>(place);
auto t = framework::EigenVector<T>::Flatten(predicted_logits);
t.device(*dev_ctx.eigen_device()) = t.constant(static_cast<T>(0));
const int start_index = rank * D;
const int end_index = start_index + D;
int blocks = NumBlocks(N);
int threads = kNumCUDAThreads;
const auto& label_type = framework::TransToProtoVarType(labels->dtype());
if (label_type == framework::proto::VarType::INT32) {
MaskLabelByIndex<T, int32_t><<<blocks, threads, 0, dev_ctx.stream()>>>(
predicted_logits.data<T>(), softmax_2d.data<T>(),
labels->data<int32_t>(), start_index, end_index, N, D, nranks);
} else if (label_type == framework::proto::VarType::INT64) {
MaskLabelByIndex<T, int64_t><<<blocks, threads, 0, dev_ctx.stream()>>>(
predicted_logits.data<T>(), softmax_2d.data<T>(),
labels->data<int64_t>(), start_index, end_index, N, D, nranks);
}
in_out.clear();
in_out.push_back(predicted_logits);
pg->AllReduce(in_out, in_out, opts)->Synchronize();
// step 4, obtain exp(logit)
eigen_softmax.device(*dev_ctx.eigen_device()) = eigen_softmax.exp();
// step 5, obtain sum_exp_logits
Tensor sum_exp_logits;
sum_exp_logits =
ctx.AllocateTmpTensor<T, platform::CUDADeviceContext>({N, 1}, dev_ctx);
void* sum_exp_logits_buff = sum_exp_logits.mutable_data<T>(place);
auto eigen_sum_exp_logits = math::EigenMatrix<T>::From(sum_exp_logits);
eigen_sum_exp_logits.device(*dev_ctx.eigen_device()) =
eigen_softmax.sum(along_axis);
in_out.clear();
in_out.push_back(sum_exp_logits);
pg->AllReduce(in_out, in_out, opts)->Synchronize();
auto eigen_loss = math::EigenMatrix<T>::From(loss_2d);
auto eigen_predicted_logits = math::EigenMatrix<T>::From(predicted_logits);
eigen_loss.device(*dev_ctx.eigen_device()) =
(eigen_sum_exp_logits.log().unaryExpr(math::TolerableValue<T>()) -
eigen_predicted_logits)
.unaryExpr(math::TolerableValue<T>());
eigen_softmax.device(*dev_ctx.eigen_device()) =
(eigen_softmax *
eigen_sum_exp_logits.inverse().broadcast(one_by_class));
}
};
template <typename T>
class CSoftmaxWithCrossEntropyGradCUDAKernel : public framework::OpKernel<T> {
public:
void Compute(const framework::ExecutionContext& context) const override {
const Tensor* labels = context.Input<Tensor>("Label");
const Tensor* loss_grad =
context.Input<Tensor>(framework::GradVarName("Loss"));
Tensor* logit_grad =
context.Output<Tensor>(framework::GradVarName("Logits"));
const Tensor* softmax = context.Input<Tensor>("Softmax");
const int rank = context.Attr<int>("rank");
auto& dev_ctx =
context.template device_context<platform::CUDADeviceContext>();
if (logit_grad != softmax) {
framework::TensorCopy(*softmax, context.GetPlace(),
context.device_context(), logit_grad);
}
const auto sofrmax_dims = softmax->dims();
const int axis = sofrmax_dims.size() - 1;
const int N = phi::funcs::SizeToAxis(axis, sofrmax_dims);
const int D = phi::funcs::SizeFromAxis(axis, sofrmax_dims);
Tensor logit_grad_2d;
logit_grad_2d.ShareDataWith(*logit_grad).Resize({N, D});
int blocks = NumBlocks(N * D);
int threads = kNumCUDAThreads;
const auto& label_type = framework::TransToProtoVarType(labels->dtype());
const int start_index = rank * D;
const int end_index = start_index + D;
if (label_type == framework::proto::VarType::INT32) {
MaskLabelByIndexGrad<T,
int32_t><<<blocks, threads, 0, dev_ctx.stream()>>>(
logit_grad_2d.data<T>(), loss_grad->data<T>(),
labels->data<int32_t>(), start_index, end_index, N, D);
} else if (label_type == framework::proto::VarType::INT64) {
MaskLabelByIndexGrad<T,
int64_t><<<blocks, threads, 0, dev_ctx.stream()>>>(
logit_grad_2d.data<T>(), loss_grad->data<T>(),
labels->data<int64_t>(), start_index, end_index, N, D);
}
}
};
} // namespace operators
} // namespace paddle
namespace ops = paddle::operators;
namespace plat = paddle::platform;
REGISTER_OP_CUDA_KERNEL(
c_softmax_with_cross_entropy,
ops::CSoftmaxWithCrossEntropyOpCUDAKernel<float>,
ops::CSoftmaxWithCrossEntropyOpCUDAKernel<double>,
ops::CSoftmaxWithCrossEntropyOpCUDAKernel<plat::float16>);
REGISTER_OP_CUDA_KERNEL(
c_softmax_with_cross_entropy_grad,
ops::CSoftmaxWithCrossEntropyGradCUDAKernel<float>,
ops::CSoftmaxWithCrossEntropyGradCUDAKernel<paddle::platform::float16>,
ops::CSoftmaxWithCrossEntropyGradCUDAKernel<double>);
|
11f82cc4a395f04a05c7ffe70defa901a4ba39bf.hip
|
// !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
// Stokes Flow with Drag Component kernel
// Last updated: 02/14/13
#ifndef _SLIP_KERNEL_H_
#define _SLIP_KERNEL_H_
// Thread block size
#define POINTS 256
#define MATRIX_SIZE 16
#define THREADS 256
#define BLOCKS 256
#define BLOCK_SIZE THREADS/BLOCKS
#define BLOCK_AREA THREADS*BLOCKS
__constant__ double INVERSE_QUARTER_PI = M_1_PI/4.0f;
// CUDA kernel
__global__ void SlipKernel(double *xd_at, double *yd_at, double* xd_dueto, double* yd_dueto, double* Fxd, double* Fyd, double* Vxd, double* Vyd, double visc, double e, double esq)
{
// block ID
int bx = blockIdx.x;
// cache thread ID
int tx = threadIdx.x;
int idx = threadIdx.x + blockIdx.x * THREADS;
// Declaration of shared memory arrays
__shared__ float cache_x[THREADS];
__shared__ float cache_y[THREADS];
int at = floor((float)tx/MATRIX_SIZE) + floor((float)bx/MATRIX_SIZE)*MATRIX_SIZE;
int dueto = (idx % MATRIX_SIZE) + MATRIX_SIZE*(bx%MATRIX_SIZE);
//int at = floor((float)idx/POINTS); /* 4 right shifting is probably faster than division by 16 */
//int dueto = idx % POINTS;
//printf("DBG: thrd %d, blk %d, idx %d, at %d, dueto %d\n", tx, bx, idx, at, dueto);
// Each thread fills Stokeslet matrix
double rk = sqrt(powf(xd_at[at]-xd_dueto[dueto],2) + powf(yd_at[at]-yd_dueto[dueto],2));
double sq = sqrtf(powf(rk,2) + esq);
double p1 = (INVERSE_QUARTER_PI/visc) * (logf(sq+e)-(e*(sq+2*e))/(sq*(sq+e)));
double p2 = (INVERSE_QUARTER_PI/visc) * (sq+2*e)/(sq*powf(sq+e,2));
// Sub-Stokeslet matrix
cache_x[tx] = -p1*Fxd[dueto] + p2*(powf(xd_at[at]-xd_dueto[dueto],2)*Fxd[dueto] + (xd_at[at]-xd_dueto[dueto])*(yd_at[at]-yd_dueto[dueto])*Fyd[dueto]);
cache_y[tx] = -p1*Fyd[dueto] + p2*((xd_at[at]-xd_dueto[dueto])*(yd_at[at]-yd_dueto[dueto])*Fxd[dueto] + powf(yd_at[at]-yd_dueto[dueto],2)*Fyd[dueto]);
//#define TEST_REDUCTION2
#ifdef TEST_REDUCTION
cache_x[tx] = 1.0;
cache_y[tx] = 1.0;
#endif
#ifdef TEST_REDUCTION2
cache_x[tx] = tx%MATRIX_SIZE;
cache_y[tx] = tx%MATRIX_SIZE;
#endif
// Synchronize all threads in a block to ensure submatrix is computed and loaded
__syncthreads();
//printf("DBG: thrd:%d block:%d & stokeslet (%f, %f)\n", tx, bx, cache_x[tx], cache_y[tx]);
// Reduction
// only half the threads work (rest chill and go on for the ride)
int j = blockDim.x/2; // keeps track of active threads
int k = MATRIX_SIZE/2; // keeps track of which neighbor you add you value with & simulateounsly
// many entries per row should be changed by this code
while (j >= MATRIX_SIZE ) {
if ( (tx%MATRIX_SIZE) < k ) { // for each row we add your value + value of k away neighbor
cache_x[tx] = cache_x[tx] + cache_x[tx+k];
cache_y[tx] = cache_y[tx] + cache_y[tx+k];
}
j = j >> 1;
k = k >> 1;
__syncthreads();
}
#if 0
for (i=0;i<MATRIX_SIZE;i++)
printf("[%d] %d: %f %f %f %f %f %f %f %f %f %f %f %f %f %f %f %f\n", tx, i, cache_x[i*MATRIX_SIZE],cache_x[i*MATRIX_SIZE+1],cache_x[i*MATRIX_SIZE+2],cache_x[i*MATRIX_SIZE+3],cache_x[i*MATRIX_SIZE+4],cache_x[i*MATRIX_SIZE+5],cache_x[i*MATRIX_SIZE+6],cache_x[i*MATRIX_SIZE+7],cache_x[i*MATRIX_SIZE+8],cache_x[i*MATRIX_SIZE+9],cache_x[i*MATRIX_SIZE+10],cache_x[i*MATRIX_SIZE+11],cache_x[i*MATRIX_SIZE+12],cache_x[i*MATRIX_SIZE+13],cache_x[i*MATRIX_SIZE+14],cache_x[i*MATRIX_SIZE+15]);
#endif
// Update velocity per stride
if( idx%MATRIX_SIZE == 0) {
Vxd[idx/MATRIX_SIZE] = cache_x[tx];
Vyd[idx/MATRIX_SIZE] = cache_y[tx];
}
//if ( (idx%MATRIX_SIZE == 0) ) {
// Vxd[idx/MATRIX_SIZE] = 1.0;
// Vyd[idx/MATRIX_SIZE] = 1.0;
//}
//if ( (idx%POINTS == 0) ) {
// for(int i=0; i<POINTS/MATRIX_SIZE; i++) {
// Vxd[idx/POINTS] += cache_x[tx+i*MATRIX_SIZE];
// Vyd[idx/POINTS] += cache_y[tx+i*MATRIX_SIZE];
// }
//}
}
#endif // #ifndef _SLIP_KERNEL_H
|
11f82cc4a395f04a05c7ffe70defa901a4ba39bf.cu
|
// Stokes Flow with Drag Component kernel
// Last updated: 02/14/13
#ifndef _SLIP_KERNEL_H_
#define _SLIP_KERNEL_H_
// Thread block size
#define POINTS 256
#define MATRIX_SIZE 16
#define THREADS 256
#define BLOCKS 256
#define BLOCK_SIZE THREADS/BLOCKS
#define BLOCK_AREA THREADS*BLOCKS
__constant__ double INVERSE_QUARTER_PI = M_1_PI/4.0f;
// CUDA kernel
__global__ void SlipKernel(double *xd_at, double *yd_at, double* xd_dueto, double* yd_dueto, double* Fxd, double* Fyd, double* Vxd, double* Vyd, double visc, double e, double esq)
{
// block ID
int bx = blockIdx.x;
// cache thread ID
int tx = threadIdx.x;
int idx = threadIdx.x + blockIdx.x * THREADS;
// Declaration of shared memory arrays
__shared__ float cache_x[THREADS];
__shared__ float cache_y[THREADS];
int at = floor((float)tx/MATRIX_SIZE) + floor((float)bx/MATRIX_SIZE)*MATRIX_SIZE;
int dueto = (idx % MATRIX_SIZE) + MATRIX_SIZE*(bx%MATRIX_SIZE);
//int at = floor((float)idx/POINTS); /* 4 right shifting is probably faster than division by 16 */
//int dueto = idx % POINTS;
//printf("DBG: thrd %d, blk %d, idx %d, at %d, dueto %d\n", tx, bx, idx, at, dueto);
// Each thread fills Stokeslet matrix
double rk = sqrt(powf(xd_at[at]-xd_dueto[dueto],2) + powf(yd_at[at]-yd_dueto[dueto],2));
double sq = sqrtf(powf(rk,2) + esq);
double p1 = (INVERSE_QUARTER_PI/visc) * (logf(sq+e)-(e*(sq+2*e))/(sq*(sq+e)));
double p2 = (INVERSE_QUARTER_PI/visc) * (sq+2*e)/(sq*powf(sq+e,2));
// Sub-Stokeslet matrix
cache_x[tx] = -p1*Fxd[dueto] + p2*(powf(xd_at[at]-xd_dueto[dueto],2)*Fxd[dueto] + (xd_at[at]-xd_dueto[dueto])*(yd_at[at]-yd_dueto[dueto])*Fyd[dueto]);
cache_y[tx] = -p1*Fyd[dueto] + p2*((xd_at[at]-xd_dueto[dueto])*(yd_at[at]-yd_dueto[dueto])*Fxd[dueto] + powf(yd_at[at]-yd_dueto[dueto],2)*Fyd[dueto]);
//#define TEST_REDUCTION2
#ifdef TEST_REDUCTION
cache_x[tx] = 1.0;
cache_y[tx] = 1.0;
#endif
#ifdef TEST_REDUCTION2
cache_x[tx] = tx%MATRIX_SIZE;
cache_y[tx] = tx%MATRIX_SIZE;
#endif
// Synchronize all threads in a block to ensure submatrix is computed and loaded
__syncthreads();
//printf("DBG: thrd:%d block:%d & stokeslet (%f, %f)\n", tx, bx, cache_x[tx], cache_y[tx]);
// Reduction
// only half the threads work (rest chill and go on for the ride)
int j = blockDim.x/2; // keeps track of active threads
int k = MATRIX_SIZE/2; // keeps track of which neighbor you add you value with & simulateounsly
// many entries per row should be changed by this code
while (j >= MATRIX_SIZE ) {
if ( (tx%MATRIX_SIZE) < k ) { // for each row we add your value + value of k away neighbor
cache_x[tx] = cache_x[tx] + cache_x[tx+k];
cache_y[tx] = cache_y[tx] + cache_y[tx+k];
}
j = j >> 1;
k = k >> 1;
__syncthreads();
}
#if 0
for (i=0;i<MATRIX_SIZE;i++)
printf("[%d] %d: %f %f %f %f %f %f %f %f %f %f %f %f %f %f %f %f\n", tx, i, cache_x[i*MATRIX_SIZE],cache_x[i*MATRIX_SIZE+1],cache_x[i*MATRIX_SIZE+2],cache_x[i*MATRIX_SIZE+3],cache_x[i*MATRIX_SIZE+4],cache_x[i*MATRIX_SIZE+5],cache_x[i*MATRIX_SIZE+6],cache_x[i*MATRIX_SIZE+7],cache_x[i*MATRIX_SIZE+8],cache_x[i*MATRIX_SIZE+9],cache_x[i*MATRIX_SIZE+10],cache_x[i*MATRIX_SIZE+11],cache_x[i*MATRIX_SIZE+12],cache_x[i*MATRIX_SIZE+13],cache_x[i*MATRIX_SIZE+14],cache_x[i*MATRIX_SIZE+15]);
#endif
// Update velocity per stride
if( idx%MATRIX_SIZE == 0) {
Vxd[idx/MATRIX_SIZE] = cache_x[tx];
Vyd[idx/MATRIX_SIZE] = cache_y[tx];
}
//if ( (idx%MATRIX_SIZE == 0) ) {
// Vxd[idx/MATRIX_SIZE] = 1.0;
// Vyd[idx/MATRIX_SIZE] = 1.0;
//}
//if ( (idx%POINTS == 0) ) {
// for(int i=0; i<POINTS/MATRIX_SIZE; i++) {
// Vxd[idx/POINTS] += cache_x[tx+i*MATRIX_SIZE];
// Vyd[idx/POINTS] += cache_y[tx+i*MATRIX_SIZE];
// }
//}
}
#endif // #ifndef _SLIP_KERNEL_H
|
6e9c968333779aa926a74c249f92d4e4492acf00.hip
|
// !!! This is a file automatically generated by hipify!!!
/*-------------------------------------------------------------------------
*
* CUDA functions for texture-memory interpolation based projection
*
* This file has the necessary functions to perform X-ray CBCT projection
* operation given a geometry, angles and image. It uses the 3D texture
* memory linear interpolation to uniformily sample a path to integrate the
* X-rays.
*
* CODE by Ander Biguri
* Sepideh Hatamikia (arbitrary rotation)
* ---------------------------------------------------------------------------
* ---------------------------------------------------------------------------
* Copyright (c) 2015, University of Bath and CERN- European Organization for
* Nuclear Research
* All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions are met:
*
* 1. Redistributions of source code must retain the above copyright notice,
* this list of conditions and the following disclaimer.
*
* 2. Redistributions in binary form must reproduce the above copyright notice,
* this list of conditions and the following disclaimer in the documentation
* and/or other materials provided with the distribution.
*
* 3. Neither the name of the copyright holder nor the names of its contributors
* may be used to endorse or promote products derived from this software without
* specific prior written permission.
*
* THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
* AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
* IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
* ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE
* LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
* CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
* SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
* INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
* CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
* ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
* POSSIBILITY OF SUCH DAMAGE.
* ---------------------------------------------------------------------------
*
* Contact: tigre.toolbox@gmail.com
* Codes : https://github.com/CERN/TIGRE
* ---------------------------------------------------------------------------
*/
#include <algorithm>
#include <hip/hip_runtime_api.h>
#include <hip/hip_runtime.h>
#include "ray_interpolated_projection.hpp"
#include "TIGRE_common.hpp"
#include <math.h>
#define cudaCheckErrors(msg) \
do { \
hipError_t __err = hipGetLastError(); \
if (__err != hipSuccess) { \
mexPrintf("%s \n",msg);\
hipDeviceReset();\
mexErrMsgIdAndTxt("TIGRE:Ax:interpolated",hipGetErrorString(__err));\
} \
} while (0)
#define MAXTREADS 1024
#define PROJ_PER_BLOCK 9
#define PIXEL_SIZE_BLOCK 9
/*GEOMETRY DEFINITION
*
* Detector plane, behind
* |-----------------------------|
* | |
* | |
* | |
* | |
* | +--------+ |
* | / /| |
* A Z | / / |*D |
* | | +--------+ | |
* | | | | | |
* | | | *O | + |
* --->y | | | / |
* / | | |/ |
* V X | +--------+ |
* |-----------------------------|
*
* *S
*
*
*
*
*
**/
void CreateTextureInterp(const GpuIds& gpuids,const float* imagedata,Geometry geo,hipArray** d_cuArrTex, hipTextureObject_t *texImage,bool allocate);
__constant__ Point3D projParamsArrayDev[4*PROJ_PER_BLOCK]; // Dev means it is on device
__constant__ float projFloatsArrayDev[2*PROJ_PER_BLOCK]; // Dev means it is on device
// In-place element-wise vector addition: a[i] += b[i] for i in [0, n).
// One thread per element; launch with enough 1-D blocks to cover n.
__global__ void vecAddInPlaceInterp(float *a, float *b, unsigned long n)
{
    // Use an unsigned 64-bit index: the previous "int" index could overflow in
    // blockIdx.x*blockDim.x and was compared signed-vs-unsigned against n.
    unsigned long long idx = (unsigned long long)blockIdx.x*blockDim.x + threadIdx.x;
    // Make sure we do not go out of bounds (partially filled last block)
    if (idx < n)
        a[idx] = a[idx] + b[idx];
}
// Ray-driven projection kernel: each thread integrates one X-ray path from the
// source to one detector pixel (u,v) of one projection in the current set of
// PROJ_PER_BLOCK projections (selected by threadIdx.z), sampling the image
// through the 3-D texture `tex`. Per-projection geometry comes from the
// constant-memory arrays projParamsArrayDev / projFloatsArrayDev.
// Expected launch: 2-D grid over the detector, blockDim.z == PROJ_PER_BLOCK.
template<bool sphericalrotation>
__global__ void kernelPixelDetector( Geometry geo,
float* detector,
const int currProjSetNumber,
const int totalNoOfProjections,
hipTextureObject_t tex){
unsigned long long u = blockIdx.x * blockDim.x + threadIdx.x;
unsigned long long v = blockIdx.y * blockDim.y + threadIdx.y;
unsigned long long projNumber=threadIdx.z;
// Bounds guard: detector size rarely divides the block size exactly.
if (u>= geo.nDetecU || v>= geo.nDetecV || projNumber>=PROJ_PER_BLOCK)
return;
#if IS_FOR_MATLAB_TIGRE
size_t idx = (size_t)(u * (unsigned long long)geo.nDetecV + v)+ projNumber*(unsigned long long)geo.nDetecV *(unsigned long long)geo.nDetecU ;
#else
size_t idx = (size_t)(v * (unsigned long long)geo.nDetecU + u)+ projNumber*(unsigned long long)geo.nDetecV *(unsigned long long)geo.nDetecU ;
#endif
unsigned long indAlpha = currProjSetNumber*PROJ_PER_BLOCK+projNumber; // This is the ABSOLUTE projection number in the projection array
// The last set may contain fewer than PROJ_PER_BLOCK projections.
if(indAlpha>=totalNoOfProjections)
return;
Point3D uvOrigin = projParamsArrayDev[4*projNumber]; // 4*projNumber because we have 4 Point3D values per projection
Point3D deltaU = projParamsArrayDev[4*projNumber+1];
Point3D deltaV = projParamsArrayDev[4*projNumber+2];
Point3D source = projParamsArrayDev[4*projNumber+3];
float DSO = projFloatsArrayDev[2*projNumber+0];
float cropdist_init = projFloatsArrayDev[2*projNumber+1];
/////// Get coordinates XYZ of pixel UV
unsigned long pixelV = geo.nDetecV-v-1;
unsigned long pixelU = u;
float vectX,vectY,vectZ;
Point3D P;
P.x=(uvOrigin.x+pixelU*deltaU.x+pixelV*deltaV.x);
P.y=(uvOrigin.y+pixelU*deltaU.y+pixelV*deltaV.y);
P.z=(uvOrigin.z+pixelU*deltaU.z+pixelV*deltaV.z);
// Length is the ray length in normalized space
float length=__fsqrt_rd((source.x-P.x)*(source.x-P.x)+(source.y-P.y)*(source.y-P.y)+(source.z-P.z)*(source.z-P.z));
//now length is an integer number of samples that are required on this line
length=ceilf(__fdividef(length,geo.accuracy));//Divide the directional vector by an integer
vectX=__fdividef(P.x -source.x,length);
vectY=__fdividef(P.y -source.y,length);
vectZ=__fdividef(P.z -source.z,length);
// //Integrate over the line
float tx,ty,tz;
float sum=0;
float i;
// Because I have no idea how to efficiently cutoff the legth path in 3D, a very upper limit is computed (see maxdistanceCuboid)
// for the 3D case. However it would be bad to lose performance in the 3D case
// TODO: can ge really improve this?
if (sphericalrotation){
if ((2*DSO/fminf(fminf(geo.dVoxelX,geo.dVoxelY),geo.dVoxelZ)+cropdist_init)/geo.accuracy < length)
length=ceilf((2*DSO/fminf(fminf(geo.dVoxelX,geo.dVoxelY),geo.dVoxelZ)+cropdist_init)/geo.accuracy);
}
else{
if ((2*DSO/fminf(geo.dVoxelX,geo.dVoxelY)+cropdist_init)/geo.accuracy < length)
length=ceilf((2*DSO/fminf(geo.dVoxelX,geo.dVoxelY)+cropdist_init)/geo.accuracy);
}
//Length is not actually a length, but the amount of memreads with given accuracy ("samples per voxel")
// Start at cropdist_init (safe skip computed on the host) and march along the ray.
for (i=floorf(cropdist_init/geo.accuracy); i<=length; i=i+1){
tx=vectX*i+source.x;
ty=vectY*i+source.y;
tz=vectZ*i+source.z;
sum += tex3D<float>(tex, tx+0.5f, ty+0.5f, tz+0.5f); // this line is 94% of time.
}
// Convert the per-sample sum into a line integral in world units (mm per step).
float deltalength=sqrtf((vectX*geo.dVoxelX)*(vectX*geo.dVoxelX)+
(vectY*geo.dVoxelY)*(vectY*geo.dVoxelY)+
(vectZ*geo.dVoxelZ)*(vectZ*geo.dVoxelZ) );
detector[idx]=sum*deltalength;
}
// length(angles) = 3 x nangles, as we have roll, pitch, yaw.
/* Projects the image `img` for all `nangles` angles into the host buffers
 * `result` (one buffer per projection), distributing angles across the GPUs
 * in `gpuids`. If the image does not fit in GPU memory it is split into
 * Z-slabs and the partial projections of each slab are accumulated.
 * Uses two streams per device to overlap kernel execution with copies.
 * Returns 0 on success (errors abort via mexErrMsgIdAndTxt).
 * NOTE(review): `angles` is assumed to hold 3 floats (alpha,theta,psi) per
 * projection — confirm against callers.
 */
int interpolation_projection(float * img, Geometry geo, float** result,float const * const angles,int nangles, const GpuIds& gpuids){
// Prepare for MultiGPU
int deviceCount = gpuids.GetLength();
cudaCheckErrors("Device query fail");
if (deviceCount == 0) {
mexErrMsgIdAndTxt("Ax:Interpolated_projection:GPUselect","There are no available device(s) that support CUDA\n");
}
//
// CODE assumes
// 1.-All available devices are usable by this code
// 2.-All available devices are equal, they are the same machine (warning thrown)
// Check the available devices, and if they are the same
if (!gpuids.AreEqualDevices()) {
mexWarnMsgIdAndTxt("Ax:Interpolated_projection:GPUselect","Detected one (or more) different GPUs.\n This code is not smart enough to separate the memory GPU wise if they have different computational times or memory limits.\n First GPU parameters used. If the code errors you might need to change the way GPU selection is performed.");
}
int dev;
// Check free memory
size_t mem_GPU_global;
checkFreeMemory(gpuids,&mem_GPU_global);
// printf("geo.nDetec (U, V) = %d, %d\n", geo.nDetecU, geo.nDetecV);
size_t mem_image=(unsigned long long)geo.nVoxelX*(unsigned long long)geo.nVoxelY*(unsigned long long)geo.nVoxelZ*sizeof(float);
size_t mem_proj =(unsigned long long)geo.nDetecU*(unsigned long long)geo.nDetecV * sizeof(float);
// Does everything fit in the GPUs?
const bool fits_in_memory = mem_image+2*PROJ_PER_BLOCK*mem_proj<mem_GPU_global;
unsigned int splits=1;
if (!fits_in_memory) {
// Nope nope.
// approx free memory we have. We already have left some extra 5% free for internal stuff
// we need a second projection memory to combine multi-GPU stuff.
size_t mem_free=mem_GPU_global-4*PROJ_PER_BLOCK*mem_proj;
splits=mem_image/mem_free+1;// Ceil of the truncation
}
// Build one Geometry per Z-slab (split) of the image.
Geometry* geoArray = (Geometry*)malloc(splits*sizeof(Geometry));
splitImageInterp(splits,geo,geoArray,nangles);
// Allocate auxiliary memory for projections on the GPU to accumulate partial results
float ** dProjection_accum;
size_t num_bytes_proj = PROJ_PER_BLOCK*geo.nDetecU*geo.nDetecV * sizeof(float);
if (!fits_in_memory){
dProjection_accum=(float**)malloc(2*deviceCount*sizeof(float*));
for (dev = 0; dev < deviceCount; dev++) {
hipSetDevice(gpuids[dev]);
for (int i = 0; i < 2; ++i){
hipMalloc((void**)&dProjection_accum[dev*2+i], num_bytes_proj);
hipMemset(dProjection_accum[dev*2+i],0,num_bytes_proj);
cudaCheckErrors("cudaMallocauxiliarty projections fail");
}
}
}
// This is happening regarthless if the image fits on memory
// Two (double-buffered) projection-set buffers per device.
float** dProjection=(float**)malloc(2*deviceCount*sizeof(float*));
for (dev = 0; dev < deviceCount; dev++){
hipSetDevice(gpuids[dev]);
for (int i = 0; i < 2; ++i){
hipMalloc((void**)&dProjection[dev*2+i], num_bytes_proj);
hipMemset(dProjection[dev*2+i] ,0,num_bytes_proj);
cudaCheckErrors("hipMalloc projections fail");
}
}
//Pagelock memory for synchronous copy.
// Lets try to make the host memory pinned:
// We laredy queried the GPU and assuemd they are the same, thus should have the same attributes.
int isHostRegisterSupported = 0;
#if CUDART_VERSION >= 9020
hipDeviceGetAttribute(&isHostRegisterSupported,hipDeviceAttributeHostRegisterSupported,gpuids[0]);
#endif
// empirical testing shows that when the image split is smaller than 1 (also implies the image is not very big), the time to
// pin the memory is greater than the lost time in Synchronously launching the memcpys. This is only worth it when the image is too big.
#ifndef NO_PINNED_MEMORY
if (isHostRegisterSupported & splits>1){
hipHostRegister(img, (size_t)geo.nVoxelX*(size_t)geo.nVoxelY*(size_t)geo.nVoxelZ*(size_t)sizeof(float),hipHostRegisterPortable);
}
cudaCheckErrors("Error pinning memory");
#endif
// Pinned host staging buffers for the per-projection-set constant memory.
Point3D source, deltaU, deltaV, uvOrigin;
Point3D* projParamsArrayHost = 0;
hipHostMalloc((void**)&projParamsArrayHost,4*PROJ_PER_BLOCK*sizeof(Point3D));
float* projFloatsArrayHost = 0;
hipHostMalloc((void**)&projFloatsArrayHost,2*PROJ_PER_BLOCK*sizeof(float));
cudaCheckErrors("Error allocating auxiliary constant memory");
// Create Streams for overlapping memcopy and compute
int nStream_device=2;
int nStreams=deviceCount*nStream_device;
hipStream_t* stream=(hipStream_t*)malloc(nStreams*sizeof(hipStream_t));
for (dev = 0; dev < deviceCount; dev++){
hipSetDevice(gpuids[dev]);
for (int i = 0; i < nStream_device; ++i){
hipStreamCreate(&stream[i+dev*nStream_device]);
}
}
cudaCheckErrors("Stream creation fail");
// Angle partitioning: each device gets a contiguous chunk of nangles_device angles.
int nangles_device=(nangles+deviceCount-1)/deviceCount;
int nangles_last_device=(nangles-(deviceCount-1)*nangles_device);
unsigned int noOfKernelCalls = (nangles_device+PROJ_PER_BLOCK-1)/PROJ_PER_BLOCK; // We'll take care of bounds checking inside the loop if nalpha is not divisible by PROJ_PER_BLOCK
unsigned int noOfKernelCallsLastDev = (nangles_last_device+PROJ_PER_BLOCK-1)/PROJ_PER_BLOCK; // we will use this in the memory management.
int projection_this_block;
hipTextureObject_t *texImg = new hipTextureObject_t[deviceCount];
hipArray **d_cuArrTex = new hipArray*[deviceCount];
for (unsigned int sp=0;sp<splits;sp++){
// Create texture objects for all GPUs
size_t linear_idx_start;
// They are all the same size, except the last one.
linear_idx_start= (size_t)sp*(size_t)geoArray[0].nVoxelX*(size_t)geoArray[0].nVoxelY*(size_t)geoArray[0].nVoxelZ;
CreateTextureInterp(gpuids,&img[linear_idx_start],geoArray[sp],d_cuArrTex,texImg,!sp);
cudaCheckErrors("Texture object creation fail");
int divU,divV;
divU=PIXEL_SIZE_BLOCK;
divV=PIXEL_SIZE_BLOCK;
dim3 grid((geoArray[sp].nDetecU+divU-1)/divU,(geoArray[0].nDetecV+divV-1)/divV,1);
dim3 block(divU,divV,PROJ_PER_BLOCK);
unsigned int proj_global;
float maxdist;
// Now that we have prepared the image (piece of image) and parameters for kernels
// we project for all angles.
for (unsigned int i=0; i<noOfKernelCalls; i++) {
for (dev=0;dev<deviceCount;dev++){
float is_spherical=0;
hipSetDevice(gpuids[dev]);
// Fill the per-projection constants for this set of up to PROJ_PER_BLOCK angles.
for(unsigned int j=0; j<PROJ_PER_BLOCK; j++){
proj_global=(i*PROJ_PER_BLOCK+j)+dev*nangles_device;
if (proj_global>=nangles)
break;
if ((i*PROJ_PER_BLOCK+j)>=nangles_device)
break;
geoArray[sp].alpha=angles[proj_global*3];
geoArray[sp].theta=angles[proj_global*3+1];
geoArray[sp].psi =angles[proj_global*3+2];
is_spherical+=abs(geoArray[sp].theta)+abs(geoArray[sp].psi);
//precomute distances for faster execution
maxdist=maxdistanceCuboid(geoArray[sp],proj_global);
//Precompute per angle constant stuff for speed
computeDeltas(geoArray[sp], proj_global, &uvOrigin, &deltaU, &deltaV, &source);
//Ray tracing!
projParamsArrayHost[4*j]=uvOrigin; // 4*j because we have 4 Point3D values per projection
projParamsArrayHost[4*j+1]=deltaU;
projParamsArrayHost[4*j+2]=deltaV;
projParamsArrayHost[4*j+3]=source;
projFloatsArrayHost[2*j]=geo.DSO[proj_global];
projFloatsArrayHost[2*j+1]=floor(maxdist);
}
hipMemcpyToSymbolAsync(projParamsArrayDev, projParamsArrayHost, sizeof(Point3D)*4*PROJ_PER_BLOCK,0,hipMemcpyHostToDevice,stream[dev*nStream_device]);
hipMemcpyToSymbolAsync(projFloatsArrayDev, projFloatsArrayHost, sizeof(float)*2*PROJ_PER_BLOCK,0,hipMemcpyHostToDevice,stream[dev*nStream_device]);
hipStreamSynchronize(stream[dev*nStream_device]);
//TODO: we could do this around X and Y axis too, but we would need to compute the new axis of rotation (not possible to know from jsut the angles)
// Pick the cheaper circular-orbit kernel when no spherical rotation is present.
if (!is_spherical){
hipLaunchKernelGGL(( kernelPixelDetector<false>), dim3(grid),dim3(block),0,stream[dev*nStream_device], geoArray[sp],dProjection[(i%2)+dev*2],i,nangles_device,texImg[dev]);
}
else{
hipLaunchKernelGGL(( kernelPixelDetector<true>) , dim3(grid),dim3(block),0,stream[dev*nStream_device], geoArray[sp],dProjection[(i%2)+dev*2],i,nangles_device,texImg[dev]);
}
}
// Now that the computation is happening, we need to either prepare the memory for
// combining of the projections (splits>1) and start removing previous results.
// If our image does not fit in memory then we need to make sure we accumulate previous results too.
// This is done in 2 steps:
// 1)copy previous results back into GPU
// 2)accumulate with current results
// The code to take them out is the same as when there are no splits needed
if( !fits_in_memory&&sp>0)
{
// 1) grab previous results and put them in the auxiliary variable dProjection_accum
for (dev = 0; dev < deviceCount; dev++)
{
hipSetDevice(gpuids[dev]);
//Global index of FIRST projection on this set on this GPU
proj_global=i*PROJ_PER_BLOCK+dev*nangles_device;
if(proj_global>=nangles)
break;
// Unless its the last projection set, we have PROJ_PER_BLOCK angles. Otherwise...
if(i+1==noOfKernelCalls) //is it the last block?
projection_this_block=min(nangles_device-(noOfKernelCalls-1)*PROJ_PER_BLOCK, //the remaining angles that this GPU had to do (almost never PROJ_PER_BLOCK)
nangles-proj_global); //or whichever amount is left to finish all (this is for the last GPU)
else
projection_this_block=PROJ_PER_BLOCK;
hipMemcpyAsync(dProjection_accum[(i%2)+dev*2], result[proj_global], projection_this_block*geo.nDetecV*geo.nDetecU*sizeof(float), hipMemcpyHostToDevice,stream[dev*2+1]);
}
// 2) take the results from current compute call and add it to the code in execution.
for (dev = 0; dev < deviceCount; dev++)
{
hipSetDevice(gpuids[dev]);
//Global index of FIRST projection on this set on this GPU
proj_global=i*PROJ_PER_BLOCK+dev*nangles_device;
if(proj_global>=nangles)
break;
// Unless its the last projection set, we have PROJ_PER_BLOCK angles. Otherwise...
if(i+1==noOfKernelCalls) //is it the last block?
projection_this_block=min(nangles_device-(noOfKernelCalls-1)*PROJ_PER_BLOCK, //the remaining angles that this GPU had to do (almost never PROJ_PER_BLOCK)
nangles-proj_global); //or whichever amount is left to finish all (this is for the last GPU)
else
projection_this_block=PROJ_PER_BLOCK;
hipStreamSynchronize(stream[dev*2+1]); // wait until copy is finished
hipLaunchKernelGGL(( vecAddInPlaceInterp), dim3((geo.nDetecU*geo.nDetecV*projection_this_block+MAXTREADS-1)/MAXTREADS),dim3(MAXTREADS),0,stream[dev*2], dProjection[(i%2)+dev*2],dProjection_accum[(i%2)+dev*2],(unsigned long)geo.nDetecU*geo.nDetecV*projection_this_block);
}
} // end accumulation case, where the image needs to be split
// Now, lets get out the projections from the previous execution of the kernels.
if (i>0)
{
for (dev = 0; dev < deviceCount; dev++)
{
hipSetDevice(gpuids[dev]);
//Global index of FIRST projection on previous set on this GPU
proj_global=(i-1)*PROJ_PER_BLOCK+dev*nangles_device;
if (dev+1==deviceCount) { //is it the last device?
// projections assigned to this device is >=nangles_device-(deviceCount-1) and < nangles_device
if (i-1 < noOfKernelCallsLastDev) {
// The previous set(block) was not empty.
projection_this_block=min(PROJ_PER_BLOCK, nangles-proj_global);
}
else {
// The previous set was empty.
// This happens if deviceCount > PROJ_PER_BLOCK+1.
// e.g. PROJ_PER_BLOCK = 9, deviceCount = 11, nangles = 199.
// e.g. PROJ_PER_BLOCK = 1, deviceCount = 3, nangles = 7.
break;
}
}
else {
projection_this_block=PROJ_PER_BLOCK;
}
hipMemcpyAsync(result[proj_global], dProjection[(int)(!(i%2))+dev*2], projection_this_block*geo.nDetecV*geo.nDetecU*sizeof(float), hipMemcpyDeviceToHost,stream[dev*2+1]);
}
}
// Make sure Computation on kernels has finished before we launch the next batch.
for (dev = 0; dev < deviceCount; dev++)
{
hipSetDevice(gpuids[dev]);
hipStreamSynchronize(stream[dev*2]);
}
} // End noOfKernelCalls (i) loop.
// We still have the last set of projections to get out of GPUs
for (dev = 0; dev < deviceCount; dev++)
{
hipSetDevice(gpuids[dev]);
//Global index of FIRST projection on this set on this GPU
proj_global=(noOfKernelCalls-1)*PROJ_PER_BLOCK+dev*nangles_device;
if(proj_global>=nangles)
break;
// How many projections are left here?
projection_this_block=min(nangles_device-(noOfKernelCalls-1)*PROJ_PER_BLOCK, //the remaining angles that this GPU had to do (almost never PROJ_PER_BLOCK)
nangles-proj_global); //or whichever amount is left to finish all (this is for the last GPU)
hipDeviceSynchronize(); //Not really necesary, but just in case, we los nothing.
cudaCheckErrors("Error at copying the last set of projections out (or in the previous copy)");
hipMemcpyAsync(result[proj_global], dProjection[(int)(!(noOfKernelCalls%2))+dev*2], projection_this_block*geo.nDetecV*geo.nDetecU*sizeof(float), hipMemcpyDeviceToHost,stream[dev*2+1]);
}
// Make sure everyone has done their bussiness before the next image split:
for (dev = 0; dev < deviceCount; dev++)
{
hipSetDevice(gpuids[dev]);
hipDeviceSynchronize();
}
} // End image split loop.
cudaCheckErrors("Main loop fail");
///////////////////////////////////////////////////////////////////////
///////////////////////////////////////////////////////////////////////
// Cleanup: textures, device buffers, pinned staging memory, streams.
for (dev = 0; dev < deviceCount; dev++){
hipSetDevice(gpuids[dev]);
hipDestroyTextureObject(texImg[dev]);
hipFreeArray(d_cuArrTex[dev]);
}
delete[] texImg; texImg = 0;
delete[] d_cuArrTex; d_cuArrTex = 0;
// Freeing Stage
for (dev = 0; dev < deviceCount; dev++){
hipSetDevice(gpuids[dev]);
hipFree(dProjection[dev*2]);
hipFree(dProjection[dev*2+1]);
}
free(dProjection);
if(!fits_in_memory){
for (dev = 0; dev < deviceCount; dev++){
hipSetDevice(gpuids[dev]);
hipFree(dProjection_accum[dev*2]);
hipFree(dProjection_accum[dev*2+1]);
}
free(dProjection_accum);
}
freeGeoArray(splits,geoArray);
hipHostFree(projParamsArrayHost);
hipHostFree(projFloatsArrayHost);
for (int i = 0; i < nStreams; ++i)
hipStreamDestroy(stream[i]) ;
#ifndef NO_PINNED_MEMORY
if (isHostRegisterSupported & splits>1){
hipHostUnregister(img);
}
#endif
cudaCheckErrors("hipFree fail");
// hipDeviceReset();
return 0;
}
// Allocates (when `allocate` is true) one 3-D hipArray per GPU in `gpuids`,
// uploads `imagedata` (nVoxelX x nVoxelY x nVoxelZ floats) into each array and
// creates one texture object per device for interpolated sampling.
// On later image splits the arrays are reused: only the copy and texture
// creation run again. The 3-D copies are asynchronous on the null stream;
// callers synchronize before sampling the textures.
void CreateTextureInterp(const GpuIds& gpuids,const float* imagedata,Geometry geo,hipArray** d_cuArrTex, hipTextureObject_t *texImage,bool allocate)
{
    const unsigned int num_devices = gpuids.GetLength();
    //size_t size_image=geo.nVoxelX*geo.nVoxelY*geo.nVoxelZ;
    const hipExtent extent = make_hipExtent(geo.nVoxelX, geo.nVoxelY, geo.nVoxelZ);
    if(allocate){
        for (unsigned int dev = 0; dev < num_devices; dev++){
            hipSetDevice(gpuids[dev]);
            //hipArray Descriptor
            hipChannelFormatDesc channelDesc = hipCreateChannelDesc<float>();
            //cuda Array
            hipMalloc3DArray(&d_cuArrTex[dev], &channelDesc, extent);
            cudaCheckErrors("Texture memory allocation fail");
        }
    }
    for (unsigned int dev = 0; dev < num_devices; dev++){
        hipMemcpy3DParms copyParams = {0};
        hipSetDevice(gpuids[dev]);
        // Same host source image is broadcast to every device.
        copyParams.srcPtr   = make_hipPitchedPtr((void *)imagedata, extent.width*sizeof(float), extent.width, extent.height);
        copyParams.dstArray = d_cuArrTex[dev];
        copyParams.extent   = extent;
        copyParams.kind     = hipMemcpyHostToDevice;
        // BUGFIX: the argument had been mangled into the HTML entity
        // "©Params" ("&copy" + "Params"); restored to &copyParams.
        hipMemcpy3DAsync(&copyParams);
        //cudaCheckErrors("Texture memory data copy fail");
        //Array creation End
    }
    for (unsigned int dev = 0; dev < num_devices; dev++){
        hipSetDevice(gpuids[dev]);
        hipResourceDesc texRes;
        memset(&texRes, 0, sizeof(hipResourceDesc));
        texRes.resType = hipResourceTypeArray;
        texRes.res.array.array  = d_cuArrTex[dev];
        hipTextureDesc texDescr;
        memset(&texDescr, 0, sizeof(hipTextureDesc));
        texDescr.normalizedCoords = false;
        // accuracy>1 means at most one sample per voxel: point sampling is
        // enough. Note geo is passed by value, so clamping accuracy here is
        // local and not visible to the caller.
        if (geo.accuracy>1){
            texDescr.filterMode = hipFilterModePoint;
            geo.accuracy=1;
        }
        else{
            texDescr.filterMode = hipFilterModeLinear;
        }
        texDescr.addressMode[0] = hipAddressModeBorder;
        texDescr.addressMode[1] = hipAddressModeBorder;
        texDescr.addressMode[2] = hipAddressModeBorder;
        texDescr.readMode = hipReadModeElementType;
        hipCreateTextureObject(&texImage[dev], &texRes, &texDescr, NULL);
        cudaCheckErrors("Texture object creation fail");
    }
}
/* This code generates the geometries needed to split the image properly in
* cases where the entire image does not fit in the memory of the GPU
**/
// Partitions the image geometry into `splits` Z-slabs for the case where the
// whole volume does not fit in GPU memory. Every slab has ceil(nVoxelZ/splits)
// voxels except possibly the last, which takes the remainder. Because each
// sub-image is no longer centred at the origin, a fresh per-angle Z-offset
// array is allocated for every slab (released later by freeGeoArray).
void splitImageInterp(unsigned int splits,Geometry geo,Geometry* geoArray, unsigned int nangles){
    unsigned long slabSize = (geo.nVoxelZ + splits - 1)/splits; // ceil if not divisible
    for (unsigned int s = 0; s < splits; s++){
        geoArray[s] = geo;
        // Full slab unless we would run past the end of the volume.
        if ((s+1)*slabSize < geo.nVoxelZ)
            geoArray[s].nVoxelZ = slabSize;
        else
            geoArray[s].nVoxelZ = geo.nVoxelZ - slabSize*s;
        geoArray[s].sVoxelZ = geoArray[s].nVoxelZ*geoArray[s].dVoxelZ;
        // Recompute the origin offsets: shift by the slabs already consumed.
        geoArray[s].offOrigZ = (float *)malloc(nangles*sizeof(float));
        for (unsigned int a = 0; a < nangles; a++){
            geoArray[s].offOrigZ[a] = geo.offOrigZ[a]-geo.sVoxelZ/2+s*geoArray[0].sVoxelZ+geoArray[s].sVoxelZ/2;
        }
    }
}
/* This code precomputes The location of the source and the Delta U and delta V (in the warped space)
* to compute the locations of the x-rays. While it seems verbose and overly-optimized,
* it does saves about 30% of each of the kernel calls. Thats something!
**/
/* This code precomputes the location of the source and the delta U and delta V (in the warped space)
 * to compute the locations of the x-rays. While it seems verbose and overly-optimized,
 * it does save about 30% of each of the kernel calls. That's something!
 * Outputs (all in voxel units, image-corner coordinate system):
 *   uvorigin - world position of detector pixel (0,0)
 *   deltaU/deltaV - step between adjacent detector pixels along U/V
 *   source - X-ray source position
 **/
void computeDeltas(Geometry geo,unsigned int i, Point3D* uvorigin, Point3D* deltaU, Point3D* deltaV, Point3D* source){
// Source starts on the +X axis at distance DSO from the origin.
Point3D S;
S.x=geo.DSO[i];
S.y=0;
S.z=0;
//End point: detector corner pixel and its two neighbours along U and V.
Point3D P,Pu0,Pv0;
P.x =-(geo.DSD[i]-geo.DSO[i]); P.y = geo.dDetecU*(0-((float)geo.nDetecU/2)+0.5); P.z = geo.dDetecV*(((float)geo.nDetecV/2)-0.5-0);
Pu0.x=-(geo.DSD[i]-geo.DSO[i]); Pu0.y= geo.dDetecU*(1-((float)geo.nDetecU/2)+0.5); Pu0.z= geo.dDetecV*(((float)geo.nDetecV/2)-0.5-0);
Pv0.x=-(geo.DSD[i]-geo.DSO[i]); Pv0.y= geo.dDetecU*(0-((float)geo.nDetecU/2)+0.5); Pv0.z= geo.dDetecV*(((float)geo.nDetecV/2)-0.5-1);
// Geometric transformations:
// Now we have the real world (OXYZ) coordinates of the bottom corner and its two neighbours.
// The objective is to get a position of the detector in a coordinate system where:
// 1-units are voxel size (in each direction can be different)
// 2-The image has its first voxel at (0,0,0)
// 3-The image never rotates
// To do that, we need to compute the "deltas" of the detector, or "by how much
// (in new xyz) does the voxel change when an index is added". To do that
// several geometric steps need to be performed
//1.Roll,pitch,jaw
// The detector can have a small rotation.
// according to
//"A geometric calibration method for cone beam CT systems" Yang K1, Kwan AL, Miller DF, Boone JM. Med Phys. 2006 Jun;33(6):1695-706.
// Only the Z rotation will have a big influence in the image quality when they are small.
// Still all rotations are supported
// To roll pitch jaw, the detector has to be centered in OXYZ.
P.x=0;Pu0.x=0;Pv0.x=0;
// Roll pitch yaw
rollPitchYaw(geo,i,&P);
rollPitchYaw(geo,i,&Pu0);
rollPitchYaw(geo,i,&Pv0);
//Now lets translate the detector coordinates to DOD (original position on real coordinate system):
P.x=P.x-(geo.DSD[i]-geo.DSO[i]);
Pu0.x=Pu0.x-(geo.DSD[i]-geo.DSO[i]);
Pv0.x=Pv0.x-(geo.DSD[i]-geo.DSO[i]);
//2: Offset detector
//S doesnt need to change
//3: Rotate around RZ RY RZ
Point3D Pfinal, Pfinalu0, Pfinalv0;
Pfinal.x =P.x;
Pfinal.y =P.y +geo.offDetecU[i]; Pfinal.z =P.z +geo.offDetecV[i];
Pfinalu0.x=Pu0.x;
Pfinalu0.y=Pu0.y +geo.offDetecU[i]; Pfinalu0.z =Pu0.z +geo.offDetecV[i];
Pfinalv0.x=Pv0.x;
Pfinalv0.y=Pv0.y +geo.offDetecU[i]; Pfinalv0.z =Pv0.z +geo.offDetecV[i];
eulerZYZ(geo,&Pfinal);
eulerZYZ(geo,&Pfinalu0);
eulerZYZ(geo,&Pfinalv0);
eulerZYZ(geo,&S);
//3: Offset image (instead of offsetting image, -offset everything else)
Pfinal.x =Pfinal.x-geo.offOrigX[i]; Pfinal.y =Pfinal.y-geo.offOrigY[i]; Pfinal.z =Pfinal.z-geo.offOrigZ[i];
Pfinalu0.x=Pfinalu0.x-geo.offOrigX[i]; Pfinalu0.y=Pfinalu0.y-geo.offOrigY[i]; Pfinalu0.z=Pfinalu0.z-geo.offOrigZ[i];
Pfinalv0.x=Pfinalv0.x-geo.offOrigX[i]; Pfinalv0.y=Pfinalv0.y-geo.offOrigY[i]; Pfinalv0.z=Pfinalv0.z-geo.offOrigZ[i];
S.x=S.x-geo.offOrigX[i]; S.y=S.y-geo.offOrigY[i]; S.z=S.z-geo.offOrigZ[i];
// As we want the (0,0,0) to be in a corner of the image, we need to translate everything (after rotation);
Pfinal.x =Pfinal.x+geo.sVoxelX/2-geo.dVoxelX/2; Pfinal.y =Pfinal.y+geo.sVoxelY/2-geo.dVoxelY/2; Pfinal.z =Pfinal.z +geo.sVoxelZ/2-geo.dVoxelZ/2;
Pfinalu0.x=Pfinalu0.x+geo.sVoxelX/2-geo.dVoxelX/2; Pfinalu0.y=Pfinalu0.y+geo.sVoxelY/2-geo.dVoxelY/2; Pfinalu0.z=Pfinalu0.z+geo.sVoxelZ/2-geo.dVoxelZ/2;
Pfinalv0.x=Pfinalv0.x+geo.sVoxelX/2-geo.dVoxelX/2; Pfinalv0.y=Pfinalv0.y+geo.sVoxelY/2-geo.dVoxelY/2; Pfinalv0.z=Pfinalv0.z+geo.sVoxelZ/2-geo.dVoxelZ/2;
S.x =S.x+geo.sVoxelX/2-geo.dVoxelX/2; S.y =S.y+geo.sVoxelY/2-geo.dVoxelY/2; S.z =S.z +geo.sVoxelZ/2-geo.dVoxelZ/2;
//4. Scale everything so dVoxel==1
Pfinal.x =Pfinal.x/geo.dVoxelX; Pfinal.y =Pfinal.y/geo.dVoxelY; Pfinal.z =Pfinal.z/geo.dVoxelZ;
Pfinalu0.x=Pfinalu0.x/geo.dVoxelX; Pfinalu0.y=Pfinalu0.y/geo.dVoxelY; Pfinalu0.z=Pfinalu0.z/geo.dVoxelZ;
Pfinalv0.x=Pfinalv0.x/geo.dVoxelX; Pfinalv0.y=Pfinalv0.y/geo.dVoxelY; Pfinalv0.z=Pfinalv0.z/geo.dVoxelZ;
S.x =S.x/geo.dVoxelX; S.y =S.y/geo.dVoxelY; S.z =S.z/geo.dVoxelZ;
//mexPrintf("COR: %f \n",geo.COR[i]);
//5. apply COR. Wherever everything was, now its offset by a bit.
// Only works for standard rotation, not arbitrary axis rotation.
float CORx, CORy;
CORx=-geo.COR[i]*sin(geo.alpha)/geo.dVoxelX;
CORy= geo.COR[i]*cos(geo.alpha)/geo.dVoxelY;
Pfinal.x+=CORx; Pfinal.y+=CORy;
Pfinalu0.x+=CORx; Pfinalu0.y+=CORy;
Pfinalv0.x+=CORx; Pfinalv0.y+=CORy;
S.x+=CORx; S.y+=CORy;
// return: origin plus per-pixel increments (differences of neighbours).
*uvorigin=Pfinal;
deltaU->x=Pfinalu0.x-Pfinal.x;
deltaU->y=Pfinalu0.y-Pfinal.y;
deltaU->z=Pfinalu0.z-Pfinal.z;
deltaV->x=Pfinalv0.x-Pfinal.x;
deltaV->y=Pfinalv0.y-Pfinal.y;
deltaV->z=Pfinalv0.z-Pfinal.z;
*source=S;
}
/* Returns, in voxel units, a safe distance along the ray (measured from the
 * source) that can be skipped before sampling must start for projection i:
 * roughly the source-to-centre distance minus the offset-padded half
 * diagonal of the image cuboid, clamped at 0.
 */
float maxdistanceCuboid(Geometry geo,unsigned int i){
    ///////////
    // Compute initial "t" so we access safely as little out of bounds as possible.
    //////////
    float maxCubX,maxCubY,maxCubZ;
    // Half size of the cuboid in voxels, padded by the per-angle origin offset.
    maxCubX=(geo.nVoxelX/2+ abs(geo.offOrigX[i])/geo.dVoxelX);
    maxCubY=(geo.nVoxelY/2+ abs(geo.offOrigY[i])/geo.dVoxelY);
    maxCubZ=(geo.nVoxelZ/2+ abs(geo.offOrigZ[i])/geo.dVoxelZ);
    float a,b;
    a=geo.DSO[i]/geo.dVoxelX;
    b=geo.DSO[i]/geo.dVoxelY;
    // As the return of this value is in "voxel space", the source may trace an
    // elliptical orbit (semi-axes a,b) when voxels are anisotropic.
    // FIX: use logical && instead of bitwise & on the boolean comparisons
    // (same truth table, but idiomatic and short-circuiting).
    if (geo.theta==0.0f && geo.psi==0.0f) // Special case, it will make the code faster
        return max(a*b/sqrt(a*a*sin(geo.alpha)*sin(geo.alpha)+b*b*cos(geo.alpha)*cos(geo.alpha))-
                sqrt(maxCubX*maxCubX+maxCubY*maxCubY),0.0f);
    //TODO: think of more special cases?
    // Conservative bound for arbitrary (spherical) rotations: use the full 3-D diagonal.
    return max(geo.DSO[i]/max(max(geo.dVoxelX,geo.dVoxelY),geo.dVoxelZ)-sqrt(maxCubX*maxCubX+maxCubY*maxCubY+maxCubZ*maxCubZ),0.0f);
}
void rollPitchYaw(Geometry geo,unsigned int i, Point3D* point){
Point3D auxPoint;
auxPoint.x=point->x;
auxPoint.y=point->y;
auxPoint.z=point->z;
point->x=cos(geo.dRoll[i])*cos(geo.dPitch[i])*auxPoint.x
+(cos(geo.dRoll[i])*sin(geo.dPitch[i])*sin(geo.dYaw[i]) - sin(geo.dRoll[i])*cos(geo.dYaw[i]))*auxPoint.y
+(cos(geo.dRoll[i])*sin(geo.dPitch[i])*cos(geo.dYaw[i]) + sin(geo.dRoll[i])*sin(geo.dYaw[i]))*auxPoint.z;
point->y=sin(geo.dRoll[i])*cos(geo.dPitch[i])*auxPoint.x
+(sin(geo.dRoll[i])*sin(geo.dPitch[i])*sin(geo.dYaw[i]) + cos(geo.dRoll[i])*cos(geo.dYaw[i]))*auxPoint.y
+(sin(geo.dRoll[i])*sin(geo.dPitch[i])*cos(geo.dYaw[i]) - cos(geo.dRoll[i])*sin(geo.dYaw[i]))*auxPoint.z;
point->z=-sin(geo.dPitch[i])*auxPoint.x
+cos(geo.dPitch[i])*sin(geo.dYaw[i])*auxPoint.y
+cos(geo.dPitch[i])*cos(geo.dYaw[i])*auxPoint.z;
}
// In-place rotation of `point` by the Euler ZYZ angles (alpha, theta, psi)
// stored in geo; used to move detector and source into the warped image frame.
void eulerZYZ(Geometry geo, Point3D* point){
    // Hoist the trigonometric terms; `auto` preserves the original overload.
    const auto ca = cos(geo.alpha), sa = sin(geo.alpha);
    const auto ct = cos(geo.theta), st = sin(geo.theta);
    const auto cp = cos(geo.psi),   sp = sin(geo.psi);
    // Copy the input so all three outputs read the original coordinates.
    Point3D in;
    in.x=point->x;
    in.y=point->y;
    in.z=point->z;
    point->x = ( ca*ct*cp - sa*sp)*in.x + (-ca*ct*sp - sa*cp)*in.y + ca*st*in.z;
    point->y = ( sa*ct*cp + ca*sp)*in.x + (-sa*ct*sp + ca*cp)*in.y + sa*st*in.z;
    point->z = -st*cp*in.x + st*sp*in.y + ct*in.z;
}
//______________________________________________________________________________
//
// Function: freeGeoArray
//
// Description: Frees the memory from the geometry array for multiGPU.
//______________________________________________________________________________
// Releases the per-split Z-offset arrays (allocated by splitImageInterp) and
// then the geometry array itself.
void freeGeoArray(unsigned int splits,Geometry* geoArray){
    for (unsigned int idx = 0; idx != splits; ++idx)
        free(geoArray[idx].offOrigZ);
    free(geoArray);
}
//______________________________________________________________________________
//
// Function: checkFreeMemory
//
// Description: check available memory on devices
//______________________________________________________________________________
// Queries every requested GPU and reports, through *mem_GPU_global, the
// smallest amount of free memory across them, scaled down by 5% as a safety
// margin for driver/internal allocations. Aborts (mexErrMsgIdAndTxt) if any
// GPU has less than half of its total memory free.
void checkFreeMemory(const GpuIds& gpuids, size_t *mem_GPU_global){
    const int nDevices = gpuids.GetLength();
    size_t freeBytes;
    size_t totalBytes;
    for (int iDev = 0; iDev < nDevices; iDev++){
        hipSetDevice(gpuids[iDev]);
        hipMemGetInfo(&freeBytes,&totalBytes);
        if(freeBytes<totalBytes/2){
            mexErrMsgIdAndTxt("tvDenoise:tvdenoising:GPU","One (or more) of your GPUs is being heavily used by another program (possibly graphics-based).\n Free the GPU to run TIGRE\n");
        }
        cudaCheckErrors("Check mem error");
        // Track the minimum free memory seen so far.
        if (iDev==0 || freeBytes<*mem_GPU_global)
            *mem_GPU_global=freeBytes;
    }
    *mem_GPU_global=(size_t)((double)*mem_GPU_global*0.95);
    //*mem_GPU_global= insert your known number here, in bytes.
}
|
6e9c968333779aa926a74c249f92d4e4492acf00.cu
|
/*-------------------------------------------------------------------------
*
* CUDA functions for texture-memory interpolation based projection
*
* This file has the necesary fucntiosn to perform X-ray CBCT projection
* operation given a geaometry, angles and image. It uses the 3D texture
* memory linear interpolation to uniformily sample a path to integrate the
* X-rays.
*
* CODE by Ander Biguri
* Sepideh Hatamikia (arbitrary rotation)
* ---------------------------------------------------------------------------
* ---------------------------------------------------------------------------
* Copyright (c) 2015, University of Bath and CERN- European Organization for
* Nuclear Research
* All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions are met:
*
* 1. Redistributions of source code must retain the above copyright notice,
* this list of conditions and the following disclaimer.
*
* 2. Redistributions in binary form must reproduce the above copyright notice,
* this list of conditions and the following disclaimer in the documentation
* and/or other materials provided with the distribution.
*
* 3. Neither the name of the copyright holder nor the names of its contributors
* may be used to endorse or promote products derived from this software without
* specific prior written permission.
*
* THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
* AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
* IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
* ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE
* LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
* CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
* SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
* INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
* CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
* ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
* POSSIBILITY OF SUCH DAMAGE.
* ---------------------------------------------------------------------------
*
* Contact: tigre.toolbox@gmail.com
* Codes : https://github.com/CERN/TIGRE
* ---------------------------------------------------------------------------
*/
#include <algorithm>
#include <cuda_runtime_api.h>
#include <cuda.h>
#include "ray_interpolated_projection.hpp"
#include "TIGRE_common.hpp"
#include <math.h>
#define cudaCheckErrors(msg) \
do { \
cudaError_t __err = cudaGetLastError(); \
if (__err != cudaSuccess) { \
mexPrintf("%s \n",msg);\
cudaDeviceReset();\
mexErrMsgIdAndTxt("TIGRE:Ax:interpolated",cudaGetErrorString(__err));\
} \
} while (0)
#define MAXTREADS 1024
#define PROJ_PER_BLOCK 9
#define PIXEL_SIZE_BLOCK 9
/*GEOMETRY DEFINITION
*
* Detector plane, behind
* |-----------------------------|
* | |
* | |
* | |
* | |
* | +--------+ |
* | / /| |
* A Z | / / |*D |
* | | +--------+ | |
* | | | | | |
* | | | *O | + |
* --->y | | | / |
* / | | |/ |
* V X | +--------+ |
* |-----------------------------|
*
* *S
*
*
*
*
*
**/
void CreateTextureInterp(const GpuIds& gpuids,const float* imagedata,Geometry geo,cudaArray** d_cuArrTex, cudaTextureObject_t *texImage,bool allocate);
__constant__ Point3D projParamsArrayDev[4*PROJ_PER_BLOCK]; // Dev means it is on device
__constant__ float projFloatsArrayDev[2*PROJ_PER_BLOCK]; // Dev means it is on device
// In-place elementwise addition: a[i] += b[i] for i in [0, n).
// Launch with a 1-D grid covering at least n threads; surplus threads exit
// through the bounds check. The index is computed in size_t (with the block
// product widened first) so it cannot overflow a 32-bit int when n exceeds
// INT_MAX elements -- the previous `int idx` could wrap and was compared
// against an unsigned long n.
__global__ void vecAddInPlaceInterp(float *a, float *b, unsigned long n)
{
    size_t idx = (size_t)blockIdx.x * blockDim.x + threadIdx.x;
    // Make sure we do not go out of bounds
    if (idx < n)
        a[idx] = a[idx] + b[idx];
}
// Projection kernel: each thread integrates one X-ray path from the source
// to one detector pixel (u,v) by uniformly sampling the 3D texture `tex`
// along the ray. Launch layout: block = (divU, divV, PROJ_PER_BLOCK), so
// threadIdx.z selects which projection of the current batch this thread
// handles; per-projection geometry comes from the constant-memory arrays
// projParamsArrayDev / projFloatsArrayDev filled by the host before launch.
// The template flag `sphericalrotation` only changes the upper bound used to
// clamp the number of samples (3D vs in-plane rotation case).
template<bool sphericalrotation>
__global__ void kernelPixelDetector( Geometry geo,
float* detector,
const int currProjSetNumber,
const int totalNoOfProjections,
cudaTextureObject_t tex){
unsigned long long u = blockIdx.x * blockDim.x + threadIdx.x;
unsigned long long v = blockIdx.y * blockDim.y + threadIdx.y;
unsigned long long projNumber=threadIdx.z;
// Guard: detector sizes are rarely multiples of the block dims.
if (u>= geo.nDetecU || v>= geo.nDetecV || projNumber>=PROJ_PER_BLOCK)
return;
// Output layout differs between MATLAB (column-major, V fastest) and
// Python builds (U fastest).
#if IS_FOR_MATLAB_TIGRE
size_t idx = (size_t)(u * (unsigned long long)geo.nDetecV + v)+ projNumber*(unsigned long long)geo.nDetecV *(unsigned long long)geo.nDetecU ;
#else
size_t idx = (size_t)(v * (unsigned long long)geo.nDetecU + u)+ projNumber*(unsigned long long)geo.nDetecV *(unsigned long long)geo.nDetecU ;
#endif
unsigned long indAlpha = currProjSetNumber*PROJ_PER_BLOCK+projNumber; // This is the ABSOLUTE projection number in the projection array
// The last batch may be partially filled.
if(indAlpha>=totalNoOfProjections)
return;
// Per-projection precomputed geometry (see computeDeltas on the host).
Point3D uvOrigin = projParamsArrayDev[4*projNumber]; // 6*projNumber because we have 6 Point3D values per projection
Point3D deltaU = projParamsArrayDev[4*projNumber+1];
Point3D deltaV = projParamsArrayDev[4*projNumber+2];
Point3D source = projParamsArrayDev[4*projNumber+3];
float DSO = projFloatsArrayDev[2*projNumber+0];
float cropdist_init = projFloatsArrayDev[2*projNumber+1];
/////// Get coordinates XYZ of pixel UV
// V is flipped so that pixel (0,0) maps to the detector corner expected
// by the output ordering.
unsigned long pixelV = geo.nDetecV-v-1;
unsigned long pixelU = u;
float vectX,vectY,vectZ;
Point3D P;
P.x=(uvOrigin.x+pixelU*deltaU.x+pixelV*deltaV.x);
P.y=(uvOrigin.y+pixelU*deltaU.y+pixelV*deltaV.y);
P.z=(uvOrigin.z+pixelU*deltaU.z+pixelV*deltaV.z);
// Length is the ray length in normalized space
float length=__fsqrt_rd((source.x-P.x)*(source.x-P.x)+(source.y-P.y)*(source.y-P.y)+(source.z-P.z)*(source.z-P.z));
//now legth is an integer of Nsamples that are required on this line
length=ceilf(__fdividef(length,geo.accuracy));//Divide the directional vector by an integer
vectX=__fdividef(P.x -source.x,length);
vectY=__fdividef(P.y -source.y,length);
vectZ=__fdividef(P.z -source.z,length);
// //Integrate over the line
float tx,ty,tz;
float sum=0;
float i;
// Because I have no idea how to efficiently cutoff the legth path in 3D, a very upper limit is computed (see maxdistanceCuboid)
// for the 3D case. However it would be bad to lose performance in the 3D case
// TODO: can ge really improve this?
if (sphericalrotation){
if ((2*DSO/fminf(fminf(geo.dVoxelX,geo.dVoxelY),geo.dVoxelZ)+cropdist_init)/geo.accuracy < length)
length=ceilf((2*DSO/fminf(fminf(geo.dVoxelX,geo.dVoxelY),geo.dVoxelZ)+cropdist_init)/geo.accuracy);
}
else{
if ((2*DSO/fminf(geo.dVoxelX,geo.dVoxelY)+cropdist_init)/geo.accuracy < length)
length=ceilf((2*DSO/fminf(geo.dVoxelX,geo.dVoxelY)+cropdist_init)/geo.accuracy);
}
//Length is not actually a length, but the amount of memreads with given accuracy ("samples per voxel")
// Start sampling only once the ray can actually be inside the volume
// (cropdist_init precomputed on the host via maxdistanceCuboid).
for (i=floorf(cropdist_init/geo.accuracy); i<=length; i=i+1){
tx=vectX*i+source.x;
ty=vectY*i+source.y;
tz=vectZ*i+source.z;
sum += tex3D<float>(tex, tx+0.5f, ty+0.5f, tz+0.5f); // this line is 94% of time.
}
// Scale the sample sum by the physical step length (voxel units -> mm).
float deltalength=sqrtf((vectX*geo.dVoxelX)*(vectX*geo.dVoxelX)+
(vectY*geo.dVoxelY)*(vectY*geo.dVoxelY)+
(vectZ*geo.dVoxelZ)*(vectZ*geo.dVoxelZ) );
detector[idx]=sum*deltalength;
}
// legnth(angles)=3 x nagnles, as we have roll, pitch, yaw.
// Host driver for the interpolated (texture-sampled) X-ray projection.
//
// img     : input volume (host), geo.nVoxelX*Y*Z floats.
// geo     : CBCT geometry description.
// result  : per-angle output projection buffers (host), nangles pointers.
// angles  : 3*nangles floats (alpha, theta, psi per projection).
// gpuids  : set of CUDA devices to use.
//
// Strategy: the volume is split into Z-slabs if it does not fit in GPU
// memory; each slab is uploaded as a 3D texture on every GPU, angles are
// distributed across GPUs, and batches of PROJ_PER_BLOCK projections are
// launched per kernel call, double-buffering the projection output so
// copy-out overlaps compute. Returns 0 on success (errors abort via mex).
//
// Changes vs. previous revision:
//  * `isHostRegisterSupported & splits>1` used bitwise & on booleans; now &&.
//  * num_bytes_proj is computed in size_t from the start so the
//    PROJ_PER_BLOCK*nDetecU*nDetecV product cannot overflow 32-bit int.
int interpolation_projection(float * img, Geometry geo, float** result,float const * const angles,int nangles, const GpuIds& gpuids){
    // Prepare for MultiGPU
    int deviceCount = gpuids.GetLength();
    cudaCheckErrors("Device query fail");
    if (deviceCount == 0) {
        mexErrMsgIdAndTxt("Ax:Interpolated_projection:GPUselect","There are no available device(s) that support CUDA\n");
    }
    //
    // CODE assumes
    // 1.-All available devices are usable by this code
    // 2.-All available devices are equal, they are the same machine (warning thrown)
    // Check the available devices, and if they are the same
    if (!gpuids.AreEqualDevices()) {
        mexWarnMsgIdAndTxt("Ax:Interpolated_projection:GPUselect","Detected one (or more) different GPUs.\n This code is not smart enough to separate the memory GPU wise if they have different computational times or memory limits.\n First GPU parameters used. If the code errors you might need to change the way GPU selection is performed.");
    }
    int dev;
    // Check free memory
    size_t mem_GPU_global;
    checkFreeMemory(gpuids,&mem_GPU_global);
    // printf("geo.nDetec (U, V) = %d, %d\n", geo.nDetecU, geo.nDetecV);
    size_t mem_image=(unsigned long long)geo.nVoxelX*(unsigned long long)geo.nVoxelY*(unsigned long long)geo.nVoxelZ*sizeof(float);
    size_t mem_proj =(unsigned long long)geo.nDetecU*(unsigned long long)geo.nDetecV * sizeof(float);
    // Does everything fit in the GPUs?
    const bool fits_in_memory = mem_image+2*PROJ_PER_BLOCK*mem_proj<mem_GPU_global;
    unsigned int splits=1;
    if (!fits_in_memory) {
        // Nope nope.
        // approx free memory we have. We already have left some extra 5% free for internal stuff
        // we need a second projection memory to combine multi-GPU stuff.
        size_t mem_free=mem_GPU_global-4*PROJ_PER_BLOCK*mem_proj;
        splits=mem_image/mem_free+1;// Ceil of the truncation
    }
    Geometry* geoArray = (Geometry*)malloc(splits*sizeof(Geometry));
    splitImageInterp(splits,geo,geoArray,nangles);
    // Allocate auiliary memory for projections on the GPU to accumulate partial results
    float ** dProjection_accum;
    // Widen before multiplying: the detector-pixel product can exceed INT_MAX.
    size_t num_bytes_proj = (size_t)PROJ_PER_BLOCK*(size_t)geo.nDetecU*(size_t)geo.nDetecV * sizeof(float);
    if (!fits_in_memory){
        dProjection_accum=(float**)malloc(2*deviceCount*sizeof(float*));
        for (dev = 0; dev < deviceCount; dev++) {
            cudaSetDevice(gpuids[dev]);
            for (int i = 0; i < 2; ++i){
                cudaMalloc((void**)&dProjection_accum[dev*2+i], num_bytes_proj);
                cudaMemset(dProjection_accum[dev*2+i],0,num_bytes_proj);
                cudaCheckErrors("cudaMallocauxiliarty projections fail");
            }
        }
    }
    // This is happening regarthless if the image fits on memory
    // Two projection buffers per device for double buffering (compute on one
    // while the other is copied out).
    float** dProjection=(float**)malloc(2*deviceCount*sizeof(float*));
    for (dev = 0; dev < deviceCount; dev++){
        cudaSetDevice(gpuids[dev]);
        for (int i = 0; i < 2; ++i){
            cudaMalloc((void**)&dProjection[dev*2+i], num_bytes_proj);
            cudaMemset(dProjection[dev*2+i] ,0,num_bytes_proj);
            cudaCheckErrors("cudaMalloc projections fail");
        }
    }
    //Pagelock memory for synchronous copy.
    // Lets try to make the host memory pinned:
    // We laredy queried the GPU and assuemd they are the same, thus should have the same attributes.
    int isHostRegisterSupported = 0;
#if CUDART_VERSION >= 9020
    cudaDeviceGetAttribute(&isHostRegisterSupported,cudaDevAttrHostRegisterSupported,gpuids[0]);
#endif
    // empirical testing shows that when the image split is smaller than 1 (also implies the image is not very big), the time to
    // pin the memory is greater than the lost time in Synchronously launching the memcpys. This is only worth it when the image is too big.
#ifndef NO_PINNED_MEMORY
    if (isHostRegisterSupported && splits>1){
        cudaHostRegister(img, (size_t)geo.nVoxelX*(size_t)geo.nVoxelY*(size_t)geo.nVoxelZ*(size_t)sizeof(float),cudaHostRegisterPortable);
    }
    cudaCheckErrors("Error pinning memory");
#endif
    // Pinned host staging buffers for the per-batch constant-memory upload.
    Point3D source, deltaU, deltaV, uvOrigin;
    Point3D* projParamsArrayHost = 0;
    cudaMallocHost((void**)&projParamsArrayHost,4*PROJ_PER_BLOCK*sizeof(Point3D));
    float* projFloatsArrayHost = 0;
    cudaMallocHost((void**)&projFloatsArrayHost,2*PROJ_PER_BLOCK*sizeof(float));
    cudaCheckErrors("Error allocating auxiliary constant memory");
    // Create Streams for overlapping memcopy and compute
    int nStream_device=2;
    int nStreams=deviceCount*nStream_device;
    cudaStream_t* stream=(cudaStream_t*)malloc(nStreams*sizeof(cudaStream_t));
    for (dev = 0; dev < deviceCount; dev++){
        cudaSetDevice(gpuids[dev]);
        for (int i = 0; i < nStream_device; ++i){
            cudaStreamCreate(&stream[i+dev*nStream_device]);
        }
    }
    cudaCheckErrors("Stream creation fail");
    // Angle distribution: each device gets a contiguous chunk of angles.
    int nangles_device=(nangles+deviceCount-1)/deviceCount;
    int nangles_last_device=(nangles-(deviceCount-1)*nangles_device);
    unsigned int noOfKernelCalls = (nangles_device+PROJ_PER_BLOCK-1)/PROJ_PER_BLOCK; // We'll take care of bounds checking inside the loop if nalpha is not divisible by PROJ_PER_BLOCK
    unsigned int noOfKernelCallsLastDev = (nangles_last_device+PROJ_PER_BLOCK-1)/PROJ_PER_BLOCK; // we will use this in the memory management.
    int projection_this_block;
    cudaTextureObject_t *texImg = new cudaTextureObject_t[deviceCount];
    cudaArray **d_cuArrTex = new cudaArray*[deviceCount];
    for (unsigned int sp=0;sp<splits;sp++){
        // Create texture objects for all GPUs
        size_t linear_idx_start;
        // They are all the same size, except the last one.
        linear_idx_start= (size_t)sp*(size_t)geoArray[0].nVoxelX*(size_t)geoArray[0].nVoxelY*(size_t)geoArray[0].nVoxelZ;
        // Arrays are only allocated on the first split (!sp); later splits reuse them.
        CreateTextureInterp(gpuids,&img[linear_idx_start],geoArray[sp],d_cuArrTex,texImg,!sp);
        cudaCheckErrors("Texture object creation fail");
        int divU,divV;
        divU=PIXEL_SIZE_BLOCK;
        divV=PIXEL_SIZE_BLOCK;
        dim3 grid((geoArray[sp].nDetecU+divU-1)/divU,(geoArray[0].nDetecV+divV-1)/divV,1);
        dim3 block(divU,divV,PROJ_PER_BLOCK);
        unsigned int proj_global;
        float maxdist;
        // Now that we have prepared the image (piece of image) and parameters for kernels
        // we project for all angles.
        for (unsigned int i=0; i<noOfKernelCalls; i++) {
            for (dev=0;dev<deviceCount;dev++){
                float is_spherical=0;
                cudaSetDevice(gpuids[dev]);
                // Fill the per-batch constant data for up to PROJ_PER_BLOCK angles.
                for(unsigned int j=0; j<PROJ_PER_BLOCK; j++){
                    proj_global=(i*PROJ_PER_BLOCK+j)+dev*nangles_device;
                    if (proj_global>=nangles)
                        break;
                    if ((i*PROJ_PER_BLOCK+j)>=nangles_device)
                        break;
                    geoArray[sp].alpha=angles[proj_global*3];
                    geoArray[sp].theta=angles[proj_global*3+1];
                    geoArray[sp].psi  =angles[proj_global*3+2];
                    // Any nonzero theta/psi in the batch selects the spherical kernel.
                    is_spherical+=abs(geoArray[sp].theta)+abs(geoArray[sp].psi);
                    //precomute distances for faster execution
                    maxdist=maxdistanceCuboid(geoArray[sp],proj_global);
                    //Precompute per angle constant stuff for speed
                    computeDeltas(geoArray[sp], proj_global, &uvOrigin, &deltaU, &deltaV, &source);
                    //Ray tracing!
                    projParamsArrayHost[4*j]=uvOrigin;   // 4 Point3D values per projection
                    projParamsArrayHost[4*j+1]=deltaU;
                    projParamsArrayHost[4*j+2]=deltaV;
                    projParamsArrayHost[4*j+3]=source;
                    projFloatsArrayHost[2*j]=geo.DSO[proj_global];
                    projFloatsArrayHost[2*j+1]=floor(maxdist);
                }
                cudaMemcpyToSymbolAsync(projParamsArrayDev, projParamsArrayHost, sizeof(Point3D)*4*PROJ_PER_BLOCK,0,cudaMemcpyHostToDevice,stream[dev*nStream_device]);
                cudaMemcpyToSymbolAsync(projFloatsArrayDev, projFloatsArrayHost, sizeof(float)*2*PROJ_PER_BLOCK,0,cudaMemcpyHostToDevice,stream[dev*nStream_device]);
                cudaStreamSynchronize(stream[dev*nStream_device]);
                //TODO: we could do this around X and Y axis too, but we would need to compute the new axis of rotation (not possible to know from jsut the angles)
                if (!is_spherical){
                    kernelPixelDetector<false><<<grid,block,0,stream[dev*nStream_device]>>>(geoArray[sp],dProjection[(i%2)+dev*2],i,nangles_device,texImg[dev]);
                }
                else{
                    kernelPixelDetector<true> <<<grid,block,0,stream[dev*nStream_device]>>>(geoArray[sp],dProjection[(i%2)+dev*2],i,nangles_device,texImg[dev]);
                }
            }
            // Now that the computation is happening, we need to either prepare the memory for
            // combining of the projections (splits>1) and start removing previous results.
            // If our image does not fit in memory then we need to make sure we accumulate previous results too.
            // This is done in 2 steps:
            // 1)copy previous results back into GPU
            // 2)accumulate with current results
            // The code to take them out is the same as when there are no splits needed
            if( !fits_in_memory&&sp>0)
            {
                // 1) grab previous results and put them in the auxiliary variable dProjection_accum
                for (dev = 0; dev < deviceCount; dev++)
                {
                    cudaSetDevice(gpuids[dev]);
                    //Global index of FIRST projection on this set on this GPU
                    proj_global=i*PROJ_PER_BLOCK+dev*nangles_device;
                    if(proj_global>=nangles)
                        break;
                    // Unless its the last projection set, we have PROJ_PER_BLOCK angles. Otherwise...
                    if(i+1==noOfKernelCalls) //is it the last block?
                        projection_this_block=min(nangles_device-(noOfKernelCalls-1)*PROJ_PER_BLOCK, //the remaining angles that this GPU had to do (almost never PROJ_PER_BLOCK)
                                                  nangles-proj_global);                              //or whichever amount is left to finish all (this is for the last GPU)
                    else
                        projection_this_block=PROJ_PER_BLOCK;
                    cudaMemcpyAsync(dProjection_accum[(i%2)+dev*2], result[proj_global], projection_this_block*geo.nDetecV*geo.nDetecU*sizeof(float), cudaMemcpyHostToDevice,stream[dev*2+1]);
                }
                // 2) take the results from current compute call and add it to the code in execution.
                for (dev = 0; dev < deviceCount; dev++)
                {
                    cudaSetDevice(gpuids[dev]);
                    //Global index of FIRST projection on this set on this GPU
                    proj_global=i*PROJ_PER_BLOCK+dev*nangles_device;
                    if(proj_global>=nangles)
                        break;
                    // Unless its the last projection set, we have PROJ_PER_BLOCK angles. Otherwise...
                    if(i+1==noOfKernelCalls) //is it the last block?
                        projection_this_block=min(nangles_device-(noOfKernelCalls-1)*PROJ_PER_BLOCK, //the remaining angles that this GPU had to do (almost never PROJ_PER_BLOCK)
                                                  nangles-proj_global);                              //or whichever amount is left to finish all (this is for the last GPU)
                    else
                        projection_this_block=PROJ_PER_BLOCK;
                    cudaStreamSynchronize(stream[dev*2+1]); // wait until copy is finished
                    vecAddInPlaceInterp<<<(geo.nDetecU*geo.nDetecV*projection_this_block+MAXTREADS-1)/MAXTREADS,MAXTREADS,0,stream[dev*2]>>>(dProjection[(i%2)+dev*2],dProjection_accum[(i%2)+dev*2],(unsigned long)geo.nDetecU*geo.nDetecV*projection_this_block);
                }
            } // end accumulation case, where the image needs to be split
            // Now, lets get out the projections from the previous execution of the kernels.
            if (i>0)
            {
                for (dev = 0; dev < deviceCount; dev++)
                {
                    cudaSetDevice(gpuids[dev]);
                    //Global index of FIRST projection on previous set on this GPU
                    proj_global=(i-1)*PROJ_PER_BLOCK+dev*nangles_device;
                    if (dev+1==deviceCount) {    //is it the last device?
                        // projections assigned to this device is >=nangles_device-(deviceCount-1) and < nangles_device
                        if (i-1 < noOfKernelCallsLastDev) {
                            // The previous set(block) was not empty.
                            projection_this_block=min(PROJ_PER_BLOCK, nangles-proj_global);
                        }
                        else {
                            // The previous set was empty.
                            // This happens if deviceCount > PROJ_PER_BLOCK+1.
                            // e.g. PROJ_PER_BLOCK = 9, deviceCount = 11, nangles = 199.
                            // e.g. PROJ_PER_BLOCK = 1, deviceCount = 3, nangles = 7.
                            break;
                        }
                    }
                    else {
                        projection_this_block=PROJ_PER_BLOCK;
                    }
                    cudaMemcpyAsync(result[proj_global], dProjection[(int)(!(i%2))+dev*2], projection_this_block*geo.nDetecV*geo.nDetecU*sizeof(float), cudaMemcpyDeviceToHost,stream[dev*2+1]);
                }
            }
            // Make sure Computation on kernels has finished before we launch the next batch.
            for (dev = 0; dev < deviceCount; dev++)
            {
                cudaSetDevice(gpuids[dev]);
                cudaStreamSynchronize(stream[dev*2]);
            }
        } // End noOfKernelCalls (i) loop.
        // We still have the last set of projections to get out of GPUs
        for (dev = 0; dev < deviceCount; dev++)
        {
            cudaSetDevice(gpuids[dev]);
            //Global index of FIRST projection on this set on this GPU
            proj_global=(noOfKernelCalls-1)*PROJ_PER_BLOCK+dev*nangles_device;
            if(proj_global>=nangles)
                break;
            // How many projections are left here?
            projection_this_block=min(nangles_device-(noOfKernelCalls-1)*PROJ_PER_BLOCK, //the remaining angles that this GPU had to do (almost never PROJ_PER_BLOCK)
                                      nangles-proj_global);                              //or whichever amount is left to finish all (this is for the last GPU)
            cudaDeviceSynchronize(); //Not really necesary, but just in case, we los nothing.
            cudaCheckErrors("Error at copying the last set of projections out (or in the previous copy)");
            cudaMemcpyAsync(result[proj_global], dProjection[(int)(!(noOfKernelCalls%2))+dev*2], projection_this_block*geo.nDetecV*geo.nDetecU*sizeof(float), cudaMemcpyDeviceToHost,stream[dev*2+1]);
        }
        // Make sure everyone has done their bussiness before the next image split:
        for (dev = 0; dev < deviceCount; dev++)
        {
            cudaSetDevice(gpuids[dev]);
            cudaDeviceSynchronize();
        }
    } // End image split loop.
    cudaCheckErrors("Main loop fail");
    ///////////////////////////////////////////////////////////////////////
    ///////////////////////////////////////////////////////////////////////
    for (dev = 0; dev < deviceCount; dev++){
        cudaSetDevice(gpuids[dev]);
        cudaDestroyTextureObject(texImg[dev]);
        cudaFreeArray(d_cuArrTex[dev]);
    }
    delete[] texImg; texImg = 0;
    delete[] d_cuArrTex; d_cuArrTex = 0;
    // Freeing Stage
    for (dev = 0; dev < deviceCount; dev++){
        cudaSetDevice(gpuids[dev]);
        cudaFree(dProjection[dev*2]);
        cudaFree(dProjection[dev*2+1]);
    }
    free(dProjection);
    if(!fits_in_memory){
        for (dev = 0; dev < deviceCount; dev++){
            cudaSetDevice(gpuids[dev]);
            cudaFree(dProjection_accum[dev*2]);
            cudaFree(dProjection_accum[dev*2+1]);
        }
        free(dProjection_accum);
    }
    freeGeoArray(splits,geoArray);
    cudaFreeHost(projParamsArrayHost);
    cudaFreeHost(projFloatsArrayHost);
    for (int i = 0; i < nStreams; ++i)
        cudaStreamDestroy(stream[i]) ;
#ifndef NO_PINNED_MEMORY
    if (isHostRegisterSupported && splits>1){
        cudaHostUnregister(img);
    }
#endif
    cudaCheckErrors("cudaFree fail");
    //  cudaDeviceReset();
    return 0;
}
// Upload `imagedata` (geo.nVoxelX*Y*Z floats, host) into a 3D cudaArray on
// every device in `gpuids` and create a texture object over it.
// When `allocate` is true the cudaArrays are (re)allocated; when false the
// existing arrays are reused and only refilled (used for image splits > 1).
// The copies are asynchronous on the default stream of each device; callers
// rely on later synchronization before sampling the texture.
void CreateTextureInterp(const GpuIds& gpuids,const float* imagedata,Geometry geo,cudaArray** d_cuArrTex, cudaTextureObject_t *texImage,bool allocate)
{
const unsigned int num_devices = gpuids.GetLength();
//size_t size_image=geo.nVoxelX*geo.nVoxelY*geo.nVoxelZ;
const cudaExtent extent = make_cudaExtent(geo.nVoxelX, geo.nVoxelY, geo.nVoxelZ);
if(allocate){
for (unsigned int dev = 0; dev < num_devices; dev++){
cudaSetDevice(gpuids[dev]);
//cudaArray Descriptor
cudaChannelFormatDesc channelDesc = cudaCreateChannelDesc<float>();
//cuda Array
cudaMalloc3DArray(&d_cuArrTex[dev], &channelDesc, extent);
cudaCheckErrors("Texture memory allocation fail");
}
}
// Stage the H2D copies on all devices before creating the texture objects.
for (unsigned int dev = 0; dev < num_devices; dev++){
cudaMemcpy3DParms copyParams = {0};
cudaSetDevice(gpuids[dev]);
//Array creation
copyParams.srcPtr = make_cudaPitchedPtr((void *)imagedata, extent.width*sizeof(float), extent.width, extent.height);
copyParams.dstArray = d_cuArrTex[dev];
copyParams.extent = extent;
copyParams.kind = cudaMemcpyHostToDevice;
cudaMemcpy3DAsync(&copyParams);
//cudaCheckErrors("Texture memory data copy fail");
//Array creation End
}
for (unsigned int dev = 0; dev < num_devices; dev++){
cudaSetDevice(gpuids[dev]);
cudaResourceDesc texRes;
memset(&texRes, 0, sizeof(cudaResourceDesc));
texRes.resType = cudaResourceTypeArray;
texRes.res.array.array  = d_cuArrTex[dev];
cudaTextureDesc texDescr;
memset(&texDescr, 0, sizeof(cudaTextureDesc));
texDescr.normalizedCoords = false;
// With accuracy > 1 (coarse sampling) nearest-neighbor lookup is used and
// accuracy is clamped; otherwise hardware trilinear interpolation.
if (geo.accuracy>1){
texDescr.filterMode = cudaFilterModePoint;
geo.accuracy=1;
}
else{
texDescr.filterMode = cudaFilterModeLinear;
}
// Out-of-volume samples read as zero (border addressing).
texDescr.addressMode[0] = cudaAddressModeBorder;
texDescr.addressMode[1] = cudaAddressModeBorder;
texDescr.addressMode[2] = cudaAddressModeBorder;
texDescr.readMode = cudaReadModeElementType;
cudaCreateTextureObject(&texImage[dev], &texRes, &texDescr, NULL);
cudaCheckErrors("Texture object creation fail");
}
}
/* This code generates the geometries needed to split the image properly in
* cases where the entire image does not fit in the memory of the GPU
**/
/* Build one Geometry per Z-slab of the volume so each slab fits in GPU
 * memory. Every entry is a copy of `geo` with nVoxelZ/sVoxelZ shrunk to the
 * slab and the per-angle Z origin offsets shifted so the slab is positioned
 * correctly. Each geometry owns a malloc'd offOrigZ (see freeGeoArray). */
void splitImageInterp(unsigned int splits,Geometry geo,Geometry* geoArray, unsigned int nangles){
    // Ceiling division: slabs of this many Z voxels, last one possibly smaller.
    const unsigned long splitsize = (geo.nVoxelZ + splits - 1) / splits;
    for (unsigned int sp = 0; sp < splits; sp++){
        Geometry* g = &geoArray[sp];
        *g = geo;
        // All slabs hold `splitsize` voxels except (possibly) the final one.
        g->nVoxelZ = ((sp + 1) * splitsize < geo.nVoxelZ) ? splitsize : geo.nVoxelZ - splitsize * sp;
        g->sVoxelZ = g->nVoxelZ * g->dVoxelZ;
        // Sub-images are no longer centered at the origin: shift every
        // per-angle Z offset accordingly.
        g->offOrigZ = (float *)malloc(nangles * sizeof(float));
        for (unsigned int i = 0; i < nangles; i++){
            g->offOrigZ[i] = geo.offOrigZ[i] - geo.sVoxelZ / 2 + sp * geoArray[0].sVoxelZ + g->sVoxelZ / 2;
        }
    }
}
/* This code precomputes The location of the source and the Delta U and delta V (in the warped space)
* to compute the locations of the x-rays. While it seems verbose and overly-optimized,
* it does saves about 30% of each of the kernel calls. Thats something!
**/
/* For projection `i`, compute in voxel-space coordinates:
 *   uvorigin : position of detector pixel (0,0)
 *   deltaU   : displacement per detector column index
 *   deltaV   : displacement per detector row index
 *   source   : X-ray source position
 * so the kernel can reconstruct any pixel as uvorigin + u*deltaU + v*deltaV.
 * While it seems verbose and overly-optimized, precomputing this saves about
 * 30% of each of the kernel calls. Thats something!
 */
void computeDeltas(Geometry geo,unsigned int i, Point3D* uvorigin, Point3D* deltaU, Point3D* deltaV, Point3D* source){
// Source starts on the +X axis at distance DSO from the origin.
Point3D S;
S.x=geo.DSO[i];
S.y=0;
S.z=0;
//End point
// P is pixel (0,0); Pu0 / Pv0 are its neighbours one step along U and V.
Point3D P,Pu0,Pv0;
P.x  =-(geo.DSD[i]-geo.DSO[i]);   P.y  = geo.dDetecU*(0-((float)geo.nDetecU/2)+0.5);       P.z  = geo.dDetecV*(((float)geo.nDetecV/2)-0.5-0);
Pu0.x=-(geo.DSD[i]-geo.DSO[i]);   Pu0.y= geo.dDetecU*(1-((float)geo.nDetecU/2)+0.5);       Pu0.z= geo.dDetecV*(((float)geo.nDetecV/2)-0.5-0);
Pv0.x=-(geo.DSD[i]-geo.DSO[i]);   Pv0.y= geo.dDetecU*(0-((float)geo.nDetecU/2)+0.5);       Pv0.z= geo.dDetecV*(((float)geo.nDetecV/2)-0.5-1);
// Geomtric trasnformations:
// Now we have the Real world (OXYZ) coordinates of the bottom corner and its two neighbours.
// The obkjective is to get a position of the detector in a coordinate system where:
// 1-units are voxel size (in each direction can be different)
// 2-The image has the its first voxel at (0,0,0)
// 3-The image never rotates
// To do that, we need to compute the "deltas" the detector, or "by how much
// (in new xyz) does the voxels change when and index is added". To do that
// several geometric steps needs to be changed
//1.Roll,pitch,jaw
// The detector can have a small rotation.
// according to
//"A geometric calibration method for cone beam CT systems" Yang K1, Kwan AL, Miller DF, Boone JM. Med Phys. 2006 Jun;33(6):1695-706.
// Only the Z rotation will have a big influence in the image quality when they are small.
// Still all rotations are supported
// To roll pitch jaw, the detector has to be in centered in OXYZ.
P.x=0;Pu0.x=0;Pv0.x=0;
// Roll pitch yaw
rollPitchYaw(geo,i,&P);
rollPitchYaw(geo,i,&Pu0);
rollPitchYaw(geo,i,&Pv0);
//Now ltes translate the detector coordinates to DOD (original position on real coordinate system:
P.x=P.x-(geo.DSD[i]-geo.DSO[i]);
Pu0.x=Pu0.x-(geo.DSD[i]-geo.DSO[i]);
Pv0.x=Pv0.x-(geo.DSD[i]-geo.DSO[i]);
//2: Offset detector
//S doesnt need to chagne
//3: Rotate around RZ RY RZ
Point3D Pfinal, Pfinalu0, Pfinalv0;
Pfinal.x  =P.x;
Pfinal.y  =P.y  +geo.offDetecU[i]; Pfinal.z  =P.z  +geo.offDetecV[i];
Pfinalu0.x=Pu0.x;
Pfinalu0.y=Pu0.y  +geo.offDetecU[i]; Pfinalu0.z =Pu0.z  +geo.offDetecV[i];
Pfinalv0.x=Pv0.x;
Pfinalv0.y=Pv0.y  +geo.offDetecU[i]; Pfinalv0.z =Pv0.z  +geo.offDetecV[i];
// Gantry rotation: apply the ZYZ Euler rotation to detector and source.
eulerZYZ(geo,&Pfinal);
eulerZYZ(geo,&Pfinalu0);
eulerZYZ(geo,&Pfinalv0);
eulerZYZ(geo,&S);
//3: Offset image (instead of offseting image, -offset everything else)
Pfinal.x  =Pfinal.x-geo.offOrigX[i];     Pfinal.y  =Pfinal.y-geo.offOrigY[i];     Pfinal.z  =Pfinal.z-geo.offOrigZ[i];
Pfinalu0.x=Pfinalu0.x-geo.offOrigX[i];   Pfinalu0.y=Pfinalu0.y-geo.offOrigY[i];   Pfinalu0.z=Pfinalu0.z-geo.offOrigZ[i];
Pfinalv0.x=Pfinalv0.x-geo.offOrigX[i];   Pfinalv0.y=Pfinalv0.y-geo.offOrigY[i];   Pfinalv0.z=Pfinalv0.z-geo.offOrigZ[i];
S.x=S.x-geo.offOrigX[i];       S.y=S.y-geo.offOrigY[i];       S.z=S.z-geo.offOrigZ[i];
// As we want the (0,0,0) to be in a corner of the image, we need to translate everything (after rotation);
Pfinal.x  =Pfinal.x+geo.sVoxelX/2-geo.dVoxelX/2;      Pfinal.y  =Pfinal.y+geo.sVoxelY/2-geo.dVoxelY/2;          Pfinal.z  =Pfinal.z  +geo.sVoxelZ/2-geo.dVoxelZ/2;
Pfinalu0.x=Pfinalu0.x+geo.sVoxelX/2-geo.dVoxelX/2;    Pfinalu0.y=Pfinalu0.y+geo.sVoxelY/2-geo.dVoxelY/2;        Pfinalu0.z=Pfinalu0.z+geo.sVoxelZ/2-geo.dVoxelZ/2;
Pfinalv0.x=Pfinalv0.x+geo.sVoxelX/2-geo.dVoxelX/2;    Pfinalv0.y=Pfinalv0.y+geo.sVoxelY/2-geo.dVoxelY/2;        Pfinalv0.z=Pfinalv0.z+geo.sVoxelZ/2-geo.dVoxelZ/2;
S.x       =S.x+geo.sVoxelX/2-geo.dVoxelX/2;           S.y       =S.y+geo.sVoxelY/2-geo.dVoxelY/2;               S.z       =S.z      +geo.sVoxelZ/2-geo.dVoxelZ/2;
//4. Scale everything so dVoxel==1
Pfinal.x  =Pfinal.x/geo.dVoxelX;      Pfinal.y  =Pfinal.y/geo.dVoxelY;        Pfinal.z  =Pfinal.z/geo.dVoxelZ;
Pfinalu0.x=Pfinalu0.x/geo.dVoxelX;    Pfinalu0.y=Pfinalu0.y/geo.dVoxelY;      Pfinalu0.z=Pfinalu0.z/geo.dVoxelZ;
Pfinalv0.x=Pfinalv0.x/geo.dVoxelX;    Pfinalv0.y=Pfinalv0.y/geo.dVoxelY;      Pfinalv0.z=Pfinalv0.z/geo.dVoxelZ;
S.x       =S.x/geo.dVoxelX;           S.y       =S.y/geo.dVoxelY;             S.z       =S.z/geo.dVoxelZ;
//mexPrintf("COR: %f \n",geo.COR[i]);
//5. apply COR. Wherever everything was, now its offesetd by a bit.
// Only wors for standard rotaiton, not aribtary axis rotation.
float CORx, CORy;
CORx=-geo.COR[i]*sin(geo.alpha)/geo.dVoxelX;
CORy= geo.COR[i]*cos(geo.alpha)/geo.dVoxelY;
Pfinal.x+=CORx;   Pfinal.y+=CORy;
Pfinalu0.x+=CORx; Pfinalu0.y+=CORy;
Pfinalv0.x+=CORx; Pfinalv0.y+=CORy;
S.x+=CORx; S.y+=CORy;
// return
// Deltas are differences of neighbouring pixel positions, so any common
// translation cancels out of deltaU/deltaV.
*uvorigin=Pfinal;
deltaU->x=Pfinalu0.x-Pfinal.x;
deltaU->y=Pfinalu0.y-Pfinal.y;
deltaU->z=Pfinalu0.z-Pfinal.z;
deltaV->x=Pfinalv0.x-Pfinal.x;
deltaV->y=Pfinalv0.y-Pfinal.y;
deltaV->z=Pfinalv0.z-Pfinal.z;
*source=S;
}
// Distance (in voxel units) from the source that can safely be skipped
// before ray sampling must start for projection `i`: source-to-volume
// distance minus the volume's bounding radius (clamped at 0). Used to set
// cropdist_init for the kernel so as few out-of-bounds texture reads as
// possible are issued.
float maxdistanceCuboid(Geometry geo,unsigned int i){
    ///////////
    // Compute initial "t" so we access safely as less as out of bounds as possible.
    //////////
    float maxCubX,maxCubY,maxCubZ;
    // Forgetting Z, compute mas distance: diagonal+offset
    maxCubX=(geo.nVoxelX/2+ abs(geo.offOrigX[i])/geo.dVoxelX);
    maxCubY=(geo.nVoxelY/2+ abs(geo.offOrigY[i])/geo.dVoxelY);
    maxCubZ=(geo.nVoxelZ/2+ abs(geo.offOrigZ[i])/geo.dVoxelZ);
    float a,b;
    a=geo.DSO[i]/geo.dVoxelX;
    b=geo.DSO[i]/geo.dVoxelY;
    // As the return of this value is in "voxel space", the source may have an elliptical curve.
    // The distance returned is the safe distance that can be skipped for a given angle alpha, before we need to start sampling.
    // Logical && (was bitwise & on the two comparison results).
    if (geo.theta==0.0f && geo.psi==0.0f) // Special case, it will make the code faster
        return max(a*b/sqrt(a*a*sin(geo.alpha)*sin(geo.alpha)+b*b*cos(geo.alpha)*cos(geo.alpha))-
                   sqrt(maxCubX*maxCubX+maxCubY*maxCubY),0.0f);
    //TODO: think of more special cases?
    // General (spherical rotation) case: conservative bound using the full
    // 3D bounding radius.
    return max(geo.DSO[i]/max(max(geo.dVoxelX,geo.dVoxelY),geo.dVoxelZ)-sqrt(maxCubX*maxCubX+maxCubY*maxCubY+maxCubZ*maxCubZ),0.0f);
}
// Apply the detector misalignment rotation for projection `i` (built from
// dRoll[i], dPitch[i], dYaw[i]) to `point`, in place. The input is copied
// first so all outputs use the pre-rotation coordinates.
void rollPitchYaw(Geometry geo,unsigned int i, Point3D* point){
    // `auto` preserves the exact return type of cos/sin so the arithmetic
    // promotes identically to the previous inline expressions.
    const auto cr = cos(geo.dRoll[i]),  sr = sin(geo.dRoll[i]);
    const auto cp = cos(geo.dPitch[i]), sp = sin(geo.dPitch[i]);
    const auto cy = cos(geo.dYaw[i]),   sy = sin(geo.dYaw[i]);
    const Point3D p = *point;
    point->x = cr*cp*p.x + (cr*sp*sy - sr*cy)*p.y + (cr*sp*cy + sr*sy)*p.z;
    point->y = sr*cp*p.x + (sr*sp*sy + cr*cy)*p.y + (sr*sp*cy - cr*sy)*p.z;
    point->z = -sp*p.x + cp*sy*p.y + cp*cy*p.z;
}
// Rotate `point` in place by the intrinsic Z-Y-Z Euler angles stored in
// `geo` (alpha, theta, psi). The input is copied first so all three output
// components are computed from the pre-rotation coordinates.
void eulerZYZ(Geometry geo, Point3D* point){
    // `auto` keeps whatever type cos/sin resolve to, so the arithmetic
    // below promotes exactly as the inline expressions would.
    const auto ca = cos(geo.alpha), sa = sin(geo.alpha);
    const auto ct = cos(geo.theta), st = sin(geo.theta);
    const auto cp = cos(geo.psi),   sp = sin(geo.psi);
    const Point3D p = *point;
    point->x = ( ca*ct*cp - sa*sp)*p.x + (-ca*ct*sp - sa*cp)*p.y + ca*st*p.z;
    point->y = ( sa*ct*cp + ca*sp)*p.x + (-sa*ct*sp + ca*cp)*p.y + sa*st*p.z;
    point->z = (-st*cp)*p.x + (st*sp)*p.y + ct*p.z;
}
//______________________________________________________________________________
//
// Function: freeGeoArray
//
// Description: Frees the memory from the geometry array for multiGPU.
//______________________________________________________________________________
// Free the per-split Z-offset arrays, then the geometry array itself.
void freeGeoArray(unsigned int splits, Geometry* geoArray){
    unsigned int sp = 0;
    while (sp < splits){
        free(geoArray[sp].offOrigZ);
        ++sp;
    }
    free(geoArray);
}
//______________________________________________________________________________
//
// Function: checkFreeMemory
//
// Description: check available memory on devices
//______________________________________________________________________________
// Query every device in `gpuids` and write to *mem_GPU_global the smallest
// amount of currently-free memory across them, scaled to 95% as a safety
// margin. Aborts (mexErrMsgIdAndTxt) if any GPU has less than half of its
// total memory free, i.e. it appears busy with another workload.
// Fixed: the error identifier was "tvDenoise:tvdenoising:GPU", copy-pasted
// from the TV-denoising unit; it now matches this file's "Ax:..." IDs.
void checkFreeMemory(const GpuIds& gpuids, size_t *mem_GPU_global){
    size_t memfree;
    size_t memtotal;
    int deviceCount = gpuids.GetLength();
    for (int dev = 0; dev < deviceCount; dev++){
        cudaSetDevice(gpuids[dev]);
        cudaMemGetInfo(&memfree,&memtotal);
        // Seed the running minimum with the first device's free memory.
        if(dev==0) *mem_GPU_global=memfree;
        if(memfree<memtotal/2){
            mexErrMsgIdAndTxt("Ax:Interpolated_projection:GPU","One (or more) of your GPUs is being heavily used by another program (possibly graphics-based).\n Free the GPU to run TIGRE\n");
        }
        cudaCheckErrors("Check mem error");
        // Keep the minimum free memory seen over all devices.
        *mem_GPU_global=(memfree<*mem_GPU_global)?memfree:*mem_GPU_global;
    }
    // Leave a 5% margin for driver/internal allocations.
    *mem_GPU_global=(size_t)((double)*mem_GPU_global*0.95);
    //*mem_GPU_global= insert your known number here, in bytes.
}
|
f79803d66a4db1268b23855ee459564d3ed7e38f.hip
|
// !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
#include "includes.h"
// Multiplicative NMF update for W: wcp = W .* (A H') ./ (W H H'), computed
// element-wise with one thread per (row, col) entry of the r x k matrix W.
// A is r x c, H is k x c (stored row-major as h[j*c + i]).
__global__ void nmfw(double *a, int r, int c, int k, double *w, double *h, double *wcp)//must be block synchronized!!!
{
	int row = blockIdx.y*blockDim.y + threadIdx.y;
	int col = blockIdx.x*blockDim.x + threadIdx.x;
	const bool active = (col < k && row < r);   // grid-tail guard
	double temp = 0.0;   // numerator:   w[row,col] * (A H')[row,col]
	double sum  = 0.0;   // denominator: (W H H')[row,col]
	//compute W
	if (active) {
		//ah'
		double dot = 0.0;
		for (int i = 0; i < c; i++)
			dot += a[row*c + i]*h[col*c + i];
		temp = w[row*k+col]*dot;
		//whh'
		for (int i = 0; i < c; i++) {
			for (int j = 0; j < k; j++) {
				sum += w[row*k + j]*h[j*c + i]*h[col*c+i];
			}
		}
	}
	// FIX: __syncthreads() must be reached by every thread of the block.
	// The original placed it inside the divergent bounds check, which is
	// undefined behavior when r or k is not a multiple of the block size.
	__syncthreads();
	if (active) {
		wcp[row*k+col] = temp/sum;
	}
}
|
f79803d66a4db1268b23855ee459564d3ed7e38f.cu
|
#include "includes.h"
// Multiplicative NMF update for W: wcp = W .* (A H') ./ (W H H'), computed
// element-wise with one thread per (row, col) entry of the r x k matrix W.
// A is r x c, H is k x c (stored row-major as h[j*c + i]).
__global__ void nmfw(double *a, int r, int c, int k, double *w, double *h, double *wcp)//must be block synchronized!!!
{
	int row = blockIdx.y*blockDim.y + threadIdx.y;
	int col = blockIdx.x*blockDim.x + threadIdx.x;
	const bool active = (col < k && row < r);   // grid-tail guard
	double temp = 0.0;   // numerator:   w[row,col] * (A H')[row,col]
	double sum  = 0.0;   // denominator: (W H H')[row,col]
	//compute W
	if (active) {
		//ah'
		double dot = 0.0;
		for (int i = 0; i < c; i++)
			dot += a[row*c + i]*h[col*c + i];
		temp = w[row*k+col]*dot;
		//whh'
		for (int i = 0; i < c; i++) {
			for (int j = 0; j < k; j++) {
				sum += w[row*k + j]*h[j*c + i]*h[col*c+i];
			}
		}
	}
	// FIX: __syncthreads() must be reached by every thread of the block.
	// The original placed it inside the divergent bounds check, which is
	// undefined behavior when r or k is not a multiple of the block size.
	__syncthreads();
	if (active) {
		wcp[row*k+col] = temp/sum;
	}
}
|
3c53a0aae2ee84cf008cfef616d78a8fc8a50c28.hip
|
// !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
#include <stdio.h>
#include <assert.h>
#include <stdint.h>
#include <string.h>
#include <unistd.h>
#include <sys/time.h>
#define _inline __attribute__((unused)) static inline
#define ASSERT(cond) extern int __assert__[1-2*(!(cond))];
// -----------------------------------------------------------------------------
// Error-check wrappers for HIP API calls: abort with file/line on failure.
// FIX: hipify renamed the original helper `cudaError` to `hipError_t`, which
// collides with HIP's status type of the same name and cannot compile.
// The helper is renamed to cuCheck; behavior is unchanged.
#define cuErr(err) (cuCheck(err,__FILE__,__LINE__))
#define cuSync(stream) cuErr(hipStreamSynchronize(stream))
#define cuPut(host,dev,size,stream) cuErr(hipMemcpyAsync(dev,host,size,hipMemcpyHostToDevice,stream))
#define cuGet(host,dev,size,stream) cuErr(hipMemcpyAsync(host,dev,size,hipMemcpyDeviceToHost,stream))
_inline void cuCheck(hipError_t err, const char *file, int line) { if (err!=hipSuccess) { fprintf(stderr,"%s:%i CUDA error %d:%s\n", file, line, err, hipGetErrorString(err)); exit(EXIT_FAILURE); } }
// Enumerate GPU devices and print driver/runtime versions; when `full` is
// set, also print per-device memory, processor, and limit information.
// Exits the process when no device is found or the initial query fails.
_inline void cuInfo(bool full=true) {
	int deviceCount=0; hipError_t err=hipGetDeviceCount(&deviceCount);
	// 38 is cudaErrorNoDevice; treated the same as zero devices.
	if (err==38 || deviceCount==0) { fprintf(stderr,"No CUDA device\n"); exit(EXIT_FAILURE); }
	else if (err!=hipSuccess) { fprintf(stderr,"CUDA error %d: %s.\n", err, hipGetErrorString(err)); exit(EXIT_FAILURE); }
	int driverVersion=0, runtimeVersion=0; hipDriverGetVersion(&driverVersion); hipRuntimeGetVersion(&runtimeVersion);
	printf("Found %d CUDA device(s), driver %d.%d, runtime %d.%d.\n", deviceCount, driverVersion/1000, driverVersion%100, runtimeVersion/1000, runtimeVersion%100);
	for (int dev=0; dev<deviceCount; ++dev) {
		hipDeviceProp_t prop; hipGetDeviceProperties(&prop, dev);
		// NOTE(review): resets each device while merely listing them —
		// confirm this side effect is intended.
		hipDeviceReset();
		printf("- Device %d: '%s' (capability %d.%d, watchdog %s)\n", dev, prop.name, prop.major, prop.minor, prop.kernelExecTimeoutEnabled?"on":"off");
		if (full) {
			printf(" - Memory : %dMb @ %dMhz mapHost=%d, unifiedAddr=%d, asyncCopy=%d\n",(int)round(prop.totalGlobalMem/1048576.0),
			prop.memoryClockRate>>10,prop.canMapHostMemory,prop.unifiedAddressing,prop.asyncEngineCount);
			int m=prop.computeMode; const char* mode=(m==0?"default":(m==1?"exclusive":(m==2?"prohibited":"exclusiveProcess")));
			printf(" - Processors: %d @ %dMHz, maxThreads=%d, warp=%d, concurrency=%d, mode=%s\n",prop.multiProcessorCount,
			prop.clockRate>>10,prop.maxThreadsPerMultiProcessor,prop.warpSize,prop.concurrentKernels,mode);
			printf(" - Limits : %d regs/block, %ldK sharedMem/proc, %d thr/block %d thr/proc, %d blocks\n",prop.regsPerBlock,
			prop.sharedMemPerBlock>>10,prop.maxThreadsPerBlock,prop.maxThreadsPerMultiProcessor,prop.maxGridSize[0]);
		}
	}
}
#define BLOCK_SIZE 32
// vertically t, horizontally s
// size_t>= size_s
// chunk=block_size
#define C0 1
#define max(a,b) ({ __typeof__(a)_a = (a); __typeof__ (b) _b= (b); _a > _b ? _a : _b; })
// DP cell cost: pick the maximum of the three incoming scores (left/top with
// gap penalty C0, diagonal free on a character match) and record the chosen
// backtrack direction in *back ('l', 't' or 'D').
// FIX: the original referenced an undeclared `cr` (intended `cd`) and used
// the nonexistent type `back*` for the last parameter (intended `char*`).
__device__ unsigned int cost_f(unsigned left, unsigned top, unsigned diag, char s, char t, char* back) {
	unsigned cl=left+C0;              // arriving from the left (gap)
	unsigned ct=top+C0;               // arriving from the top (gap)
	unsigned cd=diag+(s==t?0:C0);     // diagonal: match is free
	if (cl>=max(ct,cd)) { *back='l'; }
	if (ct>=max(cl,cd)) { *back='t'; }
	if (cd>=max(cl,ct)) { *back='D'; }
	return max(max(cl,cd),ct);
}
// Anti-diagonal wavefront DP over one BLOCK_SIZE chunk: three rotating
// diagonals of scores are kept in shared memory while the backtrack matrix
// is written to global memory. The __syncthreads() barrier sits outside the
// `idx<=i` branch, so every thread of the block reaches it each iteration.
// NOTE(review): `back[idx][i-idx]` applies 2D indexing to a `char*` and the
// cost_f call passes `back[i-idx][idx]` where a pointer is expected — this
// does not compile as written; the intended row stride (s_size? BLOCK_SIZE?)
// must be confirmed before flattening the index.
__global__ void dp1(unsigned s_size, char* s, unsigned t_size, char* t, char* back) {
	__shared__ unsigned cost[BLOCK_SIZE*3];
	unsigned idx = threadIdx.x;
	// init, go vertically down along a part of t
	unsigned d0=0;
	unsigned d1=BLOCK_SIZE; // diag with delay 1
	unsigned d2=BLOCK_SIZE*2; // diag with delay 2
	for (unsigned i=0;i<blockDim.x;++i) {
		if (idx<=i) {
			if (idx==0 || i-idx==0) {
				back[idx][i-idx]='.'; // at start
				cost[d0+idx]=0;
			} else {
				cost[d0+idx]=cost_f(cost[d1+idx-1],cost[d1+idx],cost[d2+idx-1],s[i-idx],t[idx],back[i-idx][idx]);
			}
		}
		unsigned dt=d2; d2=d1; d1=d0; d0=dt; // shifting diagonal
		__syncthreads();
	}
	// continu
	// termine
	//unsigned index = threadIdx.x + blockIdx.x * blockDim.x + blockIdx.y * blockDim.x * gridDim.x;
}
// Smoke test: print device info, then launch dp1 once with empty inputs.
int main() {
	cuInfo();
	cuErr(hipDeviceReset());
	hipStream_t stream;
	cuErr(hipStreamCreate(&stream));
	// dp1(s_size, s, t_size, t, back): sizes are unsigned, buffers char*.
	// FIX: the original passed (NULL,0,NULL,0,0), i.e. pointers where the
	// sizes go and integer zeros where the pointers go.
	hipLaunchKernelGGL(( dp1), dim3(1),dim3(1),0,stream, 0u,(char*)NULL,0u,(char*)NULL,(char*)NULL);
	cuErr(hipStreamDestroy(stream));  // also check the destroy call
	hipDeviceReset();
	return 0;
}
|
3c53a0aae2ee84cf008cfef616d78a8fc8a50c28.cu
|
#include <stdio.h>
#include <assert.h>
#include <stdint.h>
#include <string.h>
#include <unistd.h>
#include <sys/time.h>
#define _inline __attribute__((unused)) static inline
#define ASSERT(cond) extern int __assert__[1-2*(!(cond))];
// -----------------------------------------------------------------------------
#define cuErr(err) (cudaError(err,__FILE__,__LINE__))
#define cuSync(stream) cuErr(cudaStreamSynchronize(stream))
#define cuPut(host,dev,size,stream) cuErr(cudaMemcpyAsync(dev,host,size,cudaMemcpyHostToDevice,stream))
#define cuGet(host,dev,size,stream) cuErr(cudaMemcpyAsync(host,dev,size,cudaMemcpyDeviceToHost,stream))
// Abort with a file/line-tagged message whenever a CUDA API call fails;
// used through the cuErr/cuSync/cuPut/cuGet macros above.
_inline void cudaError(cudaError_t err, const char *file, int line) { if (err!=cudaSuccess) { fprintf(stderr,"%s:%i CUDA error %d:%s\n", file, line, err, cudaGetErrorString(err)); exit(EXIT_FAILURE); } }
// Enumerate CUDA devices and print driver/runtime versions; when `full` is
// set, also print per-device memory, processor, and limit information.
// Exits the process when no device is found or the initial query fails.
_inline void cuInfo(bool full=true) {
	int deviceCount=0; cudaError_t err=cudaGetDeviceCount(&deviceCount);
	// 38 is cudaErrorNoDevice; treated the same as zero devices.
	if (err==38 || deviceCount==0) { fprintf(stderr,"No CUDA device\n"); exit(EXIT_FAILURE); }
	else if (err!=cudaSuccess) { fprintf(stderr,"CUDA error %d: %s.\n", err, cudaGetErrorString(err)); exit(EXIT_FAILURE); }
	int driverVersion=0, runtimeVersion=0; cudaDriverGetVersion(&driverVersion); cudaRuntimeGetVersion(&runtimeVersion);
	printf("Found %d CUDA device(s), driver %d.%d, runtime %d.%d.\n", deviceCount, driverVersion/1000, driverVersion%100, runtimeVersion/1000, runtimeVersion%100);
	for (int dev=0; dev<deviceCount; ++dev) {
		cudaDeviceProp prop; cudaGetDeviceProperties(&prop, dev);
		// NOTE(review): resets each device while merely listing them —
		// confirm this side effect is intended.
		cudaDeviceReset();
		printf("- Device %d: '%s' (capability %d.%d, watchdog %s)\n", dev, prop.name, prop.major, prop.minor, prop.kernelExecTimeoutEnabled?"on":"off");
		if (full) {
			printf(" - Memory : %dMb @ %dMhz mapHost=%d, unifiedAddr=%d, asyncCopy=%d\n",(int)round(prop.totalGlobalMem/1048576.0),
			prop.memoryClockRate>>10,prop.canMapHostMemory,prop.unifiedAddressing,prop.asyncEngineCount);
			int m=prop.computeMode; const char* mode=(m==0?"default":(m==1?"exclusive":(m==2?"prohibited":"exclusiveProcess")));
			printf(" - Processors: %d @ %dMHz, maxThreads=%d, warp=%d, concurrency=%d, mode=%s\n",prop.multiProcessorCount,
			prop.clockRate>>10,prop.maxThreadsPerMultiProcessor,prop.warpSize,prop.concurrentKernels,mode);
			printf(" - Limits : %d regs/block, %ldK sharedMem/proc, %d thr/block %d thr/proc, %d blocks\n",prop.regsPerBlock,
			prop.sharedMemPerBlock>>10,prop.maxThreadsPerBlock,prop.maxThreadsPerMultiProcessor,prop.maxGridSize[0]);
		}
	}
}
#define BLOCK_SIZE 32
// vertically t, horizontally s
// size_t>= size_s
// chunk=block_size
#define C0 1
#define max(a,b) ({ __typeof__(a)_a = (a); __typeof__ (b) _b= (b); _a > _b ? _a : _b; })
// DP cell cost: pick the maximum of the three incoming scores (left/top with
// gap penalty C0, diagonal free on a character match) and record the chosen
// backtrack direction in *back ('l', 't' or 'D').
// FIX: the original referenced an undeclared `cr` (intended `cd`) and used
// the nonexistent type `back*` for the last parameter (intended `char*`).
__device__ unsigned int cost_f(unsigned left, unsigned top, unsigned diag, char s, char t, char* back) {
	unsigned cl=left+C0;              // arriving from the left (gap)
	unsigned ct=top+C0;               // arriving from the top (gap)
	unsigned cd=diag+(s==t?0:C0);     // diagonal: match is free
	if (cl>=max(ct,cd)) { *back='l'; }
	if (ct>=max(cl,cd)) { *back='t'; }
	if (cd>=max(cl,ct)) { *back='D'; }
	return max(max(cl,cd),ct);
}
// Anti-diagonal wavefront DP over one BLOCK_SIZE chunk: three rotating
// diagonals of scores are kept in shared memory while the backtrack matrix
// is written to global memory. The __syncthreads() barrier sits outside the
// `idx<=i` branch, so every thread of the block reaches it each iteration.
// NOTE(review): `back[idx][i-idx]` applies 2D indexing to a `char*` and the
// cost_f call passes `back[i-idx][idx]` where a pointer is expected — this
// does not compile as written; the intended row stride (s_size? BLOCK_SIZE?)
// must be confirmed before flattening the index.
__global__ void dp1(unsigned s_size, char* s, unsigned t_size, char* t, char* back) {
	__shared__ unsigned cost[BLOCK_SIZE*3];
	unsigned idx = threadIdx.x;
	// init, go vertically down along a part of t
	unsigned d0=0;
	unsigned d1=BLOCK_SIZE; // diag with delay 1
	unsigned d2=BLOCK_SIZE*2; // diag with delay 2
	for (unsigned i=0;i<blockDim.x;++i) {
		if (idx<=i) {
			if (idx==0 || i-idx==0) {
				back[idx][i-idx]='.'; // at start
				cost[d0+idx]=0;
			} else {
				cost[d0+idx]=cost_f(cost[d1+idx-1],cost[d1+idx],cost[d2+idx-1],s[i-idx],t[idx],back[i-idx][idx]);
			}
		}
		unsigned dt=d2; d2=d1; d1=d0; d0=dt; // shifting diagonal
		__syncthreads();
	}
	// continu
	// termine
	//unsigned index = threadIdx.x + blockIdx.x * blockDim.x + blockIdx.y * blockDim.x * gridDim.x;
}
// Smoke test: print device info, then launch dp1 once with empty inputs.
int main() {
	cuInfo();
	cuErr(cudaDeviceReset());
	cudaStream_t stream;
	cuErr(cudaStreamCreate(&stream));
	// dp1(s_size, s, t_size, t, back): sizes are unsigned, buffers char*.
	// FIX: the original passed (NULL,0,NULL,0,0), i.e. pointers where the
	// sizes go and integer zeros where the pointers go.
	dp1<<<1,1,0,stream>>>(0u, (char*)NULL, 0u, (char*)NULL, (char*)NULL);
	cuErr(cudaStreamDestroy(stream));  // also check the destroy call
	cudaDeviceReset();
	return 0;
}
|
d06ce33c650738829cbd568967f8fe58e1b8d33d.hip
|
// !!! This is a file automatically generated by hipify!!!
/*
Voronoi N BODY
Voronoi Grid
NBody
Morton Coding
Hilbert Coding
Thrust Sort
Body Interactions for region
*/
#include <thrust/device_vector.h>
#include <thrust/host_vector.h>
#include <cmath>
#include <thrust/sort.h>
#include "hip/hip_runtime.h"
#include "device_launch_parameters.h"
#include <iostream>
#include <algorithm>
#include <vector>
#include <numeric>
#define _USE_MATH_DEFINES
#include "math.h"
#ifdef _WIN32
#include<windows.h>
#endif
#include <GL/glew.h>
#include <GL/wglew.h>
#include <GL/glut.h>
const int N = 2048;
const int DIM = 1 << 4; // DIM = 2^n (n = 1, 2, 3..)
int ThreadsX = 128;
const int iterations = 100;
const double sim_rad = 1e18;
std::vector<float> results;
std::vector<float> vresults;
std::vector<float> sresults;
std::vector<float> cresults;
// Description of Voronoi Buf
// One Voronoi seed point: its 2D position, its Morton and Hilbert cell ids,
// and the display colour assigned to bodies falling in this cell.
struct VoronoiBuf
{
	double x;
	double y;
	int morton;    // Morton (Z-order) index of this cell
	int hilbert;   // Hilbert-curve index of this cell
	float4 colour; // RGBA used when rendering bodies of this cell
	// Debug dump of the seed point to stdout.
	void print()
	{
		std::cout << "\tMorton: " << morton <<" Hilbert: " << hilbert << std::endl;
		std::cout <<"\tPosition(x,y): " << x << " " << y << std::endl;
		//std::cout << "\tR: " << colour.x << " G: " << colour.y << " B: " << colour.z << std::endl;
	}
};
VoronoiBuf* Voronoi_d;
double4* M_cd;
int* LUT_d;
int2* index_d;
// Description of Body
// One simulated body. Field packing (established by init_body and the
// kernels below): position.w carries the mass; velocity.z/.w are reused by
// nbody_kernel_hil to stash the last computed force components.
struct body
{
	float4 colour;     // render colour (RGBA)
	double4 position;  // x,y[,z unused]; .w = mass
	double4 velocity;  // x,y; .z/.w = last force (see nbody_kernel_hil)
	int morton;        // Morton id of the Voronoi cell this body is in
	int hilbert;       // Hilbert id of the Voronoi cell this body is in
	double2 force;     // accumulated force (CPU-side update path)
	// Equality on position and velocity x/y only (mass/colour ignored).
	bool operator==(body b)
	{
		return( (position.x == b.position.x) && (position.y == b.position.y) && (velocity.x == b.velocity.x) && (velocity.y == b.velocity.y) );
	}
	// Print component-wise differences against another body (debug aid).
	// fx/fy compare the force stashed in velocity.z/.w.
	void error(body b)
	{
		double px = b.position.x - position.x;
		double py = b.position.y - position.y;
		double vx = b.velocity.x - velocity.x;
		double vy = b.velocity.y - velocity.y;
		double fx = b.velocity.z - velocity.z;
		double fy = b.velocity.w - velocity.w;
		std::cout << "Error in \n" << std::endl;
		std::cout << "\tP: " << px << " " << py;
		std::cout << std::endl;
		std::cout << "\tV: " << vx << " " << vy;
		std::cout << std::endl;
		std::cout << "\tF: " << fx << " " << fy;
		std::cout << std::endl;
	}
	// Debug dump of all fields to stdout.
	void print()
	{
		std::cout << "\tMorton : " << morton << " Hilbert: " << hilbert;
		std::cout << "\n\tPosition(x,y): " << position.x << " " << position.y;
		std::cout << "\n\tV: " << velocity.x << " " << velocity.y;
		std::cout << "\tF: " << velocity.z << " " << velocity.w;
		std::cout << "\n\tMass: " << position.w << std::endl << std::endl;
	}
	// Zero the accumulated force before a new accumulation pass.
	void resetForce()
	{
		force = double2();
	}
	// Accumulate the gravitational pull of body b (softened by EPS).
	void addForce(body b)
	{
		double G = 6.67e-11; // gravational constant
		double EPS = 3E4; // softening parameter
		double dx = b.position.x - position.x;
		double dy = b.position.y - position.y;
		double dist = sqrt(dx*dx + dy*dy);
		double F = (G * position.w * b.position.w) / (dist*dist + EPS*EPS);
		force.x += F * dx / dist;
		force.y += F * dy / dist;
	}
	// Euler step with the 1e10 timestep scale used throughout this file.
	void update()
	{
		velocity.x += 1e10 * force.x / position.w;
		velocity.y += 1e10 * force.y / position.w;
		position.x += 1e10 * velocity.x;
		position.y += 1e10 * velocity.y;
	}
};
// sort Hilbert
// Ordering used by thrust::sort in sortBodies: bodies sort by Hilbert id.
__host__ __device__ bool operator<(const body &lhs, const body &rhs)
{
	return lhs.hilbert < rhs.hilbert;
}
// Bit-interleaving helpers used to build Morton (Z-order) indices.
// "Insert" a 0 bit after each of the 16 low bits of x
int Part1By1(int x)
{
	x &= 0x0000ffff; // x = ---- ---- ---- ---- fedc ba98 7654 3210
	x = (x ^ (x << 8)) & 0x00ff00ff; // x = ---- ---- fedc ba98 ---- ---- 7654 3210
	x = (x ^ (x << 4)) & 0x0f0f0f0f; // x = ---- fedc ---- ba98 ---- 7654 ---- 3210
	x = (x ^ (x << 2)) & 0x33333333; // x = --fe --dc --ba --98 --76 --54 --32 --10
	x = (x ^ (x << 1)) & 0x55555555; // x = -f-e -d-c -b-a -9-8 -7-6 -5-4 -3-2 -1-0
	return x;
}
// "Insert" two 0 bits after each of the 10 low bits of x
int Part1By2(int x)
{
	x &= 0x000003ff; // x = ---- ---- ---- ---- ---- --98 7654 3210
	x = (x ^ (x << 16)) & 0xff0000ff; // x = ---- --98 ---- ---- ---- ---- 7654 3210
	x = (x ^ (x << 8)) & 0x0300f00f; // x = ---- --98 ---- ---- 7654 ---- ---- 3210
	x = (x ^ (x << 4)) & 0x030c30c3; // x = ---- --98 ---- 76-- --54 ---- 32-- --10
	x = (x ^ (x << 2)) & 0x09249249; // x = ---- 9--8 --7- -6-- 5--4 --3- -2-- 1--0
	return x;
}
// 2D Morton code: interleave the low 16 bits of x (even bit positions)
// and y (odd bit positions).
int EncodeMorton2(int x, int y)
{
	return (Part1By1(y) << 1) + Part1By1(x);
}
// 3D Morton code: interleave the low 10 bits of x, y and z.
int EncodeMorton3(int x, int y, int z)
{
	return (Part1By2(z) << 2) + (Part1By2(y) << 1) + Part1By2(x);
}
//rotate/flip a quadrant appropriately
void rot(int n, int *x, int *y, int rx, int ry) {
if (ry == 0) {
if (rx == 1) {
*x = n-1 - *x;
*y = n-1 - *y;
}
//Swap x and y
int t = *x;
*x = *y;
*y = t;
}
}
//convert (x,y) to d
int EncodeHilbert2 (int n, int x, int y) {
int rx, ry, s, d=0;
for (s=n/2; s>0; s/=2) {
rx = (x & s) > 0;
ry = (y & s) > 0;
d += s * s * ((3 * rx) ^ ry);
rot(s, &x, &y, rx, ry);
}
return d;
}
// Speed of a circular orbit at (x, y) around the central mass
// (1e6 solar masses): v = sqrt(G * M / r).
double circlev(double x, double y)
{
	const double solarmass = 1.98892e30;
	double r = sqrt(x*x + y*y);
	return sqrt((6.67e-11)*1e6*solarmass / r);
}
// Uniform random double in [0, 1] based on rand().
// NOTE(review): shadows POSIX random() on non-Windows builds — confirm.
double random() { return ((double)rand())/((double)RAND_MAX); }
// Sign of val as -1, 0 or +1.
template <typename T> int signum(T val) {
	return (T(0) < val) - (val < T(0));
}
// Build the initial state of body i: index 0 is the heavy central body
// (1e6 solar masses, at rest at the origin, yellow); every other body gets
// a random position inside the simulation radius and a velocity roughly
// tangential to a circular orbit (red).
body init_body(int i)
{
	double solarmass = 1.98892e30;
	if(i != 0)
	{
		// Random position, exponentially concentrated toward the centre.
		double px = sim_rad*exp(-1.8)*(0.5 - random());
		double py = sim_rad*exp(-1.8)*(0.5 - random());
		// Circular-orbit speed at that radius, split into components
		// perpendicular to the position vector.
		double magv = circlev(px, py);
		double absangle = atan(abs(py/px));
		double thetav = M_PI/2 - absangle;
		double phiv = random() * M_PI;
		double vx = -1*signum(py)*cos(thetav)*magv;
		double vy = signum(px)*sin(thetav)*magv;
		// Randomize orbital direction.
		if(random() < 0.5)
		{
			vx = -vx;
			vy = -vy;
		}
		double mass = random() * solarmass*10+1e20;
		body b;
		b.position.x = px;
		b.position.y = py;
		b.position.z = 0;
		b.position.w = mass;   // mass rides in position.w
		b.velocity.x = vx;
		b.velocity.y = vy;
		b.velocity.z = 0;
		b.velocity.w = 0;
		b.colour.x = 1.0f;
		b.colour.y = 0.0f;
		b.colour.z = 0.0f;
		b.colour.w = 1.0f;
		b.morton = 0;
		b.hilbert = 0;
		return b;
	}
	else
	{
		// Central body: massive, stationary, yellow.
		body b;
		b.position.x = 0;
		b.position.y = 0;
		b.position.z = 0;
		b.position.w = 1e6*solarmass;
		b.velocity.x = 0;
		b.velocity.y = 0;
		b.velocity.z = 0;
		b.velocity.w = 0;
		b.colour.x = 1.0f;
		b.colour.y = 1.0f;
		b.colour.z = 0.0f;
		b.colour.w = 1.0f;
		b.morton = 0;
		b.hilbert = 0;
		return b;
	}
}
// thrust::device_vector<body> b;
body* b_in;
body* b_out;
void cudaQuery();
// Accumulate the softened gravitational force of body bj on body bi into
// the running total `a` and return it. Masses ride in the .w components.
// `output_thread` is currently unused (always passed false by callers).
__device__ double2 bodyBodyInteraction(double4 bi, double4 bj, double2 a, bool output_thread)
{
	double G = 6.67e-11; // gravational constant
	double EPS = 3E4; // softening parameter
	// [2 FLOPS]
	double dx = bj.x - bi.x;
	double dy = bj.y - bi.y;
	// [5 FLOPS]
	double dist = sqrt(dx*dx + dy*dy) + 0.0000125; // additional softening parameter
	// [6 FLOPS]
	double F = (G * bi.w * bj.w) / (dist*dist + EPS*EPS);
	// [6 FLOPS]
	a.x += F * dx / dist;
	a.y += F * dy / dist;
	return a;
}
// Lower-bound search over the hilbert-sorted body array: return the index
// of the FIRST body whose hilbert id equals h, or -1 when absent.
// FIX: the original variant never examined index 0 (a match at the front of
// the array was reported as -1) and its `right < N-1` test excluded a match
// sitting at the last index; both are corrected by a standard half-open
// [lo, hi) lower bound.
__device__ int findFirstValue(body* body_in, int h)
{
	int lo = 0;
	int hi = N;                       // search range [lo, hi)
	while (lo < hi)
	{
		int mid = (lo + hi) / 2;
		if (body_in[mid].hilbert < h)
			lo = mid + 1;
		else
			hi = mid;
	}
	if (lo < N && body_in[lo].hilbert == h)
		return lo;
	return -1;
}
// One thread per Hilbert cell (idx in [0, DIM*DIM)): scan the hilbert-sorted
// body array starting at the first body of cell idx, fold all of that cell's
// bodies into a running weighted centre of mass, and record the [first,last)
// index range of the cell in index_store for later per-cell iteration.
__global__ void
compute_center_mass_with_search(body* body_in, double4* c_m, int2* index_store)
{
	// idx( 0 to uNumVoronoiPts )
	unsigned int idx = blockIdx.x*blockDim.x + threadIdx.x;
	bool mass = 0;            // true once the first body of the cell was seen
	double4 center;
	center.x = 0.0;
	center.y = 0.0;
	center.z = 0.0;
	center.w = 0.0;           // .w accumulates the total mass
	int first = 0;
	int last = 0;
	int start = findFirstValue(body_in, idx);
	if(start != -1)
	{
		// for all bodies
		for(int b = start; b < N; b++)
		{
			int hil = body_in[b].hilbert;
			if(idx == hil)
			{
				// if first hilbert in list
				if(!mass)
				{
					center = body_in[b].position; //assign initial value
					first = b;
					mass = true;
				}
				else
				{
					// Incremental weighted average of positions by mass (.w).
					double4 other = body_in[b].position;
					center.x = center.x*center.w + other.x * other.w;
					center.y = center.y*center.w + other.y * other.w;
					//center.z = center.z*center.w + other.z * other.w;
					center.w += other.w;
					center.x = center.x/center.w;
					center.y = center.y/center.w;
					//center.z = center.z/center.w;
				}
			}
			if(hil > idx)
			{
				// Passed the end of this cell's run in the sorted array.
				first = (mass == 0) ? b : first;
				//if((!mass))
				//	first = b;
				last = b;
				break;
			}
		}
	}
	// Cell runs to the end of the array: close the range at N-1.
	last = ((last == 0) && (first != 0)) ? N-1 : last;
	//if((last == 0) && (first != 0))
	//	last = N-1;
	//center of mass for voronoi region
	index_store[idx].x = first;
	index_store[idx].y = last;
	c_m[idx] = center;
}
// Accumulate the force on `position` from every body of Hilbert cell h,
// using the [first,last) index range recorded by the centre-of-mass kernel.
__device__ double2 computeHilbertBodies(int h, int2* index_store, body* body_in, double4 position, double2 force)
{
	for(int i = index_store[h].x; i < index_store[h].y; i++)
	{
		force = bodyBodyInteraction(position, body_in[i].position, force, false);
	}
	return force;
}
// Linear scan of the DIM x DIM lookup table for Hilbert id h; returns its
// (x, y) grid coordinates, or (-1, -1) when absent. O(DIM^2) per call.
__device__ int2 findValue(int* LUT, int h)
{
	for(int y = 0; y < DIM; y++)
	{
		for(int x = 0; x < DIM; x++)
		{
			if(LUT[y*DIM + x] == h)
			{
				int2 coords;
				coords.x = x;
				coords.y = y;
				return coords;
			}
		}
	}
	int2 err;
	err.x = -1;
	err.y = -1;
	return err;
}
// One thread per body: exact pairwise forces against the 9 neighbouring
// Hilbert cells (3x3 torus-wrapped neighbourhood in the LUT grid), plus a
// centre-of-mass approximation for every cell; the neighbours' CoM
// contribution is subtracted so those cells are not counted twice.
// Finishes with an Euler step, writing the result to body_out.
__global__ void
nbody_kernel_hil(body* body_in, double4* c_m, int* LUT, int2* index_store, body* body_out)
{
	// body[idx]
	unsigned int idx = blockIdx.x*blockDim.x + threadIdx.x;
	// FIX: the launch rounds the grid up to a multiple of ThreadsX, so the
	// tail threads must be discarded or they read/write out of bounds.
	if (idx >= N) return;
	// Indentify the regions neighbouring body[idx]
	int2 coords = findValue(LUT, body_in[idx].hilbert);
	int regions[9];
	// 9 reads from global memory.........
	regions[0] = LUT[(coords.y-1 + DIM)%DIM*DIM + (coords.x-1 + DIM)%DIM];
	regions[1] = LUT[(coords.y-1 + DIM)%DIM*DIM + (coords.x + DIM)%DIM];
	regions[2] = LUT[(coords.y-1 + DIM)%DIM*DIM + (coords.x+1 + DIM)%DIM];
	regions[3] = LUT[(coords.y + DIM)%DIM*DIM + (coords.x-1 + DIM)%DIM];
	regions[4] = LUT[(coords.y + DIM)%DIM*DIM + (coords.x + DIM)%DIM];
	regions[5] = LUT[(coords.y + DIM)%DIM*DIM + (coords.x+1 + DIM)%DIM];
	regions[6] = LUT[(coords.y+1 + DIM)%DIM*DIM + (coords.x-1 + DIM)%DIM];
	regions[7] = LUT[(coords.y+1 + DIM)%DIM*DIM + (coords.x + DIM)%DIM];
	regions[8] = LUT[(coords.y+1 + DIM)%DIM*DIM + (coords.x+1 + DIM)%DIM];
	// position of body[idx]
	double4 position = body_in[idx].position;
	// calculate force contributions for body
	double2 force;
	force.x = 0.0;
	force.y = 0.0;
	// f is force of centerofmass relations with neighbouring regions
	double2 f;
	f.x = 0.0;
	f.y = 0.0;
	for(int i = 0; i < 9; i++)
	{
		force = computeHilbertBodies(regions[i], index_store, body_in, position, force);
		f = bodyBodyInteraction(position, c_m[regions[i]], f, false);
	}
	// subtract the centerofmass influence to cancel when added to force
	force.x = force.x - f.x;
	force.y = force.y - f.y;
	// perform center of mass interactions with all regions
	for(int i = 0; i < DIM*DIM; i++)
	{
		double4 p_j = c_m[i];
		force = bodyBodyInteraction(position, p_j, force, false);
	}
	// Do update (Euler step; force is stashed in velocity.z/.w for debugging)
	double4 velocity = body_in[idx].velocity;
	double2 v;
	v.x = velocity.x + 1e10 * force.x / position.w;
	v.y = velocity.y + 1e10 * force.y / position.w;
	body_out[idx].velocity.x = v.x;
	body_out[idx].velocity.y = v.y;
	body_out[idx].velocity.z = force.x;
	body_out[idx].velocity.w = force.y;
	body_out[idx].position.x = position.x + 1e10 * v.x;
	body_out[idx].position.y = position.y + 1e10 * v.y;
	body_out[idx].position.z = 0;
	body_out[idx].position.w = position.w;
	body_out[idx].colour = body_in[idx].colour;
	body_out[idx].morton = body_in[idx].morton;
	body_out[idx].hilbert = body_in[idx].hilbert;
}
// Create Voronoi kernel
// One thread per body: find the nearest Voronoi seed (squared distance) and
// tag the body with that cell's morton/hilbert ids and colour.
__global__ void create_voronoi( body* body_in, body* body_out, VoronoiBuf * v)
{
	// map from thread to body index
	unsigned int idx = blockIdx.x * blockDim.x + threadIdx.x;
	// FIX: guard the grid tail so launches where N is not a multiple of the
	// block size cannot read/write out of bounds.
	if (idx >= N) return;
	double x = body_in[idx].position.x;
	double y = body_in[idx].position.y;
	// Seed the minimum with cell 0, then scan the remaining seeds
	// (the original recomputed cell 0's distance twice).
	double d_x = (v[0].x - x);
	double d_y = (v[0].y - y);
	double minDist = d_x*d_x + d_y*d_y;
	int minDistPoint = 0;
	for(int i = 1; i < DIM*DIM; i++)
	{
		double diff_x = (v[i].x - x);
		double diff_y = (v[i].y - y);
		double dist = (diff_x*diff_x + diff_y*diff_y);
		if(dist < minDist)   // strict <: ties keep the lower index
		{
			minDist = dist;
			minDistPoint = i;
		}
	}
	// Tag the body with the winning cell; state is otherwise passed through.
	body_out[idx].morton = v[minDistPoint].morton;
	body_out[idx].hilbert = v[minDistPoint].hilbert;
	body_out[idx].colour = v[minDistPoint].colour;
	body_out[idx].position = body_in[idx].position;
	body_out[idx].velocity = body_in[idx].velocity;
}
// Report a failed runtime API call: print the error string, pause for the
// user (Windows-style system("pause")), and return false. Returns true on
// success.
bool cudaCheckAPIError(hipError_t err)
{
	if(err != hipSuccess)
	{
		std::cerr << "Error : " << hipGetErrorString(err) << std::endl;
		system("pause");
		return false;
	}
	return true;
}
// Print "message : value" for a scalar.
template <class T>
void printLine(const char* message, T value)
{
	std::cout << message << "\t : " << value << std::endl;
}
// Print 2 or 3 elements of an array, deciding by testing value[2].
// NOTE(review): reading value[2] is out of bounds for a 2-element array,
// and comparing an arbitrary T against NULL is suspect — confirm callers.
template <class T>
void printLine(const char* message, T* value)
{
	if(value[2] == NULL)
		std::cout << message << "\t : " << value[0] << ", " << value[1] << std::endl;
	else
		std::cout << message << "\t : " << value[0] << " " << value[1] << " " << value[2] << std::endl;
}
// Print an empty line.
void printBlank()
{
	std::cout << std::endl;
}
// Round globalSize up to the next multiple of groupSize (used to size the
// grid so every element is covered by at least one thread).
size_t RoundUp(int groupSize, int globalSize)
{
	int remainder = globalSize % groupSize;
	return (remainder == 0)
		? (size_t)globalSize
		: (size_t)(globalSize + groupSize - remainder);
}
// Free every device allocation and exit the process.
void cleanup()
{
	//cleanup
	cudaCheckAPIError( hipFree( b_in ) );
	cudaCheckAPIError( hipFree( b_out ) );
	cudaCheckAPIError( hipFree( M_cd) );
	cudaCheckAPIError( hipFree( LUT_d ) );
	cudaCheckAPIError( hipFree( Voronoi_d ) );
	// FIX: index_d is used by the kernels but was never released here.
	// NOTE(review): assumed allocated in main alongside the other buffers —
	// confirm (hipFree(NULL) is harmless if it was not).
	cudaCheckAPIError( hipFree( index_d ) );
	exit(EXIT_SUCCESS);
}
// GLUT keyboard callback: Esc/Enter/Q/q clean up and quit; 'p' pauses.
// x/y are the mouse coordinates supplied by GLUT (unused).
void Key(unsigned char key, int x, int y)
{
	switch(key)
	{
	case '\033': // escape quits
	case '\015': // Enter quits
	case 'Q': // Q quits
	case 'q': // q (or escape) quits
		// Cleanup up and quit
		cleanup();
		break;
	case 'p':
		system("pause");
		break;
	}
}
// Record `stop` on the default stream, wait for it, and return the elapsed
// time in milliseconds since `start` (which the caller already recorded).
float completeEvent(hipEvent_t start, hipEvent_t stop)
{
	// Add the stop event to the GPUs queue of work
	cudaCheckAPIError( hipEventRecord(stop, 0) );
	// Wait until the event has completed so it is safe to read
	cudaCheckAPIError( hipEventSynchronize(stop) );
	// Determine the time elapsed between the events
	float milliseconds = 0;
	cudaCheckAPIError( hipEventElapsedTime(&milliseconds, start, stop) );
	return milliseconds;
}
// Copy the N bodies from device pointer `b` to the host and draw them as
// coloured GL points, scaled so sim_rad/4 maps to the unit viewport.
void renderBodies(body* b)
{
	body bodies[N];   // host-side staging buffer (stack: N * sizeof(body))
	// copy data from device to host
	//cudaCheckAPIError( hipEventRecord(startEvent, 0) );
	cudaCheckAPIError( hipMemcpy( bodies, b, sizeof(body)*N, hipMemcpyDeviceToHost ) );
	//completeEvent(startEvent, stopEvent, "retrieving output", false);
	//for(int i = 0; i < N; i++)
	//	bodies[i].print();
	glEnable( GL_POINT_SMOOTH );
	glEnable( GL_BLEND );
	glBlendFunc( GL_SRC_ALPHA, GL_ONE_MINUS_SRC_ALPHA );
	glPointSize( 2.0 );
	glClearColor( 0.0, 0.0, 1.0, 1.0 );
	glClear( GL_COLOR_BUFFER_BIT | GL_DEPTH_BUFFER_BIT);
	//glColor3f(1, 0, 0);
	glBegin(GL_POINTS);
	for(int i = 0; i < N; i++)
	{
		glColor3f( bodies[i].colour.x, bodies[i].colour.y, bodies[i].colour.z );
		glVertex2f(bodies[i].position.x/(sim_rad/4), bodies[i].position.y/(sim_rad/4));
	}
	glEnd();
	glFinish();
	glutSwapBuffers();
	//for(int i = 0; i < N; i++)
	//	bodies[i].print();
	//system("pause");
}
// Debug helper: copy the per-cell centres of mass and [first,last) index
// ranges to the host, print them, and pause.
void outputCOM()
{
	double4 com[DIM*DIM];
	int2 index_store[DIM*DIM];
	// copy data from device to host
	cudaCheckAPIError( hipMemcpy( com, M_cd, sizeof(double4)*DIM*DIM, hipMemcpyDeviceToHost ) );
	cudaCheckAPIError( hipMemcpy( index_store, index_d, sizeof(int2)*DIM*DIM, hipMemcpyDeviceToHost ) );
	for(int i = 0; i < DIM*DIM; i++)
	{
		std::cout <<"H" << i << "\tX: " << com[i].x << " Y: " << com[i].y << " M: " << com[i].w << std::endl;
		std::cout <<"\tFirst" << index_store[i].x << " Last: " << index_store[i].y << std::endl;
	}
	system("pause");
}
// Execute voronoi kernel
// Launch create_voronoi over all N bodies (N/ThreadsX blocks of ThreadsX
// threads) and record the kernel time into vresults via CUDA events.
void executeVoronoi()
{
	// Event parameters
	hipEvent_t startEvent, stopEvent;
	// Create the event using hipEventCreate
	cudaCheckAPIError( hipEventCreate(&startEvent) );
	cudaCheckAPIError( hipEventCreate(&stopEvent) );
	dim3 grid( N/ThreadsX );
	dim3 block( ThreadsX );
	hipFuncSetCacheConfig(create_voronoi, hipFuncCachePreferL1);
	cudaCheckAPIError( hipEventRecord(startEvent, 0) );
	hipLaunchKernelGGL(( create_voronoi) , dim3(grid), dim3(block) , 0, 0, b_in, b_out, Voronoi_d);
	vresults.push_back( completeEvent(startEvent, stopEvent) );
	// Release events
	cudaCheckAPIError( hipEventDestroy(startEvent) );
	cudaCheckAPIError( hipEventDestroy(stopEvent) );
}
// compute center of mass
// Launch compute_center_mass_with_search with one thread per Hilbert cell
// (a single block of DIM*DIM threads) and record the time into cresults.
// Requires the body array to already be sorted by Hilbert id.
void computeCOM()
{
	// Event parameters
	hipEvent_t startEvent, stopEvent;
	// Create the event using hipEventCreate
	cudaCheckAPIError( hipEventCreate(&startEvent) );
	cudaCheckAPIError( hipEventCreate(&stopEvent) );
	dim3 grid( 1 );
	dim3 block( DIM*DIM );
	hipFuncSetCacheConfig(compute_center_mass_with_search, hipFuncCachePreferL1);
	cudaCheckAPIError( hipEventRecord(startEvent, 0) );
	hipLaunchKernelGGL(( compute_center_mass_with_search) , dim3(grid), dim3(block) , 0, 0, b_in, M_cd, index_d);
	cresults.push_back( completeEvent(startEvent, stopEvent) );
	// Release events
	cudaCheckAPIError( hipEventDestroy(startEvent) );
	cudaCheckAPIError( hipEventDestroy(stopEvent) );
}
// Launch nbody_kernel_hil over all N bodies with the grid rounded up to a
// multiple of ThreadsX, and record the kernel time into results.
void nBodyHil()
{
	// Event parameters
	hipEvent_t startEvent, stopEvent;
	// Create the event using hipEventCreate
	cudaCheckAPIError( hipEventCreate(&startEvent) );
	cudaCheckAPIError( hipEventCreate(&stopEvent) );
	dim3 grid(RoundUp(ThreadsX, N)/ThreadsX);
	dim3 block(ThreadsX);
	//printf("%d %d\n", grid.x, block.x);
	//system("pause");
	hipFuncSetCacheConfig(nbody_kernel_hil, hipFuncCachePreferL1);
	// compute body updates
	cudaCheckAPIError( hipEventRecord(startEvent, 0) );
	hipLaunchKernelGGL(( nbody_kernel_hil) , dim3(grid), dim3(block) , 0, 0, b_in, M_cd, LUT_d, index_d, b_out);
	results.push_back( completeEvent(startEvent, stopEvent) );
	// Release events
	cudaCheckAPIError( hipEventDestroy(startEvent) );
	cudaCheckAPIError( hipEventDestroy(stopEvent) );
}
// Print and return the median of the timing samples in `results` (ms).
// Sorts the vector in place and clears it before returning.
// FIX: the original tested `results.size()/2 == 0` (true only for 0 or 1
// samples), so for every odd-sized set larger than 1 it averaged the two
// middle elements instead of taking the true median, and it indexed an
// empty vector when no samples were collected.
float outputStats(std::vector<float>& results)
{
	if (results.empty())
	{
		printf("Median: n/a (no samples)\n");
		return 0.0f;
	}
	// Median
	std::sort( results.begin(), results.end());
	float med = 0.0f;
	if (results.size() % 2 == 1)
		med = results[ results.size()/2 ];   // odd count: middle element
	else
		med = (results[ results.size()/2 ] + results[ results.size()/2 - 1]) / 2.0f;
	printf("Median: %.2f ms\n", med);
	//printf("\t %.2f Mop/s\n", computeStats(med));
	//printf("1. %.2f %d. %.2f\n", results[0], results.size()-1, results[results.size()-1]);
	results.clear();
	return med;
}
// Sort the bodies by Hilbert id (operator< above) with thrust, timing only
// the sort itself. The data path is: device buffer b_out -> host array ->
// thrust host_vector -> thrust device_vector -> sort -> back to host ->
// device buffer b_in. The extra hops are acknowledged in the comments below.
void sortBodies()
{
	// copy bodies from device memory into thrust device vector
	// first copy device to host,
	// fill in host vector with host buffer
	// copy from host to device vector
	// perform sort
	// copy device to host
	// update host buffer
	// copy to device memory
	// put this is render, then we have 1 less copy to host
	thrust::device_vector<body> b;
	thrust::host_vector<body> output;
	body bodies[N];
	// copy data from device to host
	//cudaCheckAPIError( hipEventRecord(startEvent, 0) );
	cudaCheckAPIError( hipMemcpy( bodies, b_out, sizeof(body)*N, hipMemcpyDeviceToHost ) );
	for(int i = 0; i < N; i++)
	{
		output.push_back(bodies[i]);
		//bodies[i].print();
	}
	// Event parameters
	hipEvent_t startEvent, stopEvent;
	// Create the event using hipEventCreate
	cudaCheckAPIError( hipEventCreate(&startEvent) );
	cudaCheckAPIError( hipEventCreate(&stopEvent) );
	b = output;
	// compute body updates
	cudaCheckAPIError( hipEventRecord(startEvent, 0) );
	thrust::sort(b.begin(), b.end());
	sresults.push_back( completeEvent(startEvent, stopEvent) );
	// Release events
	cudaCheckAPIError( hipEventDestroy(startEvent) );
	cudaCheckAPIError( hipEventDestroy(stopEvent) );
	// cast thrust vector to raw pointer -- b needs to be global/error on close
	//b_in = thrust::raw_pointer_cast(b.data());
	output = b;
	for(int i= 0; i < N; i++)
	{
		bodies[i] = output[i];
	}
	cudaCheckAPIError( hipMemcpy( b_in, bodies, sizeof(body)*N, hipMemcpyHostToDevice) );
	/*
	printf("\nSorted\n");
	for(unsigned int i = 0; i < output.size(); i++)
	output[i].print();
	system("pause");
	*/
}
// GLUT display/idle callback: run one full simulation step (Voronoi tag,
// Hilbert sort, centre of mass, N-body update), render, then ping-pong the
// body buffers. Every `iterations` frames the timing medians are printed
// and ThreadsX is doubled; once ThreadsX exceeds N or 1024 the benchmark
// pauses and exits via cleanup().
void Draw()
{
	executeVoronoi();
	sortBodies();
	// compute center of mass requires sorted order
	computeCOM();
	//outputCOM();
	//nBodySM();
	//nBodyOrig();
	nBodyHil();
	renderBodies(b_out);
	std::swap(b_in, b_out);   // ping-pong the input/output buffers
	static int i = 0;         // frame counter across callback invocations
	i++;
	if(i > iterations)
	{
		i = 0;
		float total_time = 0.0f;
		// Output Interaction Results
		printf("\nVoronoi NBody Results\n");
		printf("Threads -\t%d\n", ThreadsX);
		printf("Voronoi -\t");
		total_time += outputStats(vresults);
		printf("Sort -\t");
		total_time += outputStats(sresults);
		printf("CoM -\t");
		total_time += outputStats(cresults);
		printf("NBody -\t");
		total_time += outputStats(results);
		printf("Total Time for simulation - %.2f\n", total_time);
		ThreadsX *= 2;   // next benchmark configuration
		// system("pause");
	}
	if((ThreadsX > N) || (ThreadsX > 1024))
	{
		system("pause");
		cleanup();
	}
}
// Create the GLUT window, register the keyboard/display/idle callbacks,
// initialise GLEW, and disable vsync (wglSwapIntervalEXT — Windows only).
void initGL(int argc, char *argv[], int wWidth, int wHeight)
{
	// init gl
	glutInit( &argc, argv );
	glutInitDisplayMode(GLUT_DOUBLE | GLUT_RGBA);
	glutInitWindowPosition(500, 100);
	glutInitWindowSize( wWidth, wHeight );
	glutCreateWindow( "CUDA Nbody" );
	// set callback functions
	glutKeyboardFunc(Key);
	glutDisplayFunc(Draw);
	glutIdleFunc(Draw);
	glewInit();
	if (glewIsSupported("GL_VERSION_2_1"))
		printf("Ready for OpenGL 2.1\n");
	else
		printf("Warning: Detected that OpenGL 2.1 not supported\n");
	wglSwapIntervalEXT(false);   // turn off vsync so frames run flat out
}
/*
int2 findValue(int** LUT, int h)
{
int n = 4;
for(int y = 0; y < n; y++)
{
for(int x = 0; x < n; x++)
{
if(LUT[x][y] == h)
{
int2 coords;
coords.x = x;
coords.y = y;
return coords;
}
}
}
int2 err;
err.x = -1;
err.y = -1;
return err;
}
int2 findValue(int* LUT, int h)
{
int n = 4;
for(int y = 0; y < n; y++)
{
for(int x = 0; x < n; x++)
{
if(LUT[y*n + x] == h)
{
int2 coords;
coords.x = x;
coords.y = y;
return coords;
}
}
}
int2 err;
err.x = -1;
err.y = -1;
return err;
}
void outputSearchZone(int** LUT, int zone)
{
int n = 4;
//find index of LUT for Hilbert zone subject of search
int2 idx = findValue(LUT, zone);
std::cout << "\nSearch zone " << zone << " is at index - " << idx.x << ", "<< idx.y << std::endl;
//std::cout << (idx - n - 1 + uNumVoronoiPts)%uNumVoronoiPts << " " << (idx - n + uNumVoronoiPts)%uNumVoronoiPts << " "<< (idx - n + 1 + uNumVoronoiPts)%uNumVoronoiPts << std::endl;
//std::cout << (idx - 1 + uNumVoronoiPts)%uNumVoronoiPts << " " << idx << " "<< (idx + 1 + uNumVoronoiPts)%uNumVoronoiPts << std::endl;
//std::cout << (idx + n - 1 + uNumVoronoiPts)%uNumVoronoiPts << " " << (idx + n + uNumVoronoiPts)%uNumVoronoiPts << " "<< (idx + n + 1 + uNumVoronoiPts)%uNumVoronoiPts << std::endl;
std::cout << LUT[(idx.x-1 + n)%n][(idx.y-1 + n)%n] << " " << LUT[(idx.x + n)%n][(idx.y-1 + n)%n]<< " "<< LUT[(idx.x+1 + n)%n][(idx.y-1 + n)%n] << std::endl;
std::cout << LUT[(idx.x-1 + n)%n][(idx.y + n)%n] << " " << LUT[(idx.x + n)%n][(idx.y + n)%n] << " "<< LUT[(idx.x+1 + n)%n][(idx.y + n)%n] << std::endl;
std::cout << LUT[(idx.x-1 + n)%n][(idx.y+1 + n)%n] << " " << LUT[(idx.x + n)%n][(idx.y+1 + n)%n] << " "<< LUT[(idx.x+1 + n)%n][(idx.y+1 + n)%n] << std::endl;
}
void outputSearchZone(int* LUT, int zone)
{
int n = 4;
//find index of LUT for Hilbert zone subject of search
int2 coords = findValue(LUT, zone);
std::cout << "\nSearch zone " << zone << " is at index - " << coords.x << ", "<< coords.y << std::endl;
//std::cout << (idx - n - 1 + uNumVoronoiPts)%uNumVoronoiPts << " " << (idx - n + uNumVoronoiPts)%uNumVoronoiPts << " "<< (idx - n + 1 + uNumVoronoiPts)%uNumVoronoiPts << std::endl;
//std::cout << (idx - 1 + uNumVoronoiPts)%uNumVoronoiPts << " " << idx << " "<< (idx + 1 + uNumVoronoiPts)%uNumVoronoiPts << std::endl;
//std::cout << (idx + n - 1 + uNumVoronoiPts)%uNumVoronoiPts << " " << (idx + n + uNumVoronoiPts)%uNumVoronoiPts << " "<< (idx + n + 1 + uNumVoronoiPts)%uNumVoronoiPts << std::endl;
std::cout << LUT[(coords.y-1 + n)%n*n + (coords.x-1 + n)%n]
<< " " << LUT[(coords.y-1 + n)%n*n + (coords.x + n)%n]<< " "
<< LUT[(coords.y-1 + n)%n*n + (coords.x+1 + n)%n] << std::endl;
std::cout << LUT[(coords.y + n)%n*n + (coords.x-1 + n)%n] << " "
<< LUT[(coords.y + n)%n*n + (coords.x + n)%n] << " "
<< LUT[(coords.y + n)%n*n + (coords.x+1 + n)%n]<< std::endl;
std::cout << LUT[(coords.y+1 + n)%n*n + (coords.x-1 + n)%n] << " "
<< LUT[(coords.y+1 + n)%n*n + (coords.x + n)%n] << " "
<< LUT[(coords.y+1 + n)%n*n + (coords.x+1 + n)%n]<< std::endl;
}
*/
// Program entry point (hipified build): creates the initial bodies and the
// Voronoi/Hilbert lookup tables on the host, uploads them to the device,
// then hands control to GLUT (Draw() performs the per-frame steps).
int main(int argc, char** argv)
{
    printf("N Body Benchmark CUDA\n\n");
    // Initial body data
    const int body_size = sizeof(body)*N;
    const int voronoi_size = sizeof(VoronoiBuf)*DIM*DIM;
    printf("Body List\n");
    body* body_h = (body*)malloc( body_size );
    for(int i = 0; i < N; i++)
    {
        body_h[i] = init_body(i);   // body 0 is the central mass, the rest orbit it
        //body_h[i].print();
    }
    printf("\n");
    // Generate Voronoi Points
    VoronoiBuf* Voronoi_h = (VoronoiBuf*)malloc(voronoi_size);
    // 2D LUT is only consumed by commented-out debug code; the flat lut_h
    // is what actually gets uploaded to the device.
    int** LUT_h = (int**)malloc(sizeof(int*)*DIM);
    for(int i = 0; i < DIM; i++)
        LUT_h[i] = (int*)malloc(sizeof(int)*DIM);
    int* lut_h = (int*)malloc(sizeof(int)*DIM*DIM);
    printf("Program Data\n");
    printf("Number of Voronoi Points :\t%d\n", DIM*DIM);
    int k = 0;
    double radius = sim_rad/8;            // seeds cover the central 1/8 of the simulation radius
    double spacing_x = (2*radius)/ DIM;
    double spacing_y = (2*radius)/ DIM;
    printf("%d %g %g\n\n", DIM, spacing_x, spacing_y);
    printf("Voronoi Points\n");
    for(int y = -1*DIM/2; y < DIM/2; y++)
    {
        for(int x = -1*DIM/2; x < DIM/2; x++)
        {
            // NOTE(review): .x is scaled by the y loop index and .y by x --
            // this transposes the grid; confirm it is intentional.
            Voronoi_h[k].x = spacing_x/2 + spacing_x*y;
            Voronoi_h[k].y = spacing_y/2 + spacing_y*x;
            Voronoi_h[k].colour.x = (sin(2.4*k + 0) *127 + 128)/255;
            Voronoi_h[k].colour.y = (sin(2.4*k + 2) *127 + 128)/255;
            Voronoi_h[k].colour.z = (sin(2.4*k + 4) *127 + 128)/255;
            Voronoi_h[k].colour.w = 1.0f;
            Voronoi_h[k].morton = EncodeMorton2(x + DIM/2, y + DIM/2);
            Voronoi_h[k].hilbert = EncodeHilbert2(DIM*DIM, x + DIM/2, y + DIM/2);
            //Voronoi_h[k].print();
            // NOTE(review): LUT_h is indexed [x][y] while lut_h[k] follows the
            // y-outer/x-inner loop order -- the two tables are transposed
            // relative to each other. Only lut_h reaches the device.
            LUT_h[x+DIM/2][y+DIM/2] = Voronoi_h[k].hilbert;
            lut_h[k] = Voronoi_h[k].hilbert;
            //printf("%d - %d\t", k, LUT_h[x+DIM/2][y+DIM/2]);
            k++;
        }
        //std::cout << std::endl;
    }
    /*
    double spacing_x = (sim_rad/4)/ DIM;
    double spacing_y = (sim_rad/4)/ DIM;
    printf("%d %g %g\n\n", DIM, spacing_x, spacing_y);
    printf("Voronoi Points\n");
    for(int y = -1*DIM/2; y < DIM/2; y++)
    {
    for(int x = -1*DIM/2; x < DIM/2; x++)
    {
    Voronoi_h[k].x = spacing_x/2 + spacing_x*y;
    Voronoi_h[k].y = spacing_y/2 + spacing_y*x;
    Voronoi_h[k].colour.x = (sin(2.4*k + 0) *127 + 128)/255;
    Voronoi_h[k].colour.y = (sin(2.4*k + 2) *127 + 128)/255;
    Voronoi_h[k].colour.z = (sin(2.4*k + 4) *127 + 128)/255;
    Voronoi_h[k].colour.w = 1.0f;
    Voronoi_h[k].morton = EncodeMorton2(x + DIM/2, y + DIM/2);
    Voronoi_h[k].hilbert = EncodeHilbert2(DIM*DIM, x + DIM/2, y + DIM/2);
    Voronoi_h[k].print();
    LUT_h[x+DIM/2][y+DIM/2] = Voronoi_h[k].hilbert;
    lut_h[k] = Voronoi_h[k].hilbert;
    printf("%d - %d\t", k, LUT_h[x+DIM/2][y+DIM/2]);
    k++;
    }
    std::cout << std::endl;
    }*/
    /*
    for(int i = 0; i < uNumVoronoiPts; i++)
    {
    outputSearchZone(LUT_h, i);
    std::cout<<std::endl;
    outputSearchZone(lut_h, i);
    }
    */
    // allocate memory on device for buffers
    cudaCheckAPIError( hipMalloc( (void**)&b_in, body_size) );
    cudaCheckAPIError( hipMalloc( (void**)&b_out, body_size) );
    cudaCheckAPIError( hipMalloc( (void**)&Voronoi_d, voronoi_size) );
    cudaCheckAPIError( hipMalloc( (void**)&LUT_d, sizeof(int)*DIM*DIM) );
    cudaCheckAPIError( hipMalloc( (void**)&M_cd, sizeof(double4)*DIM*DIM) );
    cudaCheckAPIError( hipMalloc( (void**)&index_d, sizeof(int2)*DIM*DIM) );
    // copy data from host to device
    cudaCheckAPIError( hipMemcpy( b_in, body_h, body_size, hipMemcpyHostToDevice) ); //same intial conditions
    cudaCheckAPIError( hipMemcpy( Voronoi_d, Voronoi_h, voronoi_size, hipMemcpyHostToDevice) );
    cudaCheckAPIError( hipMemcpy( LUT_d, lut_h, sizeof(int)*DIM*DIM, hipMemcpyHostToDevice) );
    //free host memory -- blocking memcpys above mean the device already has copies
    free( Voronoi_h );
    free( body_h );
    for(int i = 0; i < DIM; i++)
        free(LUT_h[i]);
    free( LUT_h );
    free( lut_h );
    // Output some useful data
    printf("Number of Bodies : \t%d\n", N);
    printf("NBody\n");
    printf("Global Work Size :\t%d\n", RoundUp(ThreadsX, N)/ThreadsX );
    printf("Local Work Size :\t%d\n\n\n", ThreadsX);
    initGL(argc, argv, 512, 512);
    glutMainLoop();   // does not return; cleanup() exits the process
    return 0;
}
// query device properties
// Enumerates every CUDA-capable device and prints its capabilities via the
// printLine helpers. Purely informational; not called from main().
void cudaQuery()
{
    // determine number of CUDA devices
    int count;
    cudaCheckAPIError( hipGetDeviceCount(&count) );
    printLine("Number of CUDA Devices ", count);
    printBlank();
    // output information on all devices
    for(int i = 0; i < count; i++)
    {
        printLine("Device ", i+1);
        // determine properties
        hipDeviceProp_t properties;
        cudaCheckAPIError( hipGetDeviceProperties(&properties, i) );
        // NOTE(review): &properties.name selects the T* overload of printLine,
        // which prints at most three elements -- confirm this prints the
        // device name as intended rather than individual characters.
        printLine("Name ", &properties.name);
        printLine("Total Global Mem ", properties.totalGlobalMem);
        printLine("Shared Mem Per Block ", properties.sharedMemPerBlock);
        printLine("Regs Per Block ", properties.regsPerBlock);
        printLine("Warp Size ", properties.warpSize);
        printLine("MemPitch ", properties.memPitch);
        printLine("Max Threads Per Block ", properties.maxThreadsPerBlock);
        printLine("Max Threads Dim ", properties.maxThreadsDim);
        printLine("Max Grid Size ", properties.maxGridSize);
        printLine("Total Const Mem ", properties.totalConstMem);
        printLine("Major ", properties.major);
        printLine("Minor ", properties.minor);
        printLine("Clock Rate ", properties.clockRate);
        printLine("Texture Alignment ", properties.textureAlignment);
        printLine("Device Overlap ", properties.deviceOverlap);
        printLine("Multi Processor Count ", properties.multiProcessorCount);
        printLine("Kernel Exec Timeout Enabled", properties.kernelExecTimeoutEnabled);
        printLine("Integrated ", properties.integrated);
        printLine("Can Map Host Memory ", properties.canMapHostMemory);
        printLine("Compute Mode ", properties.computeMode);
        printLine("Max Texture 1D ", properties.maxTexture1D);
        printLine("Max Surface 2D ", properties.maxSurface2D);
        printLine("Max Texture 2D ", properties.maxTexture2D);
        printLine("Max Texture 3D ", properties.maxTexture3D);
        printLine("Concurrent Kernels ", properties.concurrentKernels);
    }
    printBlank();
}
|
d06ce33c650738829cbd568967f8fe58e1b8d33d.cu
|
/*
Voronoi N BODY
Voronoi Grid
NBody
Morton Coding
Hilbert Coding
Thrust Sort
Body Interactions for region
*/
#include <thrust/device_vector.h>
#include <thrust/host_vector.h>
#include <cmath>
#include <thrust/sort.h>
#include "cuda_runtime.h"
#include "device_launch_parameters.h"
#include <iostream>
#include <algorithm>
#include <vector>
#include <numeric>
#define _USE_MATH_DEFINES
#include "math.h"
#ifdef _WIN32
#include<windows.h>
#endif
#include <GL/glew.h>
#include <GL/wglew.h>
#include <GL/glut.h>
const int N = 2048;               // number of bodies (power of two)
const int DIM = 1 << 4;           // DIM = 2^n (n = 1, 2, 3..): Voronoi grid is DIM x DIM
int ThreadsX = 128;               // CUDA block size; doubled after each benchmark pass (see Draw)
const int iterations = 100;       // frames per benchmark pass before stats are printed
const double sim_rad = 1e18;      // simulation radius (metres)
std::vector<float> results;       // per-frame timings (ms): n-body kernel
std::vector<float> vresults;      // per-frame timings (ms): Voronoi assignment kernel
std::vector<float> sresults;      // per-frame timings (ms): thrust sort
std::vector<float> cresults;      // per-frame timings (ms): centre-of-mass kernel
// Description of Voronoi Buf
// One Voronoi seed point: its position, its Morton/Hilbert indices on the
// DIM x DIM grid, and the colour used when rendering bodies assigned to it.
// The struct is memcpy'd to the device, so the member layout matters.
struct VoronoiBuf
{
    double x;        // seed position
    double y;
    int morton;      // Morton code of the seed's grid cell
    int hilbert;     // Hilbert index of the seed's grid cell
    float4 colour;   // RGBA colour (components in [0, 1]) for this region
    // Debug dump of the seed's indices and position.
    void print()
    {
        std::cout << "\tMorton: " << morton <<" Hilbert: " << hilbert << std::endl;
        std::cout <<"\tPosition(x,y): " << x << " " << y << std::endl;
        //std::cout << "\tR: " << colour.x << " G: " << colour.y << " B: " << colour.z << std::endl;
    }
};
VoronoiBuf* Voronoi_d;   // device: Voronoi seed points (DIM*DIM entries)
double4* M_cd;           // device: per-region centre of mass (x, y, z unused, w = total mass)
int* LUT_d;              // device: DIM x DIM row-major grid-cell -> Hilbert-index table
int2* index_d;           // device: per-region body-array range (x = first, y = end; see compute_center_mass_with_search)
// Description of Body
// One simulated body. position.w stores the mass; velocity.z/.w are reused
// by nbody_kernel_hil to stash the last computed force components.
struct body
{
    float4 colour;      // render colour (RGBA), inherited from the body's Voronoi seed
    double4 position;   // x, y (z unused); w = mass
    double4 velocity;   // x, y velocity; z, w hold the last force (see nbody_kernel_hil)
    int morton;         // Morton code of the body's Voronoi region
    int hilbert;        // Hilbert index of the body's Voronoi region (sort key)
    double2 force;      // force accumulator for the host reference path (addForce/update)
    // Exact equality of position and velocity (host-side verification only).
    bool operator==(body b)
    {
        return( (position.x == b.position.x) && (position.y == b.position.y) && (velocity.x == b.velocity.x) && (velocity.y == b.velocity.y) );
    }
    // Print the component-wise difference against another body.
    // NOTE(review): fx/fy read velocity.z/.w, i.e. the stored force components.
    void error(body b)
    {
        double px = b.position.x - position.x;
        double py = b.position.y - position.y;
        double vx = b.velocity.x - velocity.x;
        double vy = b.velocity.y - velocity.y;
        double fx = b.velocity.z - velocity.z;
        double fy = b.velocity.w - velocity.w;
        std::cout << "Error in \n" << std::endl;
        std::cout << "\tP: " << px << " " << py;
        std::cout << std::endl;
        std::cout << "\tV: " << vx << " " << vy;
        std::cout << std::endl;
        std::cout << "\tF: " << fx << " " << fy;
        std::cout << std::endl;
    }
    // Debug dump of the body's full state.
    void print()
    {
        std::cout << "\tMorton : " << morton << " Hilbert: " << hilbert;
        std::cout << "\n\tPosition(x,y): " << position.x << " " << position.y;
        std::cout << "\n\tV: " << velocity.x << " " << velocity.y;
        std::cout << "\tF: " << velocity.z << " " << velocity.w;
        std::cout << "\n\tMass: " << position.w << std::endl << std::endl;
    }
    // Zero the host-side force accumulator before re-summing interactions.
    void resetForce()
    {
        force = double2();
    }
    // Host reference: accumulate the gravitational pull of b on this body
    // (softened by EPS; mirrors bodyBodyInteraction on the device).
    void addForce(body b)
    {
        double G = 6.67e-11; // gravational constant
        double EPS = 3E4; // softening parameter
        double dx = b.position.x - position.x;
        double dy = b.position.y - position.y;
        double dist = sqrt(dx*dx + dy*dy);
        double F = (G * position.w * b.position.w) / (dist*dist + EPS*EPS);
        force.x += F * dx / dist;
        force.y += F * dy / dist;
    }
    // Host reference: explicit Euler step with a fixed 1e10 s timestep.
    void update()
    {
        velocity.x += 1e10 * force.x / position.w;
        velocity.y += 1e10 * force.y / position.w;
        position.x += 1e10 * velocity.x;
        position.y += 1e10 * velocity.y;
    }
};
// sort Hilbert
// Strict weak ordering on bodies by Hilbert index so that thrust::sort
// groups spatially-adjacent bodies together in memory.
__host__ __device__ bool operator<(const body &lhs, const body &rhs)
{
    const int a = lhs.hilbert;
    const int b = rhs.hilbert;
    return a < b;
}
// "Insert" a 0 bit after each of the 16 low bits of x, i.e. move input
// bit i to output bit 2*i (the even-bit half of a 2D Morton code).
int Part1By1(int x)
{
    int v = x & 0x0000ffff;            // keep the 16 low bits only
    // Each step spreads the bits twice as far apart; the mask discards the
    // overlap, so OR and XOR are interchangeable here.
    v = (v | (v << 8)) & 0x00ff00ff;
    v = (v | (v << 4)) & 0x0f0f0f0f;
    v = (v | (v << 2)) & 0x33333333;
    v = (v | (v << 1)) & 0x55555555;
    return v;
}
// "Insert" two 0 bits after each of the 10 low bits of x, i.e. move input
// bit i to output bit 3*i (one axis of a 3D Morton code).
int Part1By2(int x)
{
    int v = x & 0x000003ff;            // keep the 10 low bits only
    // Progressive spread; the shifted copy never overlaps the kept bits,
    // so OR matches the canonical XOR formulation exactly.
    v = (v | (v << 16)) & 0xff0000ff;
    v = (v | (v << 8))  & 0x0300f00f;
    v = (v | (v << 4))  & 0x030c30c3;
    v = (v | (v << 2))  & 0x09249249;
    return v;
}
// 2D Morton code: interleave the low 16 bits of x (even bit positions)
// with those of y (odd bit positions).
int EncodeMorton2(int x, int y)
{
    const int evenBits = Part1By1(x);
    const int oddBits  = Part1By1(y) << 1;
    return oddBits + evenBits;
}
// 3D Morton code: interleave the low 10 bits of x, y and z
// (x at positions 3i, y at 3i+1, z at 3i+2).
int EncodeMorton3(int x, int y, int z)
{
    const int xBits = Part1By2(x);
    const int yBits = Part1By2(y) << 1;
    const int zBits = Part1By2(z) << 2;
    return zBits + yBits + xBits;
}
//rotate/flip a quadrant appropriately
// Transforms (x, y) inside an n x n cell so the Hilbert curve stays
// continuous: lower quadrants (ry == 0) are transposed, and the rx == 1
// lower quadrant is additionally reflected through the cell centre first.
void rot(int n, int *x, int *y, int rx, int ry) {
    if (ry != 0)
        return;                  // upper quadrants: no transform needed
    if (rx == 1) {
        // reflect through the centre of the cell
        *x = n-1 - *x;
        *y = n-1 - *y;
    }
    // transpose: swap x and y
    int tmp = *x;
    *x = *y;
    *y = tmp;
}
//convert (x,y) to d
// Standard Hilbert-curve xy->d mapping on an n-cell curve (n = side*side
// for this code base: callers pass DIM*DIM). Walks quadrant levels from
// coarsest to finest, accumulating the distance and re-orienting (x, y).
int EncodeHilbert2 (int n, int x, int y) {
    int d = 0;
    for (int s = n/2; s > 0; s /= 2) {
        const int rx = (x & s) ? 1 : 0;      // which half horizontally
        const int ry = (y & s) ? 1 : 0;      // which half vertically
        d += s * s * ((3 * rx) ^ ry);        // quadrant's contribution
        rot(s, &x, &y, rx, ry);              // re-orient for the next level
    }
    return d;
}
// Speed of a circular orbit at distance r = |(x, y)| around the central
// body of mass 1e6 solar masses: v = sqrt(G * M / r).
double circlev(double x, double y)
{
    const double solarMass = 1.98892e30;
    const double r = sqrt(x*x + y*y);
    const double gm = (6.67e-11)*1e6*solarMass;   // G * central mass
    return sqrt(gm / r);
}
// Uniform pseudo-random double in [0, 1] (thin wrapper over rand()).
double random() { return rand() / (double)RAND_MAX; }
// Sign of val: +1 for positive, -1 for negative, 0 for zero.
template <typename T> int signum(T val) {
    if (val > T(0)) return 1;
    if (val < T(0)) return -1;
    return 0;
}
// Build the initial state of body i. Body 0 is a massive stationary "sun"
// at the origin; every other body gets a random position near the centre
// and a tangential velocity at the local circular-orbit speed, so the
// system starts in roughly stable rotation.
// NOTE(review): the sequence of random() calls determines the layout --
// do not reorder statements. phiv is computed but never used.
body init_body(int i)
{
    double solarmass = 1.98892e30;
    if(i != 0)
    {
        // random position within ~sim_rad*exp(-1.8) of the origin
        double px = sim_rad*exp(-1.8)*(0.5 - random());
        double py = sim_rad*exp(-1.8)*(0.5 - random());
        double magv = circlev(px, py);        // circular-orbit speed at (px, py)
        // NOTE(review): abs() on a double relies on the C++ overload being in
        // scope; fabs() would be unambiguous.
        double absangle = atan(abs(py/px));
        double thetav = M_PI/2 - absangle;    // direction perpendicular to the radius
        double phiv = random() * M_PI;        // NOTE(review): unused
        double vx = -1*signum(py)*cos(thetav)*magv;
        double vy = signum(px)*sin(thetav)*magv;
        // randomise the orbit direction (clockwise vs anticlockwise)
        if(random() < 0.5)
        {
            vx = -vx;
            vy = -vy;
        }
        double mass = random() * solarmass*10+1e20;   // up to ~10 solar masses
        body b;
        b.position.x = px;
        b.position.y = py;
        b.position.z = 0;
        b.position.w = mass;                  // mass stored in position.w
        b.velocity.x = vx;
        b.velocity.y = vy;
        b.velocity.z = 0;
        b.velocity.w = 0;
        b.colour.x = 1.0f;
        b.colour.y = 0.0f;
        b.colour.z = 0.0f;
        b.colour.w = 1.0f;
        b.morton = 0;                         // assigned later by create_voronoi
        b.hilbert = 0;
        return b;
    }
    else
    {
        // body 0: the central mass (1e6 solar masses) at rest at the origin
        body b;
        b.position.x = 0;
        b.position.y = 0;
        b.position.z = 0;
        b.position.w = 1e6*solarmass;
        b.velocity.x = 0;
        b.velocity.y = 0;
        b.velocity.z = 0;
        b.velocity.w = 0;
        b.colour.x = 1.0f;
        b.colour.y = 1.0f;
        b.colour.z = 0.0f;
        b.colour.w = 1.0f;
        b.morton = 0;
        b.hilbert = 0;
        return b;
    }
}
// thrust::device_vector<body> b;
body* b_in;      // device: bodies read by the current frame's kernels
body* b_out;     // device: bodies written this frame (swapped with b_in in Draw)
void cudaQuery();
// Accumulate into `a` the gravitational force exerted by body bj on body
// bi (masses in the .w components), softened by EPS and a small distance
// offset. `output_thread` is an unused debug hook.
__device__ double2 bodyBodyInteraction(double4 bi, double4 bj, double2 a, bool output_thread)
{
    double G = 6.67e-11; // gravational constant
    double EPS = 3E4; // softening parameter
    // [2 FLOPS]
    double dx = bj.x - bi.x;
    double dy = bj.y - bi.y;
    // [5 FLOPS]
    double dist = sqrt(dx*dx + dy*dy) + 0.0000125; // additional softening parameter
    // [6 FLOPS]
    double F = (G * bi.w * bj.w) / (dist*dist + EPS*EPS);
    // [6 FLOPS] project the force magnitude onto the separation vector
    a.x += F * dx / dist;
    a.y += F * dy / dist;
    return a;
}
// Binary search over the Hilbert-sorted body array for the first (lowest)
// index whose hilbert value equals h; returns -1 when no body lies in
// region h. Assumes body_in is sorted by hilbert (see sortBodies()).
// Fixes vs. the original: index 0 is now reachable (the old bounds meant
// the search could never return 0), and a match at index N-1 is no longer
// rejected by the spurious `right < N-1` guard.
__device__ int findFirstValue(body* body_in, int h)
{
    int lo = -1;        // invariant: every index <= lo has hilbert < h
    int hi = N - 1;     // candidate for the lower bound
    while (hi > lo + 1)
    {
        int mid = (lo + hi) / 2;
        if (body_in[mid].hilbert >= h)
            hi = mid;
        else
            lo = mid;
    }
    return (body_in[hi].hilbert == h) ? hi : -1;
}
// One thread per Hilbert region (launched as 1 block of DIM*DIM threads,
// see computeCOM()). Starting from the first body of region idx (binary
// search), scans the Hilbert-sorted body array, maintaining an incremental
// mass-weighted mean of positions, and records the region's index range
// in index_store (x = first body, y = end; see note below).
__global__ void
compute_center_mass_with_search(body* body_in, double4* c_m, int2* index_store)
{
    // idx( 0 to uNumVoronoiPts )
    unsigned int idx = blockIdx.x*blockDim.x + threadIdx.x;
    bool mass = 0;                 // true once the first body of the region is seen
    double4 center;                // running centre of mass; w accumulates total mass
    center.x = 0.0;
    center.y = 0.0;
    center.z = 0.0;
    center.w = 0.0;
    int first = 0;
    int last = 0;
    int start = findFirstValue(body_in, idx);
    if(start != -1)
    {
        // for all bodies
        for(int b = start; b < N; b++)
        {
            // NOTE(review): idx is unsigned and hil is int -- the comparisons
            // below promote hil to unsigned; fine while hilbert >= 0.
            int hil = body_in[b].hilbert;
            if(idx == hil)
            {
                // if first hilbert in list
                if(!mass)
                {
                    center = body_in[b].position; //assign initial value
                    first = b;
                    mass = true;
                }
                else
                {
                    // incremental weighted mean: fold the new body into the
                    // running centre, then renormalise by the new total mass
                    double4 other = body_in[b].position;
                    center.x = center.x*center.w + other.x * other.w;
                    center.y = center.y*center.w + other.y * other.w;
                    //center.z = center.z*center.w + other.z * other.w;
                    center.w += other.w;
                    center.x = center.x/center.w;
                    center.y = center.y/center.w;
                    //center.z = center.z/center.w;
                }
            }
            if(hil > idx)
            {
                // first body past the region: close the range
                first = (mass == 0) ? b : first;
                //if((!mass))
                //	first = b;
                last = b;
                break;
            }
        }
    }
    // NOTE(review): when the region runs to the end of the array, last is set
    // to N-1, yet computeHilbertBodies iterates with an exclusive upper bound
    // -- body N-1 is then skipped. Confirm whether `last = N` was intended.
    last = ((last == 0) && (first != 0)) ? N-1 : last;
    //if((last == 0) && (first != 0))
    //	last = N-1;
    //center of mass for voronoi region
    index_store[idx].x = first;
    index_store[idx].y = last;
    c_m[idx] = center;
}
// Accumulate into `force` the pairwise interactions between `position`
// and every body stored in Hilbert region h (range taken from index_store;
// the upper bound is exclusive).
__device__ double2 computeHilbertBodies(int h, int2* index_store, body* body_in, double4 position, double2 force)
{
    const int first = index_store[h].x;
    const int end   = index_store[h].y;
    for (int b = first; b < end; b++)
    {
        force = bodyBodyInteraction(position, body_in[b].position, force, false);
    }
    return force;
}
// Inverse lookup in the DIM x DIM row-major Hilbert LUT: linear scan for
// the cell whose value equals h, returning its (x, y) grid coordinates,
// or (-1, -1) when h is not present.
__device__ int2 findValue(int* LUT, int h)
{
    for (int cell = 0; cell < DIM * DIM; cell++)
    {
        if (LUT[cell] == h)
        {
            int2 coords;
            coords.x = cell % DIM;   // column
            coords.y = cell / DIM;   // row
            return coords;
        }
    }
    int2 notFound;
    notFound.x = -1;
    notFound.y = -1;
    return notFound;
}
// One thread per body. Exact pairwise forces are summed over the body's
// own Hilbert region and its 8 (toroidally wrapped) neighbours; every
// region -- including those 9 -- is also approximated by its centre of
// mass, and the duplicate centre-of-mass contribution of the 9 near
// regions is subtracted out. Finishes with an explicit Euler step
// (dt = 1e10 s) and stores the force in velocity.z/.w for inspection.
// Launch: RoundUp(ThreadsX, N)/ThreadsX blocks of ThreadsX threads.
__global__ void
nbody_kernel_hil(body* body_in, double4* c_m, int* LUT, int2* index_store, body* body_out)
{
    // body[idx]
    unsigned int idx = blockIdx.x*blockDim.x + threadIdx.x;
    // Fix: guard the grid tail -- RoundUp() can launch more threads than
    // there are bodies, and those threads would read/write out of bounds.
    if (idx >= N) return;
    // Indentify the regions neighbouring body[idx]
    int2 coords = findValue(LUT, body_in[idx].hilbert);
    int regions[9];
    // 9 reads from global memory.........
    regions[0] = LUT[(coords.y-1 + DIM)%DIM*DIM + (coords.x-1 + DIM)%DIM];
    regions[1] = LUT[(coords.y-1 + DIM)%DIM*DIM + (coords.x + DIM)%DIM];
    regions[2] = LUT[(coords.y-1 + DIM)%DIM*DIM + (coords.x+1 + DIM)%DIM];
    regions[3] = LUT[(coords.y + DIM)%DIM*DIM + (coords.x-1 + DIM)%DIM];
    regions[4] = LUT[(coords.y + DIM)%DIM*DIM + (coords.x + DIM)%DIM];
    regions[5] = LUT[(coords.y + DIM)%DIM*DIM + (coords.x+1 + DIM)%DIM];
    regions[6] = LUT[(coords.y+1 + DIM)%DIM*DIM + (coords.x-1 + DIM)%DIM];
    regions[7] = LUT[(coords.y+1 + DIM)%DIM*DIM + (coords.x + DIM)%DIM];
    regions[8] = LUT[(coords.y+1 + DIM)%DIM*DIM + (coords.x+1 + DIM)%DIM];
    // position of body[idx]
    double4 position = body_in[idx].position;
    // calculate force contributions for body
    double2 force;
    force.x = 0.0;
    force.y = 0.0;
    // f is force of centerofmass relations with neighbouring regions
    double2 f;
    f.x = 0.0;
    f.y = 0.0;
    for(int i = 0; i < 9; i++)
    {
        force = computeHilbertBodies(regions[i], index_store, body_in, position, force);
        f = bodyBodyInteraction(position, c_m[regions[i]], f, false);
    }
    // subtract the centerofmass influence to cancel when added to force
    force.x = force.x - f.x;
    force.y = force.y - f.y;
    // perform center of mass interactions with all regions
    for(int i = 0; i < DIM*DIM; i++)
    {
        double4 p_j = c_m[i];
        force = bodyBodyInteraction(position, p_j, force, false);
    }
    // Do update
    double4 velocity = body_in[idx].velocity;
    double2 v;
    v.x = velocity.x + 1e10 * force.x / position.w;
    v.y = velocity.y + 1e10 * force.y / position.w;
    body_out[idx].velocity.x = v.x;
    body_out[idx].velocity.y = v.y;
    body_out[idx].velocity.z = force.x;   // stash force for error reporting
    body_out[idx].velocity.w = force.y;
    body_out[idx].position.x = position.x + 1e10 * v.x;
    body_out[idx].position.y = position.y + 1e10 * v.y;
    body_out[idx].position.z = 0;
    body_out[idx].position.w = position.w;
    body_out[idx].colour = body_in[idx].colour;
    body_out[idx].morton = body_in[idx].morton;
    body_out[idx].hilbert = body_in[idx].hilbert;
}
// Create Voronoi kernel
// Assign each body to its nearest Voronoi seed (brute-force nearest
// neighbour over all DIM*DIM seeds, using squared distances) and tag it
// with that seed's Morton/Hilbert indices and colour. Position and
// velocity pass through unchanged.
__global__ void create_voronoi( body* body_in, body* body_out, VoronoiBuf * v)
{
    // map from thread to pixel position
    unsigned int idx = blockIdx.x * blockDim.x + threadIdx.x;
    // Fix: guard the grid tail in case the launch over-provisions threads.
    if (idx >= N) return;
    double x = body_in[idx].position.x;
    double y = body_in[idx].position.y;
    // seed 0 initialises the running minimum (squared distance -- no sqrt needed)
    double d_x = (v[0].x - x);
    double d_y = (v[0].y - y);
    double minDist = (d_x*d_x + d_y*d_y);
    int minDistPoint = 0;
    // Fix: start at 1 -- the original re-evaluated seed 0 inside the loop.
    for(int i = 1; i < DIM*DIM; i++)
    {
        double diff_x = (v[i].x - x);
        double diff_y = (v[i].y - y);
        double dist = (diff_x*diff_x + diff_y*diff_y);
        if(dist < minDist)
        {
            minDist = dist;
            minDistPoint = i;
        }
    }
    // copy the winning seed's tags onto the body
    body_out[idx].morton = v[minDistPoint].morton;
    body_out[idx].hilbert = v[minDistPoint].hilbert;
    body_out[idx].colour = v[minDistPoint].colour;
    body_out[idx].position = body_in[idx].position;
    body_out[idx].velocity = body_in[idx].velocity;
}
// Report any CUDA API failure; returns true on success, false otherwise.
// On failure the error string is printed and the console is paused so the
// message stays visible (Windows-style "pause").
bool cudaCheckAPIError(cudaError_t err)
{
    if (err == cudaSuccess)
        return true;
    std::cerr << "Error : " << cudaGetErrorString(err) << std::endl;
    system("pause");
    return false;
}
// Print "<label>\t : <value>" -- aligned one-line output for cudaQuery().
template <class T>
void printLine(const char* message, T value)
{
    std::cout << message;
    std::cout << "\t : ";
    std::cout << value << std::endl;
}
// Overload for array-valued properties (e.g. maxThreadsDim, maxGridSize):
// prints two elements when the third is zero, otherwise three.
// NOTE(review): value[2] is read unconditionally, so this is only safe for
// arrays of at least 3 elements; comparing a non-pointer element against
// NULL also assumes it is integral. Confirm intended callers.
template <class T>
void printLine(const char* message, T* value)
{
    if(value[2] == NULL)
        std::cout << message << "\t : " << value[0] << ", " << value[1] << std::endl;
    else
        std::cout << message << "\t : " << value[0] << " " << value[1] << " " << value[2] << std::endl;
}
// Print an empty line (std::endl also flushes stdout).
void printBlank()
{
    std::cout << std::endl;
}
// Round globalSize up to the next multiple of groupSize (unchanged when it
// is already a multiple). Used to size the launch grid for a block size.
size_t RoundUp(int groupSize, int globalSize)
{
    const int remainder = globalSize % groupSize;
    return (remainder == 0)
        ? globalSize
        : globalSize + (groupSize - remainder);
}
// Release every device allocation made in main() and terminate.
// Fix: index_d was allocated alongside the other buffers but never freed.
void cleanup()
{
    //cleanup
    cudaCheckAPIError( cudaFree( b_in ) );
    cudaCheckAPIError( cudaFree( b_out ) );
    cudaCheckAPIError( cudaFree( M_cd) );
    cudaCheckAPIError( cudaFree( LUT_d ) );
    cudaCheckAPIError( cudaFree( Voronoi_d ) );
    cudaCheckAPIError( cudaFree( index_d ) );   // previously leaked
    exit(EXIT_SUCCESS);
}
// GLUT keyboard callback: Esc/Enter/Q/q quit (freeing device memory),
// 'p' pauses the console; other keys are ignored.
void Key(unsigned char key, int x, int y)
{
    const bool wantsQuit =
        (key == '\033') ||   // escape quits
        (key == '\015') ||   // Enter quits
        (key == 'Q') ||
        (key == 'q');
    if (wantsQuit)
    {
        // Cleanup up and quit
        cleanup();
    }
    else if (key == 'p')
    {
        system("pause");
    }
}
// Record `stop` on the default stream, block until it completes, and
// return the elapsed milliseconds since `start` (which the caller must
// already have recorded before queuing the timed work).
float completeEvent(cudaEvent_t start, cudaEvent_t stop)
{
    // Add the stop event to the GPUs queue of work
    cudaCheckAPIError( cudaEventRecord(stop, 0) );
    // Wait until the event has completed so it is safe to read
    cudaCheckAPIError( cudaEventSynchronize(stop) );
    // Determine the time elapsed between the events
    float milliseconds = 0;
    cudaCheckAPIError( cudaEventElapsedTime(&milliseconds, start, stop) );
    return milliseconds;
}
// Copy the bodies back to the host and draw them as GL points, scaled so
// the central region maps onto the viewport. The blocking cudaMemcpy also
// serves as the frame's device synchronisation point.
// NOTE(review): `body bodies[N]` is a sizeof(body)*N stack array (~hundreds
// of KB) -- confirm the default stack size accommodates it.
void renderBodies(body* b)
{
    body bodies[N];
    // copy data from device to host
    //cudaCheckAPIError( cudaEventRecord(startEvent, 0) );
    cudaCheckAPIError( cudaMemcpy( bodies, b, sizeof(body)*N, cudaMemcpyDeviceToHost ) );
    //completeEvent(startEvent, stopEvent, "retrieving output", false);
    //for(int i = 0; i < N; i++)
    //	bodies[i].print();
    glEnable( GL_POINT_SMOOTH );
    glEnable( GL_BLEND );
    glBlendFunc( GL_SRC_ALPHA, GL_ONE_MINUS_SRC_ALPHA );
    glPointSize( 2.0 );
    glClearColor( 0.0, 0.0, 1.0, 1.0 );
    glClear( GL_COLOR_BUFFER_BIT | GL_DEPTH_BUFFER_BIT);
    //glColor3f(1, 0, 0);
    glBegin(GL_POINTS);
    for(int i = 0; i < N; i++)
    {
        // colour comes from the body's Voronoi region
        glColor3f( bodies[i].colour.x, bodies[i].colour.y, bodies[i].colour.z );
        // map world coords into the [-1, 1] GL viewport
        glVertex2f(bodies[i].position.x/(sim_rad/4), bodies[i].position.y/(sim_rad/4));
    }
    glEnd();
    glFinish();
    glutSwapBuffers();
    //for(int i = 0; i < N; i++)
    //	bodies[i].print();
    //system("pause");
}
// Debug helper: pull the per-region centres of mass and index ranges back
// to the host and print them, then pause the console.
void outputCOM()
{
    double4 com[DIM*DIM];
    int2 index_store[DIM*DIM];
    // copy data from device to host (blocking -- also synchronises)
    cudaCheckAPIError( cudaMemcpy( com, M_cd, sizeof(double4)*DIM*DIM, cudaMemcpyDeviceToHost ) );
    cudaCheckAPIError( cudaMemcpy( index_store, index_d, sizeof(int2)*DIM*DIM, cudaMemcpyDeviceToHost ) );
    for(int i = 0; i < DIM*DIM; i++)
    {
        std::cout <<"H" << i << "\tX: " << com[i].x << " Y: " << com[i].y << " M: " << com[i].w << std::endl;
        std::cout <<"\tFirst" << index_store[i].x << " Last: " << index_store[i].y << std::endl;
    }
    system("pause");
}
// Execute voronoi kernel
// Launch create_voronoi (b_in -> b_out) and append its runtime to vresults.
// Assumes ThreadsX divides N (grid = N/ThreadsX truncates).
// NOTE(review): the launch itself is not checked (no cudaGetLastError);
// launch-configuration errors would go unnoticed here.
void executeVoronoi()
{
    // Event parameters
    cudaEvent_t startEvent, stopEvent;
    // Create the event using cudaEventCreate
    cudaCheckAPIError( cudaEventCreate(&startEvent) );
    cudaCheckAPIError( cudaEventCreate(&stopEvent) );
    dim3 grid( N/ThreadsX );
    dim3 block( ThreadsX );
    cudaFuncSetCacheConfig(create_voronoi, cudaFuncCachePreferL1);
    cudaCheckAPIError( cudaEventRecord(startEvent, 0) );
    create_voronoi <<< grid, block >>> (b_in, b_out, Voronoi_d);
    vresults.push_back( completeEvent(startEvent, stopEvent) );
    // Release events
    cudaCheckAPIError( cudaEventDestroy(startEvent) );
    cudaCheckAPIError( cudaEventDestroy(stopEvent) );
}
// compute center of mass
// Launch compute_center_mass_with_search as a single block with one thread
// per Hilbert region (DIM*DIM threads); requires b_in to be Hilbert-sorted.
// Timing is appended to cresults.
void computeCOM()
{
    // Event parameters
    cudaEvent_t startEvent, stopEvent;
    // Create the event using cudaEventCreate
    cudaCheckAPIError( cudaEventCreate(&startEvent) );
    cudaCheckAPIError( cudaEventCreate(&stopEvent) );
    dim3 grid( 1 );
    dim3 block( DIM*DIM );
    cudaFuncSetCacheConfig(compute_center_mass_with_search, cudaFuncCachePreferL1);
    cudaCheckAPIError( cudaEventRecord(startEvent, 0) );
    compute_center_mass_with_search <<< grid, block >>> (b_in, M_cd, index_d);
    cresults.push_back( completeEvent(startEvent, stopEvent) );
    // Release events
    cudaCheckAPIError( cudaEventDestroy(startEvent) );
    cudaCheckAPIError( cudaEventDestroy(stopEvent) );
}
// Launch the Hilbert-region n-body kernel (b_in -> b_out) with a grid
// rounded up to cover all N bodies, and append its runtime to results.
void nBodyHil()
{
    // Event parameters
    cudaEvent_t startEvent, stopEvent;
    // Create the event using cudaEventCreate
    cudaCheckAPIError( cudaEventCreate(&startEvent) );
    cudaCheckAPIError( cudaEventCreate(&stopEvent) );
    dim3 grid(RoundUp(ThreadsX, N)/ThreadsX);
    dim3 block(ThreadsX);
    //printf("%d %d\n", grid.x, block.x);
    //system("pause");
    cudaFuncSetCacheConfig(nbody_kernel_hil, cudaFuncCachePreferL1);
    // compute body updates
    cudaCheckAPIError( cudaEventRecord(startEvent, 0) );
    nbody_kernel_hil <<< grid, block >>> (b_in, M_cd, LUT_d, index_d, b_out);
    results.push_back( completeEvent(startEvent, stopEvent) );
    // Release events
    cudaCheckAPIError( cudaEventDestroy(startEvent) );
    cudaCheckAPIError( cudaEventDestroy(stopEvent) );
}
// Print and return the median of the collected timings (ms), then clear
// the vector so the next benchmark pass starts fresh.
// Fix: the original parity test was `results.size()/2 == 0` -- only true
// for 0 or 1 samples -- so every odd-sized sample set >= 3 was averaged
// around the middle instead of taking the true median, and an empty
// vector indexed out of bounds.
float outputStats(std::vector<float>& results)
{
    if (results.empty())
    {
        printf("Median: n/a (no samples)\n");
        return 0.0f;
    }
    // Median
    std::sort( results.begin(), results.end());
    const size_t mid = results.size() / 2;
    float med = 0.0f;
    if (results.size() % 2 != 0)
        med = results[mid];                              // odd count: middle element
    else
        med = (results[mid] + results[mid - 1]) / 2.0f;  // even count: mean of the two middles
    printf("Median: %.2f ms\n", med);
    //printf("\t %.2f Mop/s\n", computeStats(med));
    //printf("1. %.2f %d. %.2f\n", results[0], results.size()-1, results[results.size()-1]);
    results.clear();
    return med;
}
// Sort the bodies by Hilbert index (operator< on body) so that region
// scans in the centre-of-mass and n-body kernels see contiguous ranges.
// Only the thrust::sort itself is timed (into sresults); the surrounding
// device->host->device_vector round trip is acknowledged overhead below.
// Reads b_out (this frame's Voronoi-tagged bodies), writes sorted b_in.
// NOTE(review): `body bodies[N]` is a large stack array -- see renderBodies.
void sortBodies()
{
    // copy bodies from device memory into thrust device vector
    // first copy device to host,
    // fill in host vector with host buffer
    // copy from host to device vector
    // perform sort
    // copy device to host
    // update host buffer
    // copy to device memory
    // put this is render, then we have 1 less copy to host
    thrust::device_vector<body> b;
    thrust::host_vector<body> output;
    body bodies[N];
    // copy data from device to host
    //cudaCheckAPIError( cudaEventRecord(startEvent, 0) );
    cudaCheckAPIError( cudaMemcpy( bodies, b_out, sizeof(body)*N, cudaMemcpyDeviceToHost ) );
    for(int i = 0; i < N; i++)
    {
        output.push_back(bodies[i]);
        //bodies[i].print();
    }
    // Event parameters
    cudaEvent_t startEvent, stopEvent;
    // Create the event using cudaEventCreate
    cudaCheckAPIError( cudaEventCreate(&startEvent) );
    cudaCheckAPIError( cudaEventCreate(&stopEvent) );
    b = output;   // host -> device vector upload
    // compute body updates
    cudaCheckAPIError( cudaEventRecord(startEvent, 0) );
    thrust::sort(b.begin(), b.end());
    sresults.push_back( completeEvent(startEvent, stopEvent) );
    // Release events
    cudaCheckAPIError( cudaEventDestroy(startEvent) );
    cudaCheckAPIError( cudaEventDestroy(stopEvent) );
    // cast thrust vector to raw pointer -- b needs to be global/error on close
    //b_in = thrust::raw_pointer_cast(b.data());
    output = b;   // device vector -> host download
    for(int i= 0; i < N; i++)
    {
        bodies[i] = output[i];
    }
    cudaCheckAPIError( cudaMemcpy( b_in, bodies, sizeof(body)*N, cudaMemcpyHostToDevice) );
    /*
    printf("\nSorted\n");
    for(unsigned int i = 0; i < output.size(); i++)
    output[i].print();
    system("pause");
    */
}
// Per-frame GLUT callback: runs one full simulation step (Voronoi
// assignment -> Hilbert sort -> centre of mass -> n-body update), renders
// it, then swaps the ping-pong buffers. Every `iterations` frames it
// prints median timings and doubles ThreadsX to benchmark the next block
// size; the program exits once ThreadsX exceeds N or 1024.
void Draw()
{
    executeVoronoi();
    sortBodies();
    // compute center of mass requires sorted order
    computeCOM();
    //outputCOM();
    //nBodySM();
    //nBodyOrig();
    nBodyHil();
    renderBodies(b_out);
    std::swap(b_in, b_out);   // this frame's output becomes next frame's input
    static int i = 0;         // frame counter for the current benchmark pass
    i++;
    if(i > iterations)
    {
        i = 0;
        float total_time = 0.0f;
        // Output Interaction Results
        printf("\nVoronoi NBody Results\n");
        printf("Threads -\t%d\n", ThreadsX);
        printf("Voronoi -\t");
        total_time += outputStats(vresults);
        printf("Sort -\t");
        total_time += outputStats(sresults);
        printf("CoM -\t");
        total_time += outputStats(cresults);
        printf("NBody -\t");
        total_time += outputStats(results);
        printf("Total Time for simulation - %.2f\n", total_time);
        ThreadsX *= 2;   // benchmark the next block size
        //	system("pause");
    }
    if((ThreadsX > N) || (ThreadsX > 1024))
    {
        system("pause");
        cleanup();   // frees device memory and exits
    }
}
// Initialise GLUT/GLEW: create the window, register the keyboard and
// draw/idle callbacks (Draw runs the whole simulation step), and disable
// vsync so frame timing reflects kernel cost.
void initGL(int argc, char *argv[], int wWidth, int wHeight)
{
    // init gl
    glutInit( &argc, argv );
    glutInitDisplayMode(GLUT_DOUBLE | GLUT_RGBA);
    glutInitWindowPosition(500, 100);
    glutInitWindowSize( wWidth, wHeight );
    glutCreateWindow( "CUDA Nbody" );
    // set callback functions
    glutKeyboardFunc(Key);
    glutDisplayFunc(Draw);
    glutIdleFunc(Draw);   // re-render continuously, not only on events
    glewInit();
    if (glewIsSupported("GL_VERSION_2_1"))
        printf("Ready for OpenGL 2.1\n");
    else
        printf("Warning: Detected that OpenGL 2.1 not supported\n");
    wglSwapIntervalEXT(false);   // Windows-only: turn off vsync
}
/*
int2 findValue(int** LUT, int h)
{
int n = 4;
for(int y = 0; y < n; y++)
{
for(int x = 0; x < n; x++)
{
if(LUT[x][y] == h)
{
int2 coords;
coords.x = x;
coords.y = y;
return coords;
}
}
}
int2 err;
err.x = -1;
err.y = -1;
return err;
}
int2 findValue(int* LUT, int h)
{
int n = 4;
for(int y = 0; y < n; y++)
{
for(int x = 0; x < n; x++)
{
if(LUT[y*n + x] == h)
{
int2 coords;
coords.x = x;
coords.y = y;
return coords;
}
}
}
int2 err;
err.x = -1;
err.y = -1;
return err;
}
void outputSearchZone(int** LUT, int zone)
{
int n = 4;
//find index of LUT for Hilbert zone subject of search
int2 idx = findValue(LUT, zone);
std::cout << "\nSearch zone " << zone << " is at index - " << idx.x << ", "<< idx.y << std::endl;
//std::cout << (idx - n - 1 + uNumVoronoiPts)%uNumVoronoiPts << " " << (idx - n + uNumVoronoiPts)%uNumVoronoiPts << " "<< (idx - n + 1 + uNumVoronoiPts)%uNumVoronoiPts << std::endl;
//std::cout << (idx - 1 + uNumVoronoiPts)%uNumVoronoiPts << " " << idx << " "<< (idx + 1 + uNumVoronoiPts)%uNumVoronoiPts << std::endl;
//std::cout << (idx + n - 1 + uNumVoronoiPts)%uNumVoronoiPts << " " << (idx + n + uNumVoronoiPts)%uNumVoronoiPts << " "<< (idx + n + 1 + uNumVoronoiPts)%uNumVoronoiPts << std::endl;
std::cout << LUT[(idx.x-1 + n)%n][(idx.y-1 + n)%n] << " " << LUT[(idx.x + n)%n][(idx.y-1 + n)%n]<< " "<< LUT[(idx.x+1 + n)%n][(idx.y-1 + n)%n] << std::endl;
std::cout << LUT[(idx.x-1 + n)%n][(idx.y + n)%n] << " " << LUT[(idx.x + n)%n][(idx.y + n)%n] << " "<< LUT[(idx.x+1 + n)%n][(idx.y + n)%n] << std::endl;
std::cout << LUT[(idx.x-1 + n)%n][(idx.y+1 + n)%n] << " " << LUT[(idx.x + n)%n][(idx.y+1 + n)%n] << " "<< LUT[(idx.x+1 + n)%n][(idx.y+1 + n)%n] << std::endl;
}
void outputSearchZone(int* LUT, int zone)
{
int n = 4;
//find index of LUT for Hilbert zone subject of search
int2 coords = findValue(LUT, zone);
std::cout << "\nSearch zone " << zone << " is at index - " << coords.x << ", "<< coords.y << std::endl;
//std::cout << (idx - n - 1 + uNumVoronoiPts)%uNumVoronoiPts << " " << (idx - n + uNumVoronoiPts)%uNumVoronoiPts << " "<< (idx - n + 1 + uNumVoronoiPts)%uNumVoronoiPts << std::endl;
//std::cout << (idx - 1 + uNumVoronoiPts)%uNumVoronoiPts << " " << idx << " "<< (idx + 1 + uNumVoronoiPts)%uNumVoronoiPts << std::endl;
//std::cout << (idx + n - 1 + uNumVoronoiPts)%uNumVoronoiPts << " " << (idx + n + uNumVoronoiPts)%uNumVoronoiPts << " "<< (idx + n + 1 + uNumVoronoiPts)%uNumVoronoiPts << std::endl;
std::cout << LUT[(coords.y-1 + n)%n*n + (coords.x-1 + n)%n]
<< " " << LUT[(coords.y-1 + n)%n*n + (coords.x + n)%n]<< " "
<< LUT[(coords.y-1 + n)%n*n + (coords.x+1 + n)%n] << std::endl;
std::cout << LUT[(coords.y + n)%n*n + (coords.x-1 + n)%n] << " "
<< LUT[(coords.y + n)%n*n + (coords.x + n)%n] << " "
<< LUT[(coords.y + n)%n*n + (coords.x+1 + n)%n]<< std::endl;
std::cout << LUT[(coords.y+1 + n)%n*n + (coords.x-1 + n)%n] << " "
<< LUT[(coords.y+1 + n)%n*n + (coords.x + n)%n] << " "
<< LUT[(coords.y+1 + n)%n*n + (coords.x+1 + n)%n]<< std::endl;
}
*/
// Program entry point: creates the initial bodies and the Voronoi/Hilbert
// lookup tables on the host, uploads them to the device, then hands
// control to GLUT (Draw() performs the per-frame steps).
int main(int argc, char** argv)
{
    printf("N Body Benchmark CUDA\n\n");
    // Initial body data
    const int body_size = sizeof(body)*N;
    const int voronoi_size = sizeof(VoronoiBuf)*DIM*DIM;
    printf("Body List\n");
    body* body_h = (body*)malloc( body_size );
    for(int i = 0; i < N; i++)
    {
        body_h[i] = init_body(i);   // body 0 is the central mass, the rest orbit it
        //body_h[i].print();
    }
    printf("\n");
    // Generate Voronoi Points
    VoronoiBuf* Voronoi_h = (VoronoiBuf*)malloc(voronoi_size);
    // 2D LUT is only consumed by commented-out debug code; the flat lut_h
    // is what actually gets uploaded to the device.
    int** LUT_h = (int**)malloc(sizeof(int*)*DIM);
    for(int i = 0; i < DIM; i++)
        LUT_h[i] = (int*)malloc(sizeof(int)*DIM);
    int* lut_h = (int*)malloc(sizeof(int)*DIM*DIM);
    printf("Program Data\n");
    printf("Number of Voronoi Points :\t%d\n", DIM*DIM);
    int k = 0;
    double radius = sim_rad/8;            // seeds cover the central 1/8 of the simulation radius
    double spacing_x = (2*radius)/ DIM;
    double spacing_y = (2*radius)/ DIM;
    printf("%d %g %g\n\n", DIM, spacing_x, spacing_y);
    printf("Voronoi Points\n");
    for(int y = -1*DIM/2; y < DIM/2; y++)
    {
        for(int x = -1*DIM/2; x < DIM/2; x++)
        {
            // NOTE(review): .x is scaled by the y loop index and .y by x --
            // this transposes the grid; confirm it is intentional.
            Voronoi_h[k].x = spacing_x/2 + spacing_x*y;
            Voronoi_h[k].y = spacing_y/2 + spacing_y*x;
            Voronoi_h[k].colour.x = (sin(2.4*k + 0) *127 + 128)/255;
            Voronoi_h[k].colour.y = (sin(2.4*k + 2) *127 + 128)/255;
            Voronoi_h[k].colour.z = (sin(2.4*k + 4) *127 + 128)/255;
            Voronoi_h[k].colour.w = 1.0f;
            Voronoi_h[k].morton = EncodeMorton2(x + DIM/2, y + DIM/2);
            Voronoi_h[k].hilbert = EncodeHilbert2(DIM*DIM, x + DIM/2, y + DIM/2);
            //Voronoi_h[k].print();
            // NOTE(review): LUT_h is indexed [x][y] while lut_h[k] follows the
            // y-outer/x-inner loop order -- the two tables are transposed
            // relative to each other. Only lut_h reaches the device.
            LUT_h[x+DIM/2][y+DIM/2] = Voronoi_h[k].hilbert;
            lut_h[k] = Voronoi_h[k].hilbert;
            //printf("%d - %d\t", k, LUT_h[x+DIM/2][y+DIM/2]);
            k++;
        }
        //std::cout << std::endl;
    }
    /*
    double spacing_x = (sim_rad/4)/ DIM;
    double spacing_y = (sim_rad/4)/ DIM;
    printf("%d %g %g\n\n", DIM, spacing_x, spacing_y);
    printf("Voronoi Points\n");
    for(int y = -1*DIM/2; y < DIM/2; y++)
    {
    for(int x = -1*DIM/2; x < DIM/2; x++)
    {
    Voronoi_h[k].x = spacing_x/2 + spacing_x*y;
    Voronoi_h[k].y = spacing_y/2 + spacing_y*x;
    Voronoi_h[k].colour.x = (sin(2.4*k + 0) *127 + 128)/255;
    Voronoi_h[k].colour.y = (sin(2.4*k + 2) *127 + 128)/255;
    Voronoi_h[k].colour.z = (sin(2.4*k + 4) *127 + 128)/255;
    Voronoi_h[k].colour.w = 1.0f;
    Voronoi_h[k].morton = EncodeMorton2(x + DIM/2, y + DIM/2);
    Voronoi_h[k].hilbert = EncodeHilbert2(DIM*DIM, x + DIM/2, y + DIM/2);
    Voronoi_h[k].print();
    LUT_h[x+DIM/2][y+DIM/2] = Voronoi_h[k].hilbert;
    lut_h[k] = Voronoi_h[k].hilbert;
    printf("%d - %d\t", k, LUT_h[x+DIM/2][y+DIM/2]);
    k++;
    }
    std::cout << std::endl;
    }*/
    /*
    for(int i = 0; i < uNumVoronoiPts; i++)
    {
    outputSearchZone(LUT_h, i);
    std::cout<<std::endl;
    outputSearchZone(lut_h, i);
    }
    */
    // allocate memory on device for buffers
    cudaCheckAPIError( cudaMalloc( (void**)&b_in, body_size) );
    cudaCheckAPIError( cudaMalloc( (void**)&b_out, body_size) );
    cudaCheckAPIError( cudaMalloc( (void**)&Voronoi_d, voronoi_size) );
    cudaCheckAPIError( cudaMalloc( (void**)&LUT_d, sizeof(int)*DIM*DIM) );
    cudaCheckAPIError( cudaMalloc( (void**)&M_cd, sizeof(double4)*DIM*DIM) );
    cudaCheckAPIError( cudaMalloc( (void**)&index_d, sizeof(int2)*DIM*DIM) );
    // copy data from host to device
    cudaCheckAPIError( cudaMemcpy( b_in, body_h, body_size, cudaMemcpyHostToDevice) ); //same intial conditions
    cudaCheckAPIError( cudaMemcpy( Voronoi_d, Voronoi_h, voronoi_size, cudaMemcpyHostToDevice) );
    cudaCheckAPIError( cudaMemcpy( LUT_d, lut_h, sizeof(int)*DIM*DIM, cudaMemcpyHostToDevice) );
    //free host memory -- the blocking memcpys above mean the device already has copies
    free( Voronoi_h );
    free( body_h );
    for(int i = 0; i < DIM; i++)
        free(LUT_h[i]);
    free( LUT_h );
    free( lut_h );
    // Output some useful data
    printf("Number of Bodies : \t%d\n", N);
    printf("NBody\n");
    printf("Global Work Size :\t%d\n", RoundUp(ThreadsX, N)/ThreadsX );
    printf("Local Work Size :\t%d\n\n\n", ThreadsX);
    initGL(argc, argv, 512, 512);
    glutMainLoop();   // does not return; cleanup() exits the process
    return 0;
}
// Query every CUDA device in the system and print its capabilities.
// Uses the project's printLine()/printBlank() formatting helpers and
// cudaCheckAPIError() for API error checking.
void cudaQuery()
{
    // determine number of CUDA devices
    int count;
    cudaCheckAPIError( cudaGetDeviceCount(&count) );
    printLine("Number of CUDA Devices ", count);
    printBlank();
    // output information on all devices
    for(int i = 0; i < count; i++)
    {
        printLine("Device ", i+1);
        // determine properties
        cudaDeviceProp properties;
        cudaCheckAPIError( cudaGetDeviceProperties(&properties, i) );
        // NOTE(review): &properties.name is a char(*)[256], not a char* —
        // presumably printLine is overloaded/templated to cope; confirm, as
        // the conventional argument would be properties.name (no '&').
        printLine("Name ", &properties.name);
        printLine("Total Global Mem ", properties.totalGlobalMem);
        printLine("Shared Mem Per Block ", properties.sharedMemPerBlock);
        printLine("Regs Per Block ", properties.regsPerBlock);
        printLine("Warp Size ", properties.warpSize);
        printLine("MemPitch ", properties.memPitch);
        printLine("Max Threads Per Block ", properties.maxThreadsPerBlock);
        printLine("Max Threads Dim ", properties.maxThreadsDim);
        printLine("Max Grid Size ", properties.maxGridSize);
        printLine("Total Const Mem ", properties.totalConstMem);
        printLine("Major ", properties.major);
        printLine("Minor ", properties.minor);
        printLine("Clock Rate ", properties.clockRate);
        printLine("Texture Alignment ", properties.textureAlignment);
        printLine("Device Overlap ", properties.deviceOverlap);
        printLine("Multi Processor Count ", properties.multiProcessorCount);
        printLine("Kernel Exec Timeout Enabled", properties.kernelExecTimeoutEnabled);
        printLine("Integrated ", properties.integrated);
        printLine("Can Map Host Memory ", properties.canMapHostMemory);
        printLine("Compute Mode ", properties.computeMode);
        printLine("Max Texture 1D ", properties.maxTexture1D);
        printLine("Max Surface 2D ", properties.maxSurface2D);
        printLine("Max Texture 2D ", properties.maxTexture2D);
        printLine("Max Texture 3D ", properties.maxTexture3D);
        printLine("Concurrent Kernels ", properties.concurrentKernels);
    }
    printBlank();
}
|
6ce4788fc48a6e4058c2393c5d00274fded2567a.hip
|
// !!! This is a file automatically generated by hipify!!!
#ifndef GPUQUADVOLUME_CU
#define GPUQUADVOLUME_CU
// Device-side view of two per-element float arrays ("high" and "low"),
// each backed by a 2D texture read at column 0, row i.
struct Volume {
    hipTextureObject_t high;   // texture holding the "high" values
    hipTextureObject_t low;    // texture holding the "low" values
    // Fetch the i-th "high" value (column 0, row i of the texture).
    __device__ float
    GetHigh(int i)
    {
        return tex2D<float>(high, 0, i);
    }
    // Fetch the i-th "low" value (column 0, row i of the texture).
    __device__ float
    GetLow(int i)
    {
        return tex2D<float>(low, 0, i);
    }
    // Host constructor: uploads l and h into the two textures.
    // NOTE(review): allocText() is defined elsewhere in the project —
    // presumably it creates a 1 x dim texture from the host array; confirm.
    Volume(float* l, float* h, int dim)
    {
        allocText(high, h, 1, dim);
        allocText(low, l, 1, dim);
    }
};
#endif
|
6ce4788fc48a6e4058c2393c5d00274fded2567a.cu
|
#ifndef GPUQUADVOLUME_CU
#define GPUQUADVOLUME_CU
// Device-side view of two per-element float arrays ("high" and "low"),
// each backed by a 2D texture read at column 0, row i.
struct Volume {
    cudaTextureObject_t high;  // texture holding the "high" values
    cudaTextureObject_t low;   // texture holding the "low" values
    // Fetch the i-th "high" value (column 0, row i of the texture).
    __device__ float
    GetHigh(int i)
    {
        return tex2D<float>(high, 0, i);
    }
    // Fetch the i-th "low" value (column 0, row i of the texture).
    __device__ float
    GetLow(int i)
    {
        return tex2D<float>(low, 0, i);
    }
    // Host constructor: uploads l and h into the two textures.
    // NOTE(review): allocText() is defined elsewhere in the project —
    // presumably it creates a 1 x dim texture from the host array; confirm.
    Volume(float* l, float* h, int dim)
    {
        allocText(high, h, 1, dim);
        allocText(low, l, 1, dim);
    }
};
#endif
|
d90438c96024f2044821e6e4a8c31b6f91d2e2ad.hip
|
// !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
/*
* Copyright (c) 2011, Alex Krizhevsky (akrizhevsky@gmail.com)
* All rights reserved.
*
* Redistribution and use in source and binary forms, with or without modification,
* are permitted provided that the following conditions are met:
*
* - Redistributions of source code must retain the above copyright notice,
* this list of conditions and the following disclaimer.
*
* - Redistributions in binary form must reproduce the above copyright notice,
* this list of conditions and the following disclaimer in the documentation
* and/or other materials provided with the distribution.
*
* THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
* AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
* IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
* ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE
* LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
* DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
* LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND
* ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING
* NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE,
* EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
*/
#include <iostream>
#include <assert.h>
#include <nvmatrix_kernels.cuh>
#include <nvmatrix.cuh>
#include <conv_util.cuh>
using namespace std;
// Square a float; used for the sum-of-squares accumulations in the
// normalization kernels below.
__device__ inline float square(const float a) {
    return a * a;
}
/*
 * Caps the L2 norm of each locally-connected filter at `norm`, in place:
 * filters whose norm exceeds the limit are scaled down to exactly `norm`,
 * others are left unchanged.
 *
 * blockIdx.y determines module in batches of B_Y
 * blockIdx.x determines filter in batches of B_X * filtersPerThread
 *
 * weights: (numModules, numColors, filterPixels, numFilters)
 * Not fully coalesced if B_X < 32, so use cache.
 */
template <int B_Y, int B_X, int filtersPerThread>
__global__ void kNormalizeLCWeights(float* weights, const uint numFilters, const int numModules, const uint weightsPerFilter, const float norm) {
    const uint moduleIdx = B_Y * blockIdx.y + threadIdx.y;
    const uint filterIdx = B_X * blockIdx.x + threadIdx.x;
    // Per-thread running sum of squares for each filter this thread handles.
    float prod[filtersPerThread];
    #pragma unroll
    for (uint i = 0; i < filtersPerThread; ++i) {
        prod[i] = 0;
    }
    if (moduleIdx < numModules) {
        weights += moduleIdx * weightsPerFilter * numFilters + filterIdx;
        // Pass 1: accumulate the squared weights of each filter.
        for (uint p = 0; p < weightsPerFilter; ++p) {
            #pragma unroll
            for (uint i = 0; i < filtersPerThread; ++i) {
                prod[i] += square(weights[p * numFilters + i * B_X]);
            }
        }
        // Turn sums into L2 norms, then into per-filter rescale factors.
        #pragma unroll
        for (uint i = 0; i < filtersPerThread; ++i) {
            prod[i] = sqrtf(prod[i]);
            prod[i] = prod[i] > norm ? __fdividef(norm, prod[i]) : 1.0f;
        }
        // Pass 2: apply the rescale factor in place.
        for (uint p = 0; p < weightsPerFilter; ++p) {
            #pragma unroll
            for (uint i = 0; i < filtersPerThread; ++i) {
                weights[p * numFilters + i * B_X] *= prod[i];
            }
        }
    }
}
/*
 * Host wrapper for kNormalizeLCWeights: caps the L2 norm of each
 * locally-connected filter at `norm`, in place.
 *
 * weights: (numModules, numColors, filterPixels, numFilters)
 */
void normalizeLocalWeights(NVMatrix& weights, int numModules, float norm) {
    int numFilters = weights.getNumCols();
    int weightsPerFilter = weights.getNumRows() / numModules;
    assert(numModules * weightsPerFilter == weights.getNumRows());
    assert(!weights.isTrans());
    assert(weights.isContiguous());
    assert(numFilters % 16 == 0);
    // Launch shape: full-warp-wide (32) blocks when numFilters allows,
    // otherwise 16-wide blocks with more rows.
    int bx = numFilters % 32 == 0 ? 32 : 16;
    int by = bx == 32 ? 4 : 8;
    int filtersPerThread = numFilters % 128 == 0 ? 4 : numFilters % 64 == 0 ? 2 : 1;
    dim3 blocks(numFilters / (bx * filtersPerThread), DIVUP(numModules, by));
    dim3 threads(bx, by);
    // Dispatch to the matching template instantiation; prefer L1 cache
    // since the kernel uses no shared memory.
    if (filtersPerThread == 4) {
        hipFuncSetCacheConfig(kNormalizeLCWeights<4, 32, 4>, hipFuncCachePreferL1);
        hipLaunchKernelGGL(( kNormalizeLCWeights<4, 32, 4>), dim3(blocks), dim3(threads), 0, 0, weights.getDevData(), numFilters, numModules, weightsPerFilter, norm);
    } else if (filtersPerThread == 2) {
        hipFuncSetCacheConfig(kNormalizeLCWeights<4, 32, 2>, hipFuncCachePreferL1);
        hipLaunchKernelGGL(( kNormalizeLCWeights<4, 32, 2>), dim3(blocks), dim3(threads), 0, 0, weights.getDevData(), numFilters, numModules, weightsPerFilter, norm);
    } else {
        if (numFilters % 32 == 0) {
            hipFuncSetCacheConfig(kNormalizeLCWeights<4, 32, 1>, hipFuncCachePreferL1);
            hipLaunchKernelGGL(( kNormalizeLCWeights<4, 32, 1>), dim3(blocks), dim3(threads), 0, 0, weights.getDevData(), numFilters, numModules, weightsPerFilter, norm);
        } else {
            hipFuncSetCacheConfig(kNormalizeLCWeights<8, 16, 1>, hipFuncCachePreferL1);
            hipLaunchKernelGGL(( kNormalizeLCWeights<8, 16, 1>), dim3(blocks), dim3(threads), 0, 0, weights.getDevData(), numFilters, numModules, weightsPerFilter, norm);
        }
    }
}
/*
 * Copies a tgtSize x tgtSize window (top-left corner at startY/startX) out of
 * each imgSize x imgSize channel of every image.
 *
 * Block size 4x32
 * blockIdx.x determines img idx in batches of 32*imgsPerThread
 * blockIdx.y determines channel idx, pixel idx in batches of 4
 *
 * threadIdx.x determins case idx
 * threadIdx.y determines pixel idx
 *
 * imgs:   (numChannels, imgPixels, numImages) with given imgStride
 * target: (numChannels, tgtPixels, numImages)
 */
template <int imgsPerThread, bool checkCaseBounds>
__global__ void kCrop(float* imgs, float* target, const uint numImages, const int imgStride,
                      const uint imgSize, const uint tgtSize, const uint startY, const uint startX) {
    const uint imgPixels = imgSize * imgSize;
    const uint tgtPixels = tgtSize * tgtSize;
    const uint caseIdx = blockIdx.x * 32 * imgsPerThread + threadIdx.x;
    // blockIdx.y packs (channel, group-of-4 target pixels).
    const uint blockChanIdx = blockIdx.y / DIVUP(tgtPixels, 4);
    const uint tgtPixelIdx = 4*(blockIdx.y % DIVUP(tgtPixels, 4)) + threadIdx.y;
    const uint tgtPxY = tgtPixelIdx / tgtSize;
    const uint tgtPxX = tgtPixelIdx % tgtSize;
    // Source pixel is the target pixel offset by the crop origin.
    const uint srcPixelIdx = (startY + tgtPxY) * imgSize + startX + tgtPxX;
    if (tgtPixelIdx < tgtPixels) {
        imgs += (blockChanIdx * imgPixels + srcPixelIdx) * imgStride + caseIdx;
        target += (blockChanIdx * tgtPixels + tgtPixelIdx) * numImages + caseIdx;
        #pragma unroll
        for (uint i = 0; i < imgsPerThread; ++i) {
            if (!checkCaseBounds || (caseIdx + 32 * i < numImages)) {
                target[i * 32] = imgs[i * 32];
            }
        }
    }
}
/*
 * Converts interleaved-by-channel RGB images to YUV via a fixed linear
 * transform, one (r,g,b) pixel per thread per image.
 *
 * Block size 4x32
 * blockIdx.y determines pixel idx in batches of 4
 * blockIdx.x determines case idx in batches of 32*imgsPerThread
 * threadIdx.y determines pixel idx
 * threadIdx.x determines case idx
 *
 * imgs:    (3, imgPixels, numImages) with given imgStride
 * target:  (3, imgPixels, numImages)
 *
 * Each thread produces (y,u,v) values for a particular (r,g,b) pixel
 *
 * The RGB --> YUV transform is (http://en.wikipedia.org/wiki/YUV):
 *
 * [Y]      [ 0.2126     0.7152    0.0722 ][R]
 * [U]  =   [-0.09991   -0.33609   0.436  ][G]
 * [V]      [ 0.615     -0.55861  -0.05639][B]
 */
template <int imgsPerThread, bool checkCaseBounds>
__global__ void kRGBToYUV(float* imgs, float* target, const int imgPixels, const int numImages, const int imgStride) {
    const int caseIdx = blockIdx.x * 32 * imgsPerThread + threadIdx.x;
    const int pxIdx = blockIdx.y * 4 + threadIdx.y;
    if (pxIdx < imgPixels) {
        // Stride between channel planes in the source and the target.
        const int imgChannelStride = imgPixels * imgStride;
        const int tgtChannelStride = imgPixels * numImages;
        imgs += pxIdx * imgStride + caseIdx;
        target += pxIdx * numImages + caseIdx;
        #pragma unroll
        for (int i = 0; i < imgsPerThread; ++i) {
            if (!checkCaseBounds || caseIdx + i * 32 < numImages) {
                const float R = imgs[0 * imgChannelStride + i * 32];
                const float G = imgs[1 * imgChannelStride + i * 32];
                const float B = imgs[2 * imgChannelStride + i * 32];
                target[0 * tgtChannelStride + i * 32] = 0.2126f * R + 0.7152f * G + 0.0722f * B;       // Y
                target[1 * tgtChannelStride + i * 32] = -0.09991f * R + -0.33609f * G + 0.436f * B;    // U
                target[2 * tgtChannelStride + i * 32] = 0.615f * R + -0.55861f * G + -0.05639f * B;    // V
            }
        }
    }
}
// Forward nonlinearity of the CIE L*a*b* conversion: approximate cube root
// above the linear-domain threshold, affine approximation below it.
__device__ inline float labf(const float x) {
    return x > 0.0088564517f ? __powf(x, 0.3333f)
                             : 7.787037f * x + 0.13793103f;
}
/*
 * Block size 4x32
 * blockIdx.y determines pixel idx in batches of 4
 * blockIdx.x determines case idx in batches of 32*imgsPerThread
 * threadIdx.y determines pixel idx
 * threadIdx.x determines case idx
 *
 * imgs:    (3, imgPixels, numImages) with given imgStride
 * target:  (3, imgPixels, numImages)
 *
 * This proceeds in two steps.
 *
 * - First, RGB values are linearly transformed to XYZ as per
 *   http://en.wikipedia.org/wiki/CIE_XYZ_color_space
 * - Second, XYZ values are nonlinearly transformed to L*a*b* as per
 *   http://en.wikipedia.org/wiki/Lab_color_space#The_forward_transformation
 *
 * Each thread produces (L*,a*,b*) values for a particular (r,g,b) pixel
 *
 * The RGB --> XYZ transform is:
 *
 * [X]               [0.49       0.31        0.2     ][R]
 * [Y]  = 5.6506753 *[0.17697    0.8124      0.01063 ][G]
 * [Z]               [0          0.01        0.99    ][B]
 *
 * NOTE: The input should be in the range 0-1. Don't do mean-subtraction beforehand.
 *
 * Then X_max, Y_max, Z_max = 5.6506753.
 *
 * The range of the L* values is [0, 100].
 * If the center flag is given, the range will be [-50, 50].
 *
 */
template <int imgsPerThread, bool checkCaseBounds, bool center>
__global__ void kRGBToLAB(float* imgs, float* target, const int imgPixels, const int numImages, const int imgStride) {
    const int caseIdx = blockIdx.x * 32 * imgsPerThread + threadIdx.x;
    const int pxIdx = blockIdx.y * 4 + threadIdx.y;
    if (pxIdx < imgPixels) {
        // Stride between channel planes in the source and the target.
        const int imgChannelStride = imgPixels * imgStride;
        const int tgtChannelStride = imgPixels * numImages;
        imgs += pxIdx * imgStride + caseIdx;
        target += pxIdx * numImages + caseIdx;
        #pragma unroll
        for (int i = 0; i < imgsPerThread; ++i) {
            if (!checkCaseBounds || caseIdx + i * 32 < numImages) {
                const float R = imgs[0 * imgChannelStride + i * 32];
                const float G = imgs[1 * imgChannelStride + i * 32];
                const float B = imgs[2 * imgChannelStride + i * 32];
                // Linear RGB -> XYZ (the 5.6506753 factor cancels in the
                // labf() ratios, so it is omitted here).
                const float X = (0.49f * R + 0.31f * G + 0.2f * B);
                const float Y = (0.17697f * R + 0.8124f * G + 0.01063f * B);
                const float Z = (0.01f * G + 0.99f * B);
                // Nonlinear XYZ -> L*a*b* via the labf() helper above.
                const float labX = labf(X);
                const float labY = labf(Y);
                const float labZ = labf(Z);
                target[0 * tgtChannelStride + i * 32] = 116.0f * labY - 16.0f - (center ? 50.0f : 0);  // L*
                target[1 * tgtChannelStride + i * 32] = 500.0f * (labX - labY);                        // a*
                target[2 * tgtChannelStride + i * 32] = 200.0f * (labY - labZ);                        // b*
            }
        }
    }
}
/*
 * Bilinear image resize (up- or down-sampling by `scale`).
 *
 * Block size 16x32.
 * Each block produces a 4x4 chunk of the output image.
 * threadIdx.y determines pixel idx in 4x4 chunk.
 * threadIdx.x determines case idx.
 * blockIdx.x determines case idx in batches of 32*imgsPerThread.
 * blockIdx.y determines 4x4 chunk idx, channel idx.
 *
 * imgs:    (numChannels, imgPixels, numImages) with given imgStride
 * target:  (numChannels, tgtPixels, numImages)
 *
 * imgSize = scale * tgtSize (roughly)
 *
 * This is a rather naive kernel that relies on cache for speed. But all it's doing
 * is basic texture manipulation, which is very local in nature, so it should be ok.
 * Also, it will in practice be a tiny fraction of the runtime of a large convnet.
 *
 * So that is my justification for being lazy here.
 */
template <int imgsPerThread, bool checkCaseBounds>
__global__ void kResizeBilinear(float* imgs, float* target, const int imgSize, const int tgtSize,
                                const int numImages, const int imgStride, const float scale,
                                const float centerScale) {
    // Decode (channel, 4x4 output chunk) from blockIdx.y.
    const int numChunksX = DIVUP(tgtSize, 4);
    const int numChunks = numChunksX * numChunksX;
    const int channelIdx = blockIdx.y / numChunks;
    const int chunkIdx = blockIdx.y % numChunks;
    const int chunkIdxX = chunkIdx % numChunksX;
    const int chunkIdxY = chunkIdx / numChunksX;
    const int caseIdx = blockIdx.x * 32 * imgsPerThread + threadIdx.x;
    const int imgPixels = imgSize * imgSize;
    const int tgtPixels = tgtSize * tgtSize;
    // Output pixel coordinates handled by this thread.
    const int pxX = 4 * chunkIdxX + threadIdx.y % 4;
    const int pxY = 4 * chunkIdxY + threadIdx.y / 4;
    if (pxY < tgtSize && pxX < tgtSize) {
        const int pxIdx = pxY * tgtSize + pxX;
        imgs += channelIdx * imgPixels * imgStride + caseIdx;
        target += channelIdx * tgtPixels * numImages + pxIdx * numImages + caseIdx;
        // This will cause slight distortions at the edges when upsampling in some cases.
        // But I think that's not a big deal.
        // Source coordinates clamped to [0, imgSize - 1.01] so the 2x2
        // neighborhood below stays in bounds.
        const float srcPxX = fmaxf(0.0f, fminf(__int2float_rn(imgSize) - 1.01f, __int2float_rn(pxX) * scale + centerScale));
        const float srcPxY = fmaxf(0.0f, fminf(__int2float_rn(imgSize) - 1.01f, __int2float_rn(pxY) * scale + centerScale));
        // u = weight of the left column, w = weight of the bottom row.
        const float u = floorf(srcPxX + 1) - srcPxX;
        const float w = srcPxY - floorf(srcPxY);
        // Consider doing max(0, min(imgSize, x)) here
        const int srcPx0 = (__float2int_rd(srcPxY) * imgSize + __float2int_rd(srcPxX)); // top-left
        const int srcPx1 = srcPx0 + 1; // top-right
        const int srcPx2 = srcPx0 + imgSize; // bottom-left
        const int srcPx3 = srcPx2 + 1; // bottom-right
        #pragma unroll
        for (int c = 0; c < imgsPerThread; ++c) {
            if (!checkCaseBounds || caseIdx + c * 32 < numImages) {
                const float val0 = imgs[srcPx0 * imgStride + c * 32];
                const float val1 = imgs[srcPx1 * imgStride + c * 32];
                const float val2 = imgs[srcPx2 * imgStride + c * 32];
                const float val3 = imgs[srcPx3 * imgStride + c * 32];
                // Interpolate horizontally along both rows, then vertically.
                const float c0 = u * (val0 - val1) + val1;
                const float c1 = u * (val2 - val3) + val3;
                target[32 * c] = w * (c1 - c0) + c0;
            }
        }
    }
}
/*
 * One separable pass of a Gaussian blur (horizontal or vertical).
 *
 * Block size B_YxB_X.
 * B_X*imgsPerThread*blockIdx.x + threadIdx.x determines img idx
 * B_Y*blockIdx.y + threadIdx.y determines img row (col if !horiz), channel idx
 *
 * imgs:    (numChannels, imgPixels, numImages) with given imgStride
 * filter:  (1, 2*radius + 1)
 * target:  (numChannels, imgPixels, numImages)
 *
 * target can be the same matrix as imgs.
 * radius must be in [1, 4] (filter width 2*radius + 1, i.e. 3/5/7/9 taps);
 * this matches the instantiations in convGaussianBlur below.
 * Requires B_X >= 2*radius so the filter load below covers every cached tap.
 *
 * Tried imgsPerThread, slower.
 */
template<int B_Y, int B_X, int radius>
__global__ void kGaussianBlur(float* imgs, float* filter, float* target, const int imgSize,
                              const int numImages, const int imgStride,
                              const bool horiz,
                              const float scaleTargets, const float scaleOutputs) {
    // BUG FIX: this cache is written at indices [0, filterWidth-2] (i.e. up
    // to 2*radius-1, see the load guarded by threadIdx.x < filterWidth-1)
    // and read at shFilter[radius + col - r] / shFilter[r], which also reach
    // index 2*radius-1. The previous declaration `shFilter[radius]` was an
    // out-of-bounds shared-memory access for every supported radius; it must
    // hold 2*radius elements.
    __shared__ float shFilter[2 * radius];
    const int imgPixels = imgSize * imgSize;
    // Decode (channel, row-or-column) from the y launch dimension.
    const int ty = B_Y * blockIdx.y + threadIdx.y;
    const int channelIdx = ty / imgSize;
    const int rowIdx = ty % imgSize;
    const int imgIdx = B_X*blockIdx.x + threadIdx.x;
    const int filterWidth = 2*radius+1;
//    const int tidx = B_Y * threadIdx.y + threadIdx.x;
    // Point at the start of the row (horiz) or column (!horiz) this thread filters.
    if (horiz) {
        imgs += channelIdx * imgPixels * imgStride + rowIdx * imgSize * imgStride + imgIdx;
        target += channelIdx * imgPixels * numImages + rowIdx * imgSize * numImages + imgIdx;
    } else {
        imgs += channelIdx * imgPixels * imgStride + rowIdx * imgStride + imgIdx;
        target += channelIdx * imgPixels * numImages + rowIdx * numImages + imgIdx;
    }
    // Sliding window of partial sums for the next filterWidth-1 outputs.
    float outputs[filterWidth-1];
    #pragma unroll
    for (int r = 0; r < filterWidth-1; r++) {
        outputs[r] = 0;
    }
    // Cache the first 2*radius filter taps in shared memory (the final tap
    // is read via shFilter[0], relying on the filter's symmetry).
    if (threadIdx.x < filterWidth-1) {
        shFilter[threadIdx.x] = filter[threadIdx.x];
    }
    __syncthreads();
    if (imgIdx < numImages) {
        // Warm-up: the first `radius` input pixels contribute to the first
        // radius*2 = filterWidth - 1 partial outputs.
        #pragma unroll
        for (int col = 0; col < radius; col++) {
            float px = imgs[0];
            #pragma unroll
            for (int r = 0; r < radius + 1 + col; r++) {
                outputs[r] += px * shFilter[radius + col - r];
            }
            imgs += horiz ? imgStride : imgStride * imgSize;
        }
        // Unfortunately this has to be at this level of granularity
        if (scaleTargets != 0) {
            // Accumulate into existing target values.
            for (int col = radius; col < imgSize ; col++) { // loop over img columns
                float px = imgs[0];
                target[0] = scaleTargets * target[0] + scaleOutputs * (outputs[0] + px * shFilter[0]);
                // Shift the partial-sum window and feed the new pixel in.
                #pragma unroll
                for (int r = 1; r < radius*2; r++) {
                    outputs[r-1] = outputs[r] + px * shFilter[r];
                }
                outputs[filterWidth - 2] = px * shFilter[0];
                imgs += horiz ? imgStride : imgStride * imgSize;
                target += horiz ? numImages : numImages * imgSize;
            }
            // Drain the remaining partial sums at the trailing edge.
            #pragma unroll
            for (int r = 0; r < radius; r++) {
                float* t = &target[0];
                t[0] = scaleTargets * t[0] + scaleOutputs * outputs[r];
                target += horiz ? numImages : numImages * imgSize;
            }
        } else {
            // Overwrite the target.
            for (int col = radius; col < imgSize ; col++) { // loop over img columns
                float px = imgs[0];
                target[0] = scaleOutputs * (outputs[0] + px * shFilter[0]);
                #pragma unroll
                for (int r = 1; r < radius*2; r++) {
                    outputs[r-1] = outputs[r] + px * shFilter[r];
                }
                outputs[filterWidth - 2] = px * shFilter[0];
                imgs += horiz ? imgStride : imgStride * imgSize;
                target += horiz ? numImages : numImages * imgSize;
            }
            #pragma unroll
            for (int r = 0; r < radius; r++) {
                target[0] = scaleOutputs * outputs[r];
                target += horiz ? numImages : numImages * imgSize;
            }
        }
    }
}
/*
 * Strided subsampling ("bed of nails"): forward mode copies every strideX-th
 * pixel (offset startX) into the output grid; reverse mode scatters output
 * gradients back into the image positions they came from.
 *
 * Block size B_YxB_X
 * blockIdx.x determines output.x, image idx in batches of B_X*imgsPerThread
 * blockIdx.y determines output.y, filter idx in batches of B_Y*filtersPerThread
 *
 * So each block does one output for some number of images/filters.
 *
 * threadIdx.x determines img idx
 * threadIdx.y determines filter idx
 *
 * imgs:    (numChannels, imgPixels, numImages)
 * target:  (numChannels, numOutputs, numImages)
 *
 * numImages must be divisible by B_X*imgsPerThread if checkCaseBounds is false
 * numFilters must be divisible by filtersPerThread
 */
template<int B_Y, int B_X, int imgsPerThread, int chansPerThread, bool checkCaseBounds>
__global__ void kBedOfNails(float* imgs, float* target, const int imgSize, const int numChannels,
                            const int numImages, const int startX, const int strideX, const int outputsX,
                            const bool reverse, const float scaleTargets, const float scaleOutput) {
    // Decode (output position, image batch, channel batch) from the grid.
    const int numImgBlocks = DIVUP(numImages,B_X*imgsPerThread);
    const int numChanBlocks = DIVUP(numChannels, B_Y*chansPerThread);
    const int outputIdxX = blockIdx.x / numImgBlocks;
    const int outputIdxY = blockIdx.y / numChanBlocks;
    const int blockImgIdx = (blockIdx.x % numImgBlocks) * B_X * imgsPerThread;
    const int blockChanIdx = (blockIdx.y % numChanBlocks) * B_Y * chansPerThread;
    const int myChanIdx = (blockChanIdx + threadIdx.y*chansPerThread);
    if (myChanIdx >= numChannels) {
        return;
    }
//    if (blockIdx.x != 0 || blockIdx.y != 0) {
//        return;
//    }
    const int outputIdx = outputIdxY * outputsX + outputIdxX;
    const int numOutputs = outputsX * outputsX;
    const int imgPixels = imgSize * imgSize;
    // Image pixel sampled by this output position.
    const int startImgPxX = startX + outputIdxX * strideX;
    const int startImgPxY = startX + outputIdxY * strideX;
    const int imgIdx = blockImgIdx + threadIdx.x;
    const int imgPx = startImgPxY * imgSize + startImgPxX;
    imgs += myChanIdx * imgPixels * numImages + imgPx * numImages + imgIdx;
    target += (myChanIdx * numOutputs + outputIdx) * numImages + imgIdx;
    // Four variants: {accumulate, overwrite} x {forward, reverse}.
    if (scaleTargets != 0) {
        if (!reverse) {
            #pragma unroll
            for (int i = 0; i < imgsPerThread; i++) {
                if (!checkCaseBounds || imgIdx + i * B_X < numImages) {
                    #pragma unroll
                    for (int c = 0; c < chansPerThread; c++) {
                        target[c * numOutputs * numImages + i * B_X] = scaleTargets * target[c * numOutputs * numImages + i * B_X] + scaleOutput * imgs[c * imgPixels * numImages + i * B_X];
                    }
                }
            }
        } else {
            #pragma unroll
            for (int i = 0; i < imgsPerThread; i++) {
                if (!checkCaseBounds || imgIdx + i * B_X < numImages) {
                    #pragma unroll
                    for (int c = 0; c < chansPerThread; c++) {
                        imgs[c * imgPixels * numImages + i * B_X] = scaleTargets * imgs[c * imgPixels * numImages + i * B_X] + scaleOutput * target[c * numOutputs * numImages + i * B_X];
                    }
                }
            }
        }
    } else {
        if (!reverse) {
            #pragma unroll
            for (int i = 0; i < imgsPerThread; i++) {
                if (!checkCaseBounds || imgIdx + i * B_X < numImages) {
                    #pragma unroll
                    for (int c = 0; c < chansPerThread; c++) {
                        target[c * numOutputs * numImages + i * B_X] = scaleOutput * imgs[c * imgPixels * numImages + i * B_X];
                    }
                }
            }
        } else {
            #pragma unroll
            for (int i = 0; i < imgsPerThread; i++) {
                if (!checkCaseBounds || imgIdx + i * B_X < numImages) {
                    #pragma unroll
                    for (int c = 0; c < chansPerThread; c++) {
                        imgs[c * imgPixels * numImages + i * B_X] = scaleOutput * target[c * numOutputs * numImages + i * B_X];
                    }
                }
            }
        }
    }
}
/*
 * Host dispatcher for kBedOfNails (forward subsampling or reverse gradient
 * scatter). Validates shapes, sizes the output, picks a template
 * instantiation based on divisibility of numImages/numChannels, and launches.
 *
 * imgs:    (numChannels, imgPixels, numImages)
 * target:  (numChannels, outputs, numImages)
 */
void _convBedOfNails(NVMatrix& images, NVMatrix& target, int numChannels, int imgSize, int startX, int strideX,
                     bool reverse, float scaleTargets, float scaleOutput) {
    // In reverse mode `target` holds the gradients being scattered, so the
    // case count comes from it.
    int numImages = reverse ? target.getNumCols() : images.getNumCols();
    int imgPixels = imgSize * imgSize;
    assert(!images.isTrans());
    assert(!target.isTrans());
    assert(images.isContiguous());
    assert(target.isContiguous());
    assert(strideX > 1);
    int outputsX = DIVUP(imgSize, strideX);
    int outputs = outputsX * outputsX;
    if (reverse) {
        assert(target.getNumRows() == numChannels * outputs);
    } else {
        assert(images.getNumRows() == numChannels * imgPixels);
    }
    if (scaleTargets == 0) {
        // Overwrite mode: size (and for reverse, zero) the destination.
        if (reverse) {
            images.resize(numChannels * imgPixels, numImages);
            images.apply(NVMatrixOps::Zero());
        } else {
            target.resize(numChannels*outputs, numImages);
        }
    } else {
        // Accumulate mode: destination must already have the right shape.
        if (reverse) {
            assert(images.getNumRows() == numChannels * outputs);
            assert(images.getNumCols() == numImages);
        } else {
            assert(target.getNumRows() == numChannels * outputs);
            assert(target.getNumCols() == numImages);
        }
    }
    // Template parameters chosen from divisibility of the problem size.
    int imgsPerThread = numImages % 128 == 0 ? 4 : numImages % 64 == 0 ? 2 : 1;
    bool checkCaseBounds = numImages % (32*imgsPerThread) != 0;
    int chansPerThread = numChannels % 8 == 0 ? 2 : 1;
    dim3 threads(32, 4);
    dim3 blocks(DIVUP(numImages,32*imgsPerThread) * outputsX, DIVUP(numChannels, 4 * chansPerThread) * outputsX);
    // Exhaustive dispatch over (imgsPerThread, chansPerThread, checkCaseBounds).
    if (imgsPerThread == 4) {
        if (chansPerThread == 1) {
            if (checkCaseBounds) {
                hipFuncSetCacheConfig(kBedOfNails<4, 32, 4, 1, true>, hipFuncCachePreferL1);
                hipLaunchKernelGGL(( kBedOfNails<4, 32, 4, 1, true>), dim3(blocks), dim3(threads), 0, 0, images.getDevData(), target.getDevData(),
                                                          imgSize, numChannels, numImages, startX, strideX, outputsX,
                                                          reverse, scaleTargets, scaleOutput);
            } else {
                hipFuncSetCacheConfig(kBedOfNails<4, 32, 4, 1, false>, hipFuncCachePreferL1);
                hipLaunchKernelGGL(( kBedOfNails<4, 32, 4, 1, false>), dim3(blocks), dim3(threads), 0, 0, images.getDevData(), target.getDevData(),
                                                          imgSize, numChannels, numImages, startX, strideX, outputsX,
                                                          reverse, scaleTargets, scaleOutput);
            }
        } else {
            if (checkCaseBounds) {
                hipFuncSetCacheConfig(kBedOfNails<4, 32, 4, 2, true>, hipFuncCachePreferL1);
                hipLaunchKernelGGL(( kBedOfNails<4, 32, 4, 2, true>), dim3(blocks), dim3(threads), 0, 0, images.getDevData(), target.getDevData(),
                                                          imgSize, numChannels, numImages, startX, strideX, outputsX,
                                                          reverse, scaleTargets, scaleOutput);
            } else {
                hipFuncSetCacheConfig(kBedOfNails<4, 32, 4, 2, false>, hipFuncCachePreferL1);
                hipLaunchKernelGGL(( kBedOfNails<4, 32, 4, 2, false>), dim3(blocks), dim3(threads), 0, 0, images.getDevData(), target.getDevData(),
                                                          imgSize, numChannels, numImages, startX, strideX, outputsX,
                                                          reverse, scaleTargets, scaleOutput);
            }
        }
    } else if (imgsPerThread == 2) {
        if (chansPerThread == 1) {
            if (checkCaseBounds) {
                hipFuncSetCacheConfig(kBedOfNails<4, 32, 2, 1, true>, hipFuncCachePreferL1);
                hipLaunchKernelGGL(( kBedOfNails<4, 32, 2, 1, true>), dim3(blocks), dim3(threads), 0, 0, images.getDevData(), target.getDevData(),
                                                          imgSize, numChannels, numImages, startX, strideX, outputsX,
                                                          reverse, scaleTargets, scaleOutput);
            } else {
                hipFuncSetCacheConfig(kBedOfNails<4, 32, 2, 1, false>, hipFuncCachePreferL1);
                hipLaunchKernelGGL(( kBedOfNails<4, 32, 2, 1, false>), dim3(blocks), dim3(threads), 0, 0, images.getDevData(), target.getDevData(),
                                                          imgSize, numChannels, numImages, startX, strideX, outputsX,
                                                          reverse, scaleTargets, scaleOutput);
            }
        } else {
            if (checkCaseBounds) {
                hipFuncSetCacheConfig(kBedOfNails<4, 32, 2, 2, true>, hipFuncCachePreferL1);
                hipLaunchKernelGGL(( kBedOfNails<4, 32, 2, 2, true>), dim3(blocks), dim3(threads), 0, 0, images.getDevData(), target.getDevData(),
                                                          imgSize, numChannels, numImages, startX, strideX, outputsX,
                                                          reverse, scaleTargets, scaleOutput);
            } else {
                hipFuncSetCacheConfig(kBedOfNails<4, 32, 2, 2, false>, hipFuncCachePreferL1);
                hipLaunchKernelGGL(( kBedOfNails<4, 32, 2, 2, false>), dim3(blocks), dim3(threads), 0, 0, images.getDevData(), target.getDevData(),
                                                          imgSize, numChannels, numImages, startX, strideX, outputsX,
                                                          reverse, scaleTargets, scaleOutput);
            }
        }
    } else {
        if (chansPerThread == 1) {
            if (checkCaseBounds) {
                hipFuncSetCacheConfig(kBedOfNails<4, 32, 1, 1, true>, hipFuncCachePreferL1);
                hipLaunchKernelGGL(( kBedOfNails<4, 32, 1, 1, true>), dim3(blocks), dim3(threads), 0, 0, images.getDevData(), target.getDevData(),
                                                          imgSize, numChannels, numImages, startX, strideX, outputsX,
                                                          reverse, scaleTargets, scaleOutput);
            } else {
                hipFuncSetCacheConfig(kBedOfNails<4, 32, 1, 1, false>, hipFuncCachePreferL1);
                hipLaunchKernelGGL(( kBedOfNails<4, 32, 1, 1, false>), dim3(blocks), dim3(threads), 0, 0, images.getDevData(), target.getDevData(),
                                                          imgSize, numChannels, numImages, startX, strideX, outputsX,
                                                          reverse, scaleTargets, scaleOutput);
            }
        } else {
            if (checkCaseBounds) {
                hipFuncSetCacheConfig(kBedOfNails<4, 32, 1, 2, true>, hipFuncCachePreferL1);
                hipLaunchKernelGGL(( kBedOfNails<4, 32, 1, 2, true>), dim3(blocks), dim3(threads), 0, 0, images.getDevData(), target.getDevData(),
                                                          imgSize, numChannels, numImages, startX, strideX, outputsX,
                                                          reverse, scaleTargets, scaleOutput);
            } else {
                hipFuncSetCacheConfig(kBedOfNails<4, 32, 1, 2, false>, hipFuncCachePreferL1);
                hipLaunchKernelGGL(( kBedOfNails<4, 32, 1, 2, false>), dim3(blocks), dim3(threads), 0, 0, images.getDevData(), target.getDevData(),
                                                          imgSize, numChannels, numImages, startX, strideX, outputsX,
                                                          reverse, scaleTargets, scaleOutput);
            }
        }
    }
}
// Forward bed-of-nails subsampling: images -> target (see _convBedOfNails).
void convBedOfNails(NVMatrix& images, NVMatrix& target, int numChannels, int imgSize, int startX,
                    int strideX, float scaleTargets, float scaleOutput) {
    _convBedOfNails(images, target, numChannels, imgSize, startX, strideX, false, scaleTargets, scaleOutput);
}
// Backward pass of the bed-of-nails op: scatters actsGrad into target
// (note the argument swap — `target` is the image-shaped destination).
void convBedOfNailsUndo(NVMatrix& actsGrad, NVMatrix& target, int numChannels, int imgSize,
                        int startX, int strideX, float scaleTargets, float scaleOutput) {
    _convBedOfNails(target, actsGrad, numChannels, imgSize, startX, strideX, true, scaleTargets, scaleOutput);
}
/*
 * Host wrapper for kGaussianBlur: applies one separable Gaussian blur pass
 * (horizontal if `horiz`, else vertical) to every channel of every image.
 *
 * imgs:    (numChannels, imgPixels, numImages) with given imgStride
 * filter:  (1, 2*radius + 1)
 * target:  (numChannels, imgPixels, numImages)
 */
void convGaussianBlur(NVMatrix& images, NVMatrix& filter, NVMatrix& target, bool horiz, int numChannels,
                      float scaleTargets, float scaleOutputs) {
    int numImages = images.getNumCols();
    // Filter width 2*radius+1, so integer division by 2 recovers the radius.
    int radius = filter.getNumCols() / 2;
    int imgPixels = images.getNumRows() / numChannels;
    int imgSize = int(sqrt(imgPixels));
    assert(imgPixels == imgSize * imgSize);
    assert(radius >= 1 && radius <= 4);
    assert(imgSize >= 2 * radius + 1);
    assert(filter.getNumRows() == 1);
    assert(images.getNumRows() == numChannels * imgPixels);
    assert(!images.isTrans());
    assert(!filter.isTrans());
    assert(!target.isTrans());
    assert(target.isContiguous());
    if (scaleTargets == 0) {
        target.resize(images);
    } else {
        assert(target.isSameDims(images));
    }
    // One thread row per (channel, image row); 32 images per thread row.
    dim3 threads(32, 4);
    dim3 blocks(DIVUP(numImages, threads.x), DIVUP(numChannels*imgSize, threads.y));
    // Dispatch on the compile-time radius.
    if (radius == 1) {
        hipFuncSetCacheConfig(kGaussianBlur<4, 32, 1>, hipFuncCachePreferL1);
        hipLaunchKernelGGL(( kGaussianBlur<4, 32, 1>), dim3(blocks), dim3(threads), 0, 0, images.getDevData(), filter.getDevData(), target.getDevData(),
                                               imgSize, numImages, images.getStride(), horiz, scaleTargets, scaleOutputs);
    } else if (radius == 2) {
        hipFuncSetCacheConfig(kGaussianBlur<4, 32, 2>, hipFuncCachePreferL1);
        hipLaunchKernelGGL(( kGaussianBlur<4, 32, 2>), dim3(blocks), dim3(threads), 0, 0, images.getDevData(), filter.getDevData(), target.getDevData(),
                                               imgSize, numImages, images.getStride(), horiz, scaleTargets, scaleOutputs);
    } else if (radius == 3) {
        hipFuncSetCacheConfig(kGaussianBlur<4, 32, 3>, hipFuncCachePreferL1);
        hipLaunchKernelGGL(( kGaussianBlur<4, 32, 3>), dim3(blocks), dim3(threads), 0, 0, images.getDevData(), filter.getDevData(), target.getDevData(),
                                               imgSize, numImages, images.getStride(), horiz, scaleTargets, scaleOutputs);
    } else if (radius == 4) {
        hipFuncSetCacheConfig(kGaussianBlur<4, 32, 4>, hipFuncCachePreferL1);
        hipLaunchKernelGGL(( kGaussianBlur<4, 32, 4>), dim3(blocks), dim3(threads), 0, 0, images.getDevData(), filter.getDevData(), target.getDevData(),
                                               imgSize, numImages, images.getStride(), horiz, scaleTargets, scaleOutputs);
    }
}
/*
 * Contrast/response normalization for a small, compile-time filter count:
 * for each pixel, sums squared mean-differences over a sizeX x sizeX window,
 * forms denom = 1 + addScale * sum, stores it, and writes
 * target = img * denom^(-powScale).
 *
 * Block size 1x128
 * blockIdx.x determines pixel.x, image idx in batches of 128*imgsPerThread
 * blockIdx.y determines pixel.y
 *
 * So each block does one output for some number of images and all the fliters.
 *
 * threadIdx.x determines img idx
 *
 * imgs:        (numFilters, imgPixels, numImages)
 * meanDiffs:   (numFilters, imgPixels, numImages)
 * denoms:      (numFilters, imgPixels, numImages) (out)
 * target:      (numFilters, imgPixels, numImages) (out)
 *
 * numImages must be divisible by B_X*imgsPerThread if checkCaseBounds is false
 * numFilters must be divisible by B_Y*filtersPerThread
 */
template<int imgsPerThread, int numFilters, bool checkCaseBounds>
__global__ void kCNorm_fewfilter(float* imgs, float* meanDiffs, float* denoms, float* target, const int imgSize,
                                 const int numImages, const int sizeX, const float addScale, const float powScale) {
    const int imgPixels = imgSize * imgSize;
    const int numImgBlocks = DIVUP(numImages, 128*imgsPerThread);
    const int pxIdxX = blockIdx.x / numImgBlocks;
    const int pxIdxY = blockIdx.y;
    const int blockImgIdx = (blockIdx.x % numImgBlocks) * 128 * imgsPerThread;
    const int pxIdx = pxIdxY * imgSize + pxIdxX;
    // Top-left corner of the (roughly centered) normalization window.
    const int startPxX = -sizeX/2 + pxIdxX;
    const int startPxY = -sizeX/2 + pxIdxY;
    const int imgIdx = blockImgIdx + threadIdx.x;
    imgs += pxIdx * numImages + imgIdx;
    denoms += pxIdx * numImages + imgIdx;
    meanDiffs += imgIdx;
    target += pxIdx * numImages + imgIdx;
    // Per-thread sum of squares, per filter, per handled image.
    float prod[numFilters][imgsPerThread];
    #pragma unroll
    for (int i = 0; i < imgsPerThread; i++) {
        if (!checkCaseBounds || imgIdx + i * 128 < numImages) {
            #pragma unroll
            for (int f = 0; f < numFilters; f++) {
                prod[f][i] = 0;
            }
        }
    }
    // Clip the window to the image borders.
    const int loopStartY = MAX(0, startPxY);
    const int loopStartX = MAX(0, startPxX);
    const int loopEndY = MIN(imgSize, startPxY + sizeX);
    const int loopEndX = MIN(imgSize, startPxX + sizeX);
    for (int y = loopStartY; y < loopEndY; y++) {
        for (int x = loopStartX; x < loopEndX; x++) {
            const int imgPx = y * imgSize + x;
            #pragma unroll
            for (int i = 0; i < imgsPerThread; i++) {
                if (!checkCaseBounds || imgIdx + i * 128 < numImages) {
                    #pragma unroll
                    for (int f = 0; f < numFilters; f++) {
                        prod[f][i] += square(meanDiffs[(f * imgPixels + imgPx) * numImages + i * 128]);
                    }
                }
            }
        }
    }
    // Form the denominator, save it, and write the normalized output.
    #pragma unroll
    for (int i = 0; i < imgsPerThread; i++) {
        if (!checkCaseBounds || imgIdx + i * 128 < numImages) {
            #pragma unroll
            for (int f = 0; f < numFilters; f++) {
                prod[f][i] = 1 + addScale * prod[f][i];
                denoms[f * imgPixels * numImages + i * 128] = prod[f][i];
                target[f * imgPixels * numImages + i * 128] = imgs[f * imgPixels * numImages + i * 128] * __powf(prod[f][i], -powScale);
            }
        }
    }
}
/*
* Block size B_YxB_X
* blockIdx.x determines pixel.x, image idx in batches of B_X*imgsPerThread
* blockIdx.y determines pixel.y, filter idx in batches of B_Y*filtersPerThread
*
* So each block does one pixel for some number of images/filters.
*
* threadIdx.x determines img idx
* threadIdx.y determines filter idx
*
* imgs: (numFilters, imgPixels, numImages)
* means: (numFilters, imgPixels, numImages)
* denoms: (numFilters, imgPixels, numImages) (out)
* target: (numFilters, imgPixels, numImages) (out)
*
* numImages must be divisible by B_X*imgsPerThread if checkCaseBounds is false
* numFilters must be divisible by B_Y*filtersPerThread
*/
template<int B_Y, int B_X, int imgsPerThread, int filtersPerThread, bool checkCaseBounds>
__global__ void kCNorm_manyfilter(float* imgs, float* meanDiffs, float* denoms, float* target, const int imgSize,
const int numFilters, const int numImages, const int sizeX,
const float addScale, const float powScale) {
// Response normalization, many-filter variant: filters are tiled over
// threadIdx.y in groups of B_Y*filtersPerThread. Same math as
// kCNorm_fewfilter: denoms = 1 + addScale * (window sum of meanDiffs^2),
// target = imgs * denoms^(-powScale).
const int imgPixels = imgSize * imgSize;
const int numImgBlocks = DIVUP(numImages, B_X*imgsPerThread);
const int numFilterBlocks = numFilters/(B_Y*filtersPerThread);
// blockIdx.x packs (pixel.x, image block); blockIdx.y packs (pixel.y, filter block).
const int pxIdxX = blockIdx.x / numImgBlocks;
const int pxIdxY = blockIdx.y / numFilterBlocks;
const int blockImgIdx = (blockIdx.x % numImgBlocks) * B_X * imgsPerThread;
const int blockFilterIdx = (blockIdx.y % numFilterBlocks) * B_Y * filtersPerThread;
const int pxIdx = pxIdxY * imgSize + pxIdxX;
// Top-left corner of the (unclamped) normalization window.
const int startPxX = -sizeX/2 + pxIdxX;
const int startPxY = -sizeX/2 + pxIdxY;
const int imgIdx = blockImgIdx + threadIdx.x;
// Advance bases to this thread's (filter, pixel, image); meanDiffs stays at
// pixel 0 of its filter because the window pixel is folded in below.
imgs += ((blockFilterIdx + threadIdx.y) * imgPixels + pxIdx) * numImages + imgIdx;
meanDiffs += (blockFilterIdx + threadIdx.y) * imgPixels * numImages + imgIdx;
denoms += ((blockFilterIdx + threadIdx.y) * imgPixels + pxIdx) * numImages + imgIdx;
target += ((blockFilterIdx + threadIdx.y) * imgPixels + pxIdx) * numImages + imgIdx;
float prod[filtersPerThread][imgsPerThread];
// Zero the accumulators (guarded when numImages isn't a multiple of the tile).
#pragma unroll
for (int i = 0; i < imgsPerThread; i++) {
if (!checkCaseBounds || imgIdx + i * B_X < numImages) {
#pragma unroll
for (int f = 0; f < filtersPerThread; f++) {
prod[f][i] = 0;
}
}
}
// Clamp the window to the image borders.
const int loopStartY = MAX(0, startPxY);
const int loopStartX = MAX(0, startPxX);
const int loopEndY = MIN(imgSize, startPxY + sizeX);
const int loopEndX = MIN(imgSize, startPxX + sizeX);
// Accumulate squared mean-differences over the clamped window.
for (int y = loopStartY; y < loopEndY; y++) {
for (int x = loopStartX; x < loopEndX; x++) {
const int imgPx = y * imgSize + x;
#pragma unroll
for (int i = 0; i < imgsPerThread; i++) {
if (!checkCaseBounds || imgIdx + i * B_X < numImages) {
#pragma unroll
for (int f = 0; f < filtersPerThread; f++) {
prod[f][i] += square(meanDiffs[(f * B_Y * imgPixels + imgPx) * numImages + i * B_X]);
}
}
}
}
}
// Write the denominator and the normalized activation.
#pragma unroll
for (int i = 0; i < imgsPerThread; i++) {
if (!checkCaseBounds || imgIdx + i * B_X < numImages) {
#pragma unroll
for (int f = 0; f < filtersPerThread; f++) {
prod[f][i] = 1 + addScale * prod[f][i];
denoms[f * B_Y * imgPixels * numImages + i * B_X] = prod[f][i];
target[f * B_Y * imgPixels * numImages + i * B_X] = imgs[f * B_Y * imgPixels * numImages + i * B_X] * __powf(prod[f][i], -powScale);
}
}
}
}
/*
* Block size 16xB_X
* blockIdx.x determines 4x4 pixel.x region, image idx in batches of B_X*imgsPerThread
* blockIdx.y determines 4x4 pixel.y region, filter idx in batches of filtersPerThread
*
* So each block does 4x4 region of pixels for some number of images/filters.
*
* threadIdx.x determines img idx
* threadIdx.y determines pixel idx
*
* imgs: (numFilters, imgPixels, numImages)
* means: (numFilters, imgPixels, numImages)
* denoms: (numFilters, imgPixels, numImages) (out)
* target: (numFilters, imgPixels, numImages) (out)
*
* B_X one of 8, 16, 32
* imgsPerThread one of 1, 2, 4, 8, 16
*
* B_XximgsPerThread MUST be divisible by 32.
* Number of filters MUST be divisible by filtersPerThread.
*
* numImages must be divisible by B_X*imgsPerThread if checkCaseBounds is false
* numFilters must be divisible by filtersPerThread
*
* Final write-out will not be fully coalesced unless B_X is 32. But there's a lot more
* reading than writing here, and the reading is all coalesced, so it should be OK.
*/
template<int B_X, int imgsPerThread, int filtersPerThread, bool checkCaseBounds>
__global__ void kCNorm2(float* imgs, float* meanDiffs, float* denoms, float* target, const int imgSize,
const int numFilters, const int numImages, const int sizeX, const float addScale, const float powScale) {
// Response normalization over a 4x4 pixel region per block, with the
// meanDiffs for each candidate window pixel staged through shared memory so
// all 16 pixel-rows of threads can reuse one coalesced load.
__shared__ float shDiffs[filtersPerThread][B_X*imgsPerThread];
const int imgPixels = imgSize * imgSize;
const int numImgBlocks = DIVUP(numImages, B_X*imgsPerThread);
const int numFilterBlocks = numFilters/(filtersPerThread);
// blockIdx.x packs (4x4-region.x, image block); blockIdx.y packs (4x4-region.y, filter block).
const int blockPxX = 4*(blockIdx.x / numImgBlocks);
const int blockPxY = 4*(blockIdx.y / numFilterBlocks);
const int blockImgIdx = (blockIdx.x % numImgBlocks) * B_X * imgsPerThread;
const int blockFilterIdx = (blockIdx.y % numFilterBlocks) * filtersPerThread;
// Flat thread id, split into a (row, lane-of-32) pair for the cooperative load.
const int tidx = threadIdx.y * B_X + threadIdx.x;
const int loadY = tidx / 32, loadX = tidx % 32;
// Union of all windows needed by the 4x4 region, clamped to the image.
const int startPxX = MAX(0, -sizeX/2 + blockPxX);
const int startPxY = MAX(0, -sizeX/2 + blockPxY);
const int endPxX = MIN(imgSize, blockPxX + DIVUP(sizeX, 2) + 3);
const int endPxY = MIN(imgSize, blockPxY + DIVUP(sizeX, 2) + 3);
// This thread's own output pixel within the 4x4 region (threadIdx.y in 0..15).
const int myPxX = blockPxX + threadIdx.y % 4;
const int myPxY = blockPxY + threadIdx.y / 4;
const int myPxIdx = myPxY * imgSize + myPxX;
// const bool doWork = myPxX < imgSize && myPxY < imgSize;
// This thread's own (unclamped) window bounds, used to filter staged pixels.
const int myStartPxY = -sizeX/2 + myPxY;
const int myStartPxX = -sizeX/2 + myPxX;
const int myEndPxY = myPxY + DIVUP(sizeX, 2);
const int myEndPxX = myPxX + DIVUP(sizeX, 2);
const int imgIdx = blockImgIdx + threadIdx.x;
imgs += (blockFilterIdx * imgPixels + myPxIdx) * numImages + imgIdx;
// meanDiffs base uses the load coordinates (loadY, loadX), not this thread's own.
meanDiffs += (blockFilterIdx + loadY) * imgPixels * numImages + blockImgIdx + loadX;
denoms += (blockFilterIdx * imgPixels + myPxIdx) * numImages + imgIdx;
target += (blockFilterIdx * imgPixels + myPxIdx) * numImages + imgIdx;
float prod[filtersPerThread][imgsPerThread];
#pragma unroll
for (int i = 0; i < imgsPerThread; i++) {
if (!checkCaseBounds || imgIdx + i * B_X < numImages) {
#pragma unroll
for (int f = 0; f < filtersPerThread; f++) {
prod[f][i] = 0;
}
}
}
for (int y = startPxY; y < endPxY; y++) {
const bool isInY = y >= myStartPxY && y < myEndPxY;
for (int x = startPxX; x < endPxX; x++) {
const int px = y * imgSize + x;
// All the threads load a pixel from memory
#pragma unroll
for (int ly = 0; ly < filtersPerThread; ly += B_X/2) {
if (filtersPerThread % (B_X/2) == 0 || ly + loadY < filtersPerThread) {
#pragma unroll
for (int lx = 0; lx < B_X*imgsPerThread; lx += 32) {
if (!checkCaseBounds || lx + loadX + blockImgIdx < numImages) {
shDiffs[ly + loadY][lx + loadX] = meanDiffs[(ly * imgPixels + px) * numImages + lx];
}
}
}
}
// Barrier: the cooperative tile load above must complete before any
// thread reads shDiffs below.
__syncthreads();
// Each row of threads decides if it's interested in this pixel
if (isInY && x >= myStartPxX && x < myEndPxX) {
#pragma unroll
for (int i = 0; i < imgsPerThread; i++) {
if (!checkCaseBounds || imgIdx + i * B_X < numImages) {
#pragma unroll
for (int f = 0; f < filtersPerThread; f++) {
prod[f][i] += square(shDiffs[f][threadIdx.x + i * B_X]);
}
}
}
}
// Barrier: all reads of shDiffs must finish before the next iteration
// overwrites the tile.
__syncthreads();
}
}
// imgs -= (loadY * imgPixels - myPxIdx) * numImages + loadX;
// imgs += threadIdx.x;
// Write out only for threads whose pixel actually lies inside the image.
if (myPxX < imgSize && myPxY < imgSize) {
#pragma unroll
for (int i = 0; i < imgsPerThread; i++) {
if (!checkCaseBounds || imgIdx + i * B_X < numImages) {
#pragma unroll
for (int f = 0; f < filtersPerThread; f++) {
prod[f][i] = 1 + addScale * prod[f][i];
denoms[f * imgPixels * numImages + i * B_X] = prod[f][i];
target[f * imgPixels * numImages + i * B_X] = imgs[f * imgPixels * numImages + i * B_X] * __powf(prod[f][i], -powScale);
}
}
}
}
}
/*
* Block size B_YxB_X
* blockIdx.x determines pixel.x, image idx in batches of B_X*imgsPerThread
* blockIdx.y determines pixel.y, filter idx in batches of B_Y
*
* So each block does one pixel for some number of images/filters.
*
* threadIdx.x determines img idx
* threadIdx.y determines filter idx
*
* imgs: (numFilters, imgPixels, numImages)
* meanDiffs: (numFilters, imgPixels, numImages)
* denoms: (numFilters, imgPixels, numImages) (out)
* target: (numFilters, imgPixels, numImages) (out)
*
* numImages must be divisible by B_X*imgsPerThread if checkCaseBounds is false
* numFilters must be divisible by B_Y
*/
template<int B_Y, int B_X, int imgsPerThread, bool checkCaseBounds, bool blocked>
__global__ void kFCNorm(float* imgs, float* meanDiffs, float* denoms, float* target, const int imgSize,
const int numFilters, const int numImages, const int sizeF,
const float addScale, const float powScale) {
// Normalization ACROSS the filter dimension: the window is sizeF filters at
// one fixed pixel. 'blocked' selects contiguous filter groups of size sizeF;
// otherwise a sliding window centered (with floor offset) on filterIdx.
const int imgPixels = imgSize * imgSize;
const int numImgBlocks = DIVUP(numImages, B_X*imgsPerThread);
const int numFilterBlocks = numFilters/B_Y;
// blockIdx.x packs (pixel.x, image block); blockIdx.y packs (pixel.y, filter block).
const int pxIdxX = blockIdx.x / numImgBlocks;
const int pxIdxY = blockIdx.y / numFilterBlocks;
const int blockImgIdx = (blockIdx.x % numImgBlocks) * B_X * imgsPerThread;
const int filterIdx = (blockIdx.y % numFilterBlocks) * B_Y + threadIdx.y;
const int pxIdx = pxIdxY * imgSize + pxIdxX;
const int imgIdx = blockImgIdx + threadIdx.x;
// meanDiffs stays at filter 0; the filter offset is applied in the loop below.
imgs += ((filterIdx) * imgPixels + pxIdx) * numImages + imgIdx;
meanDiffs += pxIdx * numImages + imgIdx;
denoms += ((filterIdx) * imgPixels + pxIdx) * numImages + imgIdx;
target += ((filterIdx) * imgPixels + pxIdx) * numImages + imgIdx;
float prod[imgsPerThread];
#pragma unroll
for (int i = 0; i < imgsPerThread; i++) {
if (!checkCaseBounds || imgIdx + i * B_X < numImages) {
prod[i] = 0;
}
}
// Filter window: either this filter's sizeF-aligned group (blocked) or a
// sliding window clamped to [0, numFilters).
const int startF = blocked ? (filterIdx / sizeF) * sizeF : -sizeF/2 + filterIdx;
const int loopStartF = blocked ? startF : MAX(0, startF);
const int loopEndF = MIN(numFilters, startF + sizeF);
for (int f = loopStartF; f < loopEndF; ++f) {
#pragma unroll
for (int i = 0; i < imgsPerThread; i++) {
if (!checkCaseBounds || imgIdx + i * B_X < numImages) {
prod[i] += square(meanDiffs[f * imgPixels * numImages + i * B_X]);
}
}
}
// denoms = 1 + addScale * sum; target = imgs * denoms^(-powScale).
#pragma unroll
for (int i = 0; i < imgsPerThread; i++) {
if (!checkCaseBounds || imgIdx + i * B_X < numImages) {
prod[i] = 1 + addScale * prod[i];
denoms[i * B_X] = prod[i];
target[i * B_X] = imgs[i * B_X] * __powf(prod[i], -powScale);
}
}
}
/*
* Block size B_YxB_X
* blockIdx.x determines pixel.x, image idx in batches of B_X*imgsPerThread
* blockIdx.y determines pixel.y, filter idx in batches of B_Y
*
* So each block does one output pixel for some number of images/filters.
*
* threadIdx.x determines img idx
* threadIdx.y determines filter idx
*
* outGrads: (numFilters, imgPixels, numImages)
* denoms: (numFilters, imgPixels, numImages)
* inputs: (numFilters, imgPixels, numImages)
* acts: (numFilters, imgPixels, numImages)
* target: (numFilters, imgPixels, numImages)
*
* numImages must be divisible by B_X*imgsPerThread
* numFilters must be divisible by B_Y
*
* TODO: this isn't really ideal
*/
template<int B_Y, int B_X, int imgsPerThread, bool add, bool checkCaseBounds, bool blocked>
__global__ void kFRNormUndo(float* outGrads, float* denoms, float* inputs, float* acts, float* target, const int imgSize, const int numFilters,
const int numImages, const int sizeF, const float powScale, const float scaleTargets, const float scaleOutputs) {
// Backward pass of the cross-filter normalization (kFCNorm): sums 'acts'
// over the (reflected) filter window, then combines
//   prod = inputs * sum + outGrads * denoms^(-powScale),
// writing it to target directly (!add) or blended via scaleTargets/scaleOutputs.
const int numImgBlocks = DIVUP(numImages,B_X*imgsPerThread);
const int numFilterBlocks = numFilters/B_Y;
// blockIdx.x packs (pixel.x, image block); blockIdx.y packs (pixel.y, filter block).
const int pxIdxX = blockIdx.x / numImgBlocks;
const int pxIdxY = blockIdx.y / numFilterBlocks;
const int blockImgIdx = (blockIdx.x % numImgBlocks) * B_X * imgsPerThread;
const int filterIdx = (blockIdx.y % numFilterBlocks) * B_Y + threadIdx.y;
const int imgPixels = imgSize * imgSize;
const int pxIdx = pxIdxY * imgSize + pxIdxX;
const int imgIdx = blockImgIdx + threadIdx.x;
// acts stays at filter 0 for this pixel; the loop below indexes absolute filters.
acts += pxIdx * numImages + imgIdx;
inputs += ((filterIdx) * imgPixels + pxIdx) * numImages + imgIdx;
denoms += ((filterIdx) * imgPixels + pxIdx) * numImages + imgIdx;
outGrads += ((filterIdx) * imgPixels + pxIdx) * numImages + imgIdx;
target += ((filterIdx) * imgPixels + pxIdx) * numImages + imgIdx;
float prod[imgsPerThread];
// if (imgIdx != 0 || pxIdx != 0 || filterIdx != 0) {
// return;
// }
#pragma unroll
for (int i = 0; i < imgsPerThread; i++) {
prod[i] = 0;
}
// Gradient window over filters: the reflection of the forward window
// (note '-sizeF + sizeF/2 + 1' vs. the forward '-sizeF/2'), clamped unless blocked.
const int startF = blocked ? (filterIdx / sizeF) * sizeF : -sizeF + sizeF/2 + 1 + filterIdx;
const int loopStartF = blocked ? startF : MAX(0, startF);
const int loopEndF = MIN(numFilters, startF + sizeF);
for (int f = loopStartF; f < loopEndF; ++f) {
#pragma unroll
for (int i = 0; i < imgsPerThread; i++) {
if (!checkCaseBounds || imgIdx + i * B_X < numImages) {
prod[i] += acts[f * imgPixels * numImages + i * B_X];
}
}
}
// printf("gpu f start: %d, end: %d\n", loopStartF, loopEndF);
if (!add) {
#pragma unroll
for (int i = 0; i < imgsPerThread; i++) {
if (!checkCaseBounds || imgIdx + i * B_X < numImages) {
const float inp = inputs[i * B_X];
const float out = outGrads[i * B_X];
const float den = denoms[i * B_X];
prod[i] = inp * prod[i] + out * __powf(den, -powScale);
target[i * B_X] = prod[i];
}
}
} else {
// Accumulate into the existing target.
#pragma unroll
for (int i = 0; i < imgsPerThread; i++) {
if (!checkCaseBounds || imgIdx + i * B_X < numImages) {
const float inp = inputs[i * B_X];
const float out = outGrads[i * B_X];
const float den = denoms[i * B_X];
prod[i] = inp * prod[i] + out * __powf(den, -powScale);
target[i * B_X] = scaleTargets * target[i * B_X] + scaleOutputs * prod[i];
}
}
}
}
/*
* Block size B_YxB_X
* blockIdx.x determines pixel.x, image idx in batches of B_X*imgsPerThread
* blockIdx.y determines pixel.y, filter idx in batches of B_Y*filtersPerThread
*
* So each block does one pixel for some number of images/filters.
*
* threadIdx.x determines img idx
* threadIdx.y determines filter idx
*
* imgs: (numFilters, imgPixels, numImages)
* target: (numFilters, imgPixels, numImages)
*
* numImages must be divisible by B_X*imgsPerThread if checkCaseBounds is false
* numFilters must be divisible by B_Y*filtersPerThread
*
* sizeX should be something like 3 or 5 for this function. Not much more.
* TODO: write variant where each block does 4x4 region or so (this'll be based on kCNorm2).
*/
template<int B_Y, int B_X, int imgsPerThread, int filtersPerThread, bool checkCaseBounds>
__global__ void kTICA_manyfilter(float* imgs, float* target, const int imgSize,
                                 const int numFilters, const int numImages, const int sizeX,
                                 const float scaleTarget, const float scaleOutput) {
    // TICA pooling response: for each (filter, pixel, image) computes
    //   out = scaleOutput / (0.001f + sqrt(sum of imgs^2 over a sizeX x sizeX
    //         spatial window)),
    // written directly when scaleTarget == 0, otherwise blended with the
    // existing target as scaleTarget*target + out.
    //
    // Fixes vs. previous revision:
    //  - epsilon literal is now 0.001f; the former '0.001' was a double
    //    literal, forcing a float->double->float round trip in the innermost
    //    expression of a float kernel;
    //  - removed a dead 'imgs += pxIdx * numImages;' after the accumulation
    //    loop (imgs is a local pointer and is never read after that point).
    const int imgPixels = imgSize * imgSize;
    const int numImgBlocks = DIVUP(numImages, B_X*imgsPerThread);
    const int numFilterBlocks = numFilters/(B_Y*filtersPerThread);
    // blockIdx.x packs (pixel.x, image block); blockIdx.y packs (pixel.y, filter block).
    const int pxIdxX = blockIdx.x / numImgBlocks;
    const int pxIdxY = blockIdx.y / numFilterBlocks;
    const int blockImgIdx = (blockIdx.x % numImgBlocks) * B_X * imgsPerThread;
    const int blockFilterIdx = (blockIdx.y % numFilterBlocks) * B_Y * filtersPerThread;
    const int pxIdx = pxIdxY * imgSize + pxIdxX;
    // Top-left corner of the (unclamped) pooling window.
    const int startPxX = -sizeX/2 + pxIdxX;
    const int startPxY = -sizeX/2 + pxIdxY;
    const int imgIdx = blockImgIdx + threadIdx.x;
    // imgs stays at pixel 0 of this thread's filter; the window pixel is
    // folded into the index inside the accumulation loop.
    imgs += ((blockFilterIdx + threadIdx.y) * imgPixels) * numImages + imgIdx;
    target += ((blockFilterIdx + threadIdx.y) * imgPixels + pxIdx) * numImages + imgIdx;
    float prod[filtersPerThread][imgsPerThread];
    // Zero the accumulators (guarded when numImages isn't a multiple of the tile).
#pragma unroll
    for (int i = 0; i < imgsPerThread; i++) {
        if (!checkCaseBounds || imgIdx + i * B_X < numImages) {
#pragma unroll
            for (int f = 0; f < filtersPerThread; f++) {
                prod[f][i] = 0;
            }
        }
    }
    // Clamp the pooling window to the image borders.
    const int loopStartY = MAX(0, startPxY);
    const int loopStartX = MAX(0, startPxX);
    const int loopEndY = MIN(imgSize, startPxY + sizeX);
    const int loopEndX = MIN(imgSize, startPxX + sizeX);
    for (int y = loopStartY; y < loopEndY; y++) {
        for (int x = loopStartX; x < loopEndX; x++) {
            const int imgPx = y * imgSize + x;
#pragma unroll
            for (int i = 0; i < imgsPerThread; i++) {
                if (!checkCaseBounds || imgIdx + i * B_X < numImages) {
#pragma unroll
                    for (int f = 0; f < filtersPerThread; f++) {
                        prod[f][i] += square(imgs[(f * B_Y * imgPixels + imgPx) * numImages + i * B_X]);
                    }
                }
            }
        }
    }
    if (scaleTarget == 0) {
#pragma unroll
        for (int i = 0; i < imgsPerThread; i++) {
            if (!checkCaseBounds || imgIdx + i * B_X < numImages) {
#pragma unroll
                for (int f = 0; f < filtersPerThread; f++) {
                    // 0.001f keeps the denominator strictly positive.
                    target[f * B_Y * imgPixels * numImages + i * B_X] = scaleOutput * __fdividef(1.0f, 0.001f + sqrtf(prod[f][i]));
                }
            }
        }
    } else {
#pragma unroll
        for (int i = 0; i < imgsPerThread; i++) {
            if (!checkCaseBounds || imgIdx + i * B_X < numImages) {
#pragma unroll
                for (int f = 0; f < filtersPerThread; f++) {
                    target[f * B_Y * imgPixels * numImages + i * B_X] = scaleTarget * target[f * B_Y * imgPixels * numImages + i * B_X] + scaleOutput * __fdividef(1.0f, 0.001f + sqrtf(prod[f][i]));
                }
            }
        }
    }
}
/*
* Block size B_YxB_X
* blockIdx.x determines pixel.x, image idx in batches of B_X*imgsPerThread
* blockIdx.y determines pixel.y, filter idx in batches of B_Y*filtersPerThread
*
* So each block does one pixel for some number of images/filters.
*
* threadIdx.x determines img idx
* threadIdx.y determines filter idx
*
* imgs: (numFilters, imgPixels, numImages)
* ticas: (numFilters, imgPixels, numImages)
* target: (numFilters, imgPixels, numImages)
*
* numImages must be divisible by B_X*imgsPerThread if checkCaseBounds is false
* numFilters must be divisible by B_Y*filtersPerThread
*
* sizeX should be something like 3 or 5 for this function. Not much more.
* TODO: write variant where each block does 4x4 region or so (this'll be based on kCNorm2).
*/
template<int B_Y, int B_X, int imgsPerThread, int filtersPerThread, bool checkCaseBounds>
__global__ void kTICAGrad_manyfilter(float* imgs, float* ticas, float* target, const int imgSize,
const int numFilters, const int numImages, const int sizeX,
const float scaleTarget, const float scaleOutput) {
// TICA gradient: sums the precomputed 'ticas' (1/S) values over a
// sizeX x sizeX spatial window, then combines with -imgs.
// NOTE(review): the scaleTarget==0 branch uses prod directly while the
// accumulate branch applies sqrtf(prod) — confirm this asymmetry is intended.
const int imgPixels = imgSize * imgSize;
const int numImgBlocks = DIVUP(numImages, B_X*imgsPerThread);
const int numFilterBlocks = numFilters/(B_Y*filtersPerThread);
// blockIdx.x packs (pixel.x, image block); blockIdx.y packs (pixel.y, filter block).
const int pxIdxX = blockIdx.x / numImgBlocks;
const int pxIdxY = blockIdx.y / numFilterBlocks;
const int blockImgIdx = (blockIdx.x % numImgBlocks) * B_X * imgsPerThread;
const int blockFilterIdx = (blockIdx.y % numFilterBlocks) * B_Y * filtersPerThread;
const int pxIdx = pxIdxY * imgSize + pxIdxX;
// Top-left corner of the (unclamped) window.
const int startPxX = -sizeX/2 + pxIdxX;
const int startPxY = -sizeX/2 + pxIdxY;
const int imgIdx = blockImgIdx + threadIdx.x;
// ticas stays at pixel 0 of its filter; the window pixel is folded in below.
imgs += ((blockFilterIdx + threadIdx.y) * imgPixels + pxIdx) * numImages + imgIdx;
ticas += ((blockFilterIdx + threadIdx.y) * imgPixels) * numImages + imgIdx;
target += ((blockFilterIdx + threadIdx.y) * imgPixels + pxIdx) * numImages + imgIdx;
float prod[filtersPerThread][imgsPerThread];
#pragma unroll
for (int i = 0; i < imgsPerThread; i++) {
if (!checkCaseBounds || imgIdx + i * B_X < numImages) {
#pragma unroll
for (int f = 0; f < filtersPerThread; f++) {
prod[f][i] = 0;
}
}
}
// Clamp the window to the image borders.
const int loopStartY = MAX(0, startPxY);
const int loopStartX = MAX(0, startPxX);
const int loopEndY = MIN(imgSize, startPxY + sizeX);
const int loopEndX = MIN(imgSize, startPxX + sizeX);
for (int y = loopStartY; y < loopEndY; y++) {
for (int x = loopStartX; x < loopEndX; x++) {
const int imgPx = y * imgSize + x;
#pragma unroll
for (int i = 0; i < imgsPerThread; i++) {
if (!checkCaseBounds || imgIdx + i * B_X < numImages) {
#pragma unroll
for (int f = 0; f < filtersPerThread; f++) {
// adding 1/S values
prod[f][i] += ticas[(f * B_Y * imgPixels + imgPx) * numImages + i * B_X];
}
}
}
}
}
if (scaleTarget == 0) {
// Overwrite: target = scaleOutput * (-imgs) * prod.
#pragma unroll
for (int i = 0; i < imgsPerThread; i++) {
if (!checkCaseBounds || imgIdx + i * B_X < numImages) {
#pragma unroll
for (int f = 0; f < filtersPerThread; f++) {
target[f * B_Y * imgPixels * numImages + i * B_X] = scaleOutput * -imgs[f * B_Y * imgPixels * numImages + i * B_X] * prod[f][i];
}
}
}
} else {
// Accumulate: target = scaleTarget*target + scaleOutput * (-imgs) * sqrt(prod).
#pragma unroll
for (int i = 0; i < imgsPerThread; i++) {
if (!checkCaseBounds || imgIdx + i * B_X < numImages) {
#pragma unroll
for (int f = 0; f < filtersPerThread; f++) {
target[f * B_Y * imgPixels * numImages + i * B_X] = scaleTarget * target[f * B_Y * imgPixels * numImages + i * B_X] + scaleOutput * -imgs[f * B_Y * imgPixels * numImages + i * B_X] * sqrtf(prod[f][i]);
}
}
}
}
}
/*
* Block size B_YxB_X
* blockIdx.x determines pixel.x, image idx in batches of B_X*imgsPerThread
* blockIdx.y determines pixel.y, filter idx in batches of B_Y*filtersPerThread
*
* So each block does one output pixel for some number of images/filters.
*
* threadIdx.x determines img idx
* threadIdx.y determines filter idx
*
* imgs: (numFilters, imgPixels, numImages)
* maxGrads: (numFilters, numOutputs, numImages)
* rMaxActs: (numFilters, numOutputs, numImages)
* target: (numFilters, imgPixels, numImages)
*
* numImages must be divisible by B_X*imgsPerThread
* numFilters must be divisible by B_Y*filtersPerThread
*/
template<int B_Y, int B_X, int imgsPerThread, int filtersPerThread, bool add, bool checkCaseBounds>
__global__ void kLocalAvgUndo(float* avgGrads, float* target, const int imgSize, const int numFilters,
const int numImages, const int subsX, const int startX, const int strideX, const int outputsX,
const float scaleTargets, const float scaleOutputs) {
// Backward pass of average pooling: each input pixel gathers the gradients
// of every pooling output whose subsX x subsX region covers it, each scaled
// by 1/regionSize (the clamped region area), then writes or blends into target.
const int numImgBlocks = DIVUP(numImages,B_X*imgsPerThread);
// blockIdx.x packs (pixel.x, image block); blockIdx.y packs (pixel.y, filter block).
const int blockPxX = blockIdx.x / numImgBlocks;
const int blockPxY = blockIdx.y / (numFilters/(B_Y*filtersPerThread));
const int blockImgIdx = (blockIdx.x % numImgBlocks) * B_X * imgsPerThread;
const int blockFilterIdx = (blockIdx.y % (numFilters/(B_Y*filtersPerThread))) * B_Y * filtersPerThread;
const int blockPx = blockPxY * imgSize + blockPxX;
const int numOutputs = outputsX * outputsX;
const int imgPixels = imgSize * imgSize;
// Range of pooling outputs whose region can contain this pixel.
const int startOutputY = blockPxY - startX < subsX ? 0 : 1 + (blockPxY - startX - subsX) / strideX;
const int endOutputY = MIN(outputsX, 1 + (blockPxY - startX) / strideX);
const int startOutputX = blockPxX - startX < subsX ? 0 : 1 + (blockPxX - startX - subsX) / strideX;
const int endOutputX = MIN(outputsX, 1 + (blockPxX - startX) / strideX);
const int imgIdx = blockImgIdx + threadIdx.x;
avgGrads += ((blockFilterIdx + threadIdx.y) * numOutputs) * numImages + imgIdx;
target += ((blockFilterIdx + threadIdx.y) * imgPixels + blockPx) * numImages + imgIdx;
float prod[filtersPerThread][imgsPerThread];
#pragma unroll
for (int f = 0; f < filtersPerThread; f++) {
#pragma unroll
for (int i = 0; i < imgsPerThread; i++) {
prod[f][i] = 0;
}
}
// Pixels outside every pooling region contribute nothing (prod stays 0).
if (blockPxX >= startX && blockPxX < startX + strideX * (outputsX-1) + subsX
&& blockPxY >= startX && blockPxY < startX + strideX * (outputsX-1) + subsX) {
for (int my = startOutputY; my < endOutputY; my++) {
// Region extent clamped to the image, computed in float for the division below.
const float regionStartY = fmaxf(0, startX + my * strideX);
const float regionEndY = fminf(imgSize, startX + my * strideX + subsX);
const float regionSizeY = regionEndY - regionStartY;
for (int mx = startOutputX; mx < endOutputX; mx++) {
const int outputIdx = my * outputsX + mx;
const float regionStartX = fmaxf(0, startX + mx * strideX);
const float regionEndX = fminf(imgSize, startX + mx * strideX + subsX);
const float regionSizeX = regionEndX - regionStartX;
// It's important to do the division here, because pushing division into the below
// loops makes the code 4x slower.
const float regionSizeInv = 1.0f / (regionSizeX * regionSizeY);
#pragma unroll
for (int i = 0; i < imgsPerThread; i++) {
if (!checkCaseBounds || imgIdx + i * B_X < numImages) {
#pragma unroll
for (int f = 0; f < filtersPerThread; f++) {
prod[f][i] += avgGrads[(f * B_Y * numOutputs + outputIdx) * numImages + i * B_X] * regionSizeInv;
}
}
}
}
}
}
if (!add) {
#pragma unroll
for (int i = 0; i < imgsPerThread; i++) {
if (!checkCaseBounds || imgIdx + i * B_X < numImages) {
#pragma unroll
for (int f = 0; f < filtersPerThread; f++) {
target[f * B_Y * imgPixels * numImages + i * B_X] = prod[f][i];
}
}
}
} else {
// Accumulate into the existing target.
#pragma unroll
for (int i = 0; i < imgsPerThread; i++) {
if (!checkCaseBounds || imgIdx + i * B_X < numImages) {
#pragma unroll
for (int f = 0; f < filtersPerThread; f++) {
target[f * B_Y * imgPixels * numImages + i * B_X] = scaleTargets * target[f * B_Y * imgPixels * numImages + i * B_X] + scaleOutputs * prod[f][i];
}
}
}
}
}
/*
* Block size B_YxB_X
* blockIdx.x determines pixel.x, image idx in batches of B_X*imgsPerThread
* blockIdx.y determines pixel.y, filter idx in batches of B_Y*filtersPerThread
*
* So each block does one output pixel for some number of images/filters.
*
* threadIdx.x determines img idx
* threadIdx.y determines filter idx
*
* imgs: (numFilters, imgPixels, numImages)
* maxGrads: (numFilters, numOutputs, numImages)
* maxActs: (numFilters, numOutputs, numImages)
* target: (numFilters, imgPixels, numImages)
*
* numImages must be divisible by B_X*imgsPerThread
* numFilters must be divisible by B_Y*filtersPerThread
*/
template<int B_Y, int B_X, int imgsPerThread, int filtersPerThread, bool add, bool checkCaseBounds>
__global__ void kLocalMaxUndo(float* imgs, float* maxGrads, float* maxActs, float* target, const int imgSize, const int numFilters,
const int numImages, const int subsX, const int startX, const int strideX, const int outputsX,
const float scaleTargets, const float scaleOutputs) {
// Backward pass of max pooling: a pixel receives a pooling output's gradient
// iff its value equals that output's max activation (img == ma below).
__shared__ float shImgs[B_Y*filtersPerThread][B_X*imgsPerThread];
const int numImgBlocks = DIVUP(numImages,B_X*imgsPerThread);
// blockIdx.x packs (pixel.x, image block); blockIdx.y packs (pixel.y, filter block).
const int blockPxX = blockIdx.x / numImgBlocks;
const int blockPxY = blockIdx.y / (numFilters/(B_Y*filtersPerThread));
const int blockImgIdx = (blockIdx.x % numImgBlocks) * B_X * imgsPerThread;
const int blockFilterIdx = (blockIdx.y % (numFilters/(B_Y*filtersPerThread))) * B_Y * filtersPerThread;
const int blockPx = blockPxY * imgSize + blockPxX;
const int numOutputs = outputsX * outputsX;
const int imgPixels = imgSize * imgSize;
// Range of pooling outputs whose subsX x subsX region can contain this pixel.
const int startOutputY = blockPxY - startX < subsX ? 0 : 1 + (blockPxY - startX - subsX) / strideX;
const int endOutputY = MIN(outputsX, 1 + (blockPxY - startX) / strideX);
const int startOutputX = blockPxX - startX < subsX ? 0 : 1 + (blockPxX - startX - subsX) / strideX;
const int endOutputX = MIN(outputsX, 1 + (blockPxX - startX) / strideX);
const int imgIdx = blockImgIdx + threadIdx.x;
imgs += ((blockFilterIdx + threadIdx.y) * imgPixels + blockPx) * numImages + imgIdx;
maxGrads += ((blockFilterIdx + threadIdx.y) * numOutputs) * numImages
+ imgIdx;
maxActs += ((blockFilterIdx + threadIdx.y) * numOutputs) * numImages
+ imgIdx;
target += ((blockFilterIdx + threadIdx.y) * imgPixels + blockPx) * numImages + imgIdx;
float prod[filtersPerThread][imgsPerThread];
#pragma unroll
for (int f = 0; f < filtersPerThread; f++) {
#pragma unroll
for (int i = 0; i < imgsPerThread; i++) {
prod[f][i] = 0;
}
}
// Pixels outside every pooling region contribute nothing (prod stays 0).
if (blockPxX >= startX && blockPxX < startX + strideX * (outputsX-1) + subsX
&& blockPxY >= startX && blockPxY < startX + strideX * (outputsX-1) + subsX) {
// Cache this thread's own image values in shared memory. Each thread
// writes and later reads only its own [threadIdx.y + B_Y*f][threadIdx.x + B_X*i]
// slots, so no __syncthreads() is needed here.
#pragma unroll
for (int i = 0; i < imgsPerThread; i++) {
if (!checkCaseBounds || imgIdx + i * B_X < numImages) {
#pragma unroll
for (int f = 0; f < filtersPerThread; f++) {
shImgs[threadIdx.y + B_Y * f][threadIdx.x + B_X * i] = imgs[f * B_Y * imgPixels * numImages + i * B_X];
}
}
}
for (int my = startOutputY; my < endOutputY; my++) {
for (int mx = startOutputX; mx < endOutputX; mx++) {
const int outputIdx = my * outputsX + mx;
#pragma unroll
for (int i = 0; i < imgsPerThread; i++) {
if (!checkCaseBounds || imgIdx + i * B_X < numImages) {
#pragma unroll
for (int f = 0; f < filtersPerThread; f++) {
const float ma = maxActs[(f * B_Y * numOutputs + outputIdx) * numImages + i * B_X];
const float mg = maxGrads[(f * B_Y * numOutputs + outputIdx) * numImages + i * B_X];
const float img = shImgs[threadIdx.y + B_Y * f][threadIdx.x + B_X * i];
// (img == ma) is 0 or 1: gradient flows only to the argmax pixel(s).
prod[f][i] += (img == ma) * mg;
}
}
}
}
}
}
if (!add) {
#pragma unroll
for (int i = 0; i < imgsPerThread; i++) {
if (!checkCaseBounds || imgIdx + i * B_X < numImages) {
#pragma unroll
for (int f = 0; f < filtersPerThread; f++) {
target[f * B_Y * imgPixels * numImages + i * B_X] = prod[f][i];
}
}
}
} else {
// Accumulate into the existing target.
#pragma unroll
for (int i = 0; i < imgsPerThread; i++) {
if (!checkCaseBounds || imgIdx + i * B_X < numImages) {
#pragma unroll
for (int f = 0; f < filtersPerThread; f++) {
target[f * B_Y * imgPixels * numImages + i * B_X] = scaleTargets * target[f * B_Y * imgPixels * numImages + i * B_X] + scaleOutputs * prod[f][i];
}
}
}
}
}
/*
* acts := -2 x scale x acts x outGrads / denoms
*/
template<int B_X, int eltsPerThread>
__global__ void kRNormUndoPrelims(float* acts, float* denoms, float* outGrads,
                                  const uint numElements, const float scale) {
    // Elementwise prelude for the RNorm backward pass:
    //   acts[idx] = scale * outGrads[idx] * acts[idx] / denoms[idx]
    // Each thread walks the flat array with a grid-wide stride, handling
    // eltsPerThread elements (spaced B_X apart) per iteration; the tail is
    // guarded so partial chunks at the end are safe.
    const uint chunkSpan = B_X * gridDim.x * eltsPerThread;
    for (uint base = B_X * blockIdx.x * eltsPerThread + threadIdx.x;
         base < numElements;
         base += chunkSpan) {
#pragma unroll
        for (uint j = 0; j < eltsPerThread; j++) {
            const uint idx = base + j * B_X;
            if (idx < numElements) {
                const float numer = scale * outGrads[idx] * acts[idx];
                acts[idx] = __fdividef(numer, denoms[idx]);
            }
        }
    }
}
/*
* Block size B_YxB_X
* blockIdx.x determines pixel.x, image idx in batches of B_X*imgsPerThread
* blockIdx.y determines pixel.y, filter idx in batches of B_Y*filtersPerThread
*
* So each block does one output pixel for some number of images/filters.
*
* threadIdx.x determines img idx
* threadIdx.y determines filter idx
*
* outGrads: (numFilters, imgPixels, numImages)
* denoms: (numFilters, imgPixels, numImages)
* inputs: (numFilters, imgPixels, numImages)
* acts: (numFilters, imgPixels, numImages)
* target: (numFilters, imgPixels, numImages)
*
* numImages must be divisible by B_X*imgsPerThread
* numFilters must be divisible by B_Y*filtersPerThread
*
* TODO: this isn't really ideal
*/
/*
 * Response-normalization backward kernel: each block computes one input pixel's
 * gradient for B_X*imgsPerThread images and B_Y*filtersPerThread filters, by
 * summing acts over the sizeX x sizeX neighborhood of output pixels whose
 * windows cover this pixel, then combining with inputs/outGrads/denoms.
 * NOTE(review): acts is assumed to have been pre-scaled by kRNormUndoPrelims
 * (acts := -2*addScale*powScale*outGrads*acts/denoms) -- confirm caller.
 */
template<int B_Y, int B_X, int imgsPerThread, int filtersPerThread, bool add, bool checkCaseBounds>
__global__ void kRNormUndo(float* outGrads, float* denoms, float* inputs, float* acts, float* target, const int imgSize, const int numFilters,
                          const int numImages, const int sizeX, const float powScale, const float scaleTargets, const float scaleOutputs) {
    const int numImgBlocks = DIVUP(numImages,B_X*imgsPerThread);
    const int numFilterBlocks = numFilters/(B_Y*filtersPerThread);
    // Decompose the 2D grid: blockIdx.x -> (pixel.x, image batch),
    // blockIdx.y -> (pixel.y, filter batch).
    const int blockPxX = blockIdx.x / numImgBlocks;
    const int blockPxY = blockIdx.y / numFilterBlocks;
    const int blockImgIdx = (blockIdx.x % numImgBlocks) * B_X * imgsPerThread;
    const int blockFilterIdx = (blockIdx.y % numFilterBlocks) * B_Y * filtersPerThread;
    const int blockPx = blockPxY * imgSize + blockPxX;
    const int imgPixels = imgSize * imgSize;
    // Range of output pixels whose sizeX window includes this input pixel,
    // clamped to the image borders.
    const int startY = MAX(0, blockPxY + sizeX/2 - sizeX + 1);
    const int startX = MAX(0, blockPxX + sizeX/2 - sizeX + 1);
    const int endY = MIN(imgSize, blockPxY + sizeX/2 + 1);
    const int endX = MIN(imgSize, blockPxX + sizeX/2 + 1);
    const int imgIdx = blockImgIdx + threadIdx.x;
    // acts is intentionally NOT offset by blockPx: it is indexed by outPx in
    // the neighborhood loop below. The other pointers are fixed at blockPx.
    acts += ((blockFilterIdx + threadIdx.y) * imgPixels) * numImages + imgIdx;
    inputs += ((blockFilterIdx + threadIdx.y) * imgPixels + blockPx) * numImages + imgIdx;
    denoms += ((blockFilterIdx + threadIdx.y) * imgPixels + blockPx) * numImages + imgIdx;
    outGrads += ((blockFilterIdx + threadIdx.y) * imgPixels + blockPx) * numImages + imgIdx;
    target += ((blockFilterIdx + threadIdx.y) * imgPixels + blockPx) * numImages + imgIdx;
    // Per-thread accumulators: sum of (pre-scaled) acts over the neighborhood.
    float prod[filtersPerThread][imgsPerThread];
    #pragma unroll
    for (int f = 0; f < filtersPerThread; f++) {
        #pragma unroll
        for (int i = 0; i < imgsPerThread; i++) {
            prod[f][i] = 0;
        }
    }
    for (int sy = startY; sy < endY; sy++) {
        for (int sx = startX; sx < endX; sx++) {
            const int outPx = sy * imgSize + sx;
            #pragma unroll
            for (int i = 0; i < imgsPerThread; i++) {
                if (!checkCaseBounds || imgIdx + i * B_X < numImages) {
                    #pragma unroll
                    for (int f = 0; f < filtersPerThread; f++) {
                        prod[f][i] += acts[(f * B_Y * imgPixels + outPx) * numImages + i * B_X];
                    }
                }
            }
        }
    }
// outGrads += blockPx * numImages;
    if (!add) {
        // Overwrite mode: target = inp * sum(acts) + outGrad * denom^(-powScale).
        #pragma unroll
        for (int i = 0; i < imgsPerThread; i++) {
            if (!checkCaseBounds || imgIdx + i * B_X < numImages) {
                #pragma unroll
                for (int f = 0; f < filtersPerThread; f++) {
                    const float inp = inputs[(f * B_Y * imgPixels) * numImages + i * B_X];
                    const float out = outGrads[(f * B_Y * imgPixels) * numImages + i * B_X];
                    const float den = denoms[(f * B_Y * imgPixels) * numImages + i * B_X];
                    prod[f][i] = inp * prod[f][i] + out * __powf(den, -powScale);
                    target[f * B_Y * imgPixels * numImages + i * B_X] = prod[f][i];
                }
            }
        }
    } else {
        // Accumulate mode: blend the new gradient into the existing target
        // using the caller-supplied scaling factors.
        #pragma unroll
        for (int i = 0; i < imgsPerThread; i++) {
            if (!checkCaseBounds || imgIdx + i * B_X < numImages) {
                #pragma unroll
                for (int f = 0; f < filtersPerThread; f++) {
                    const float inp = inputs[(f * B_Y * imgPixels) * numImages + i * B_X];
                    const float out = outGrads[(f * B_Y * imgPixels) * numImages + i * B_X];
                    const float den = denoms[(f * B_Y * imgPixels) * numImages + i * B_X];
                    prod[f][i] = inp * prod[f][i] + out * __powf(den, -powScale);
                    target[f * B_Y * imgPixels * numImages + i * B_X] =
                                                scaleTargets * target[f * B_Y * imgPixels * numImages + i * B_X]
                                                + scaleOutputs * prod[f][i];
                }
            }
        }
    }
}
/*
* Block size 16xB_X
* blockIdx.x determines 4x4 pixel.x region, image idx in batches of B_X*imgsPerThread
* blockIdx.y determines 4x4 pixel.y region, filter idx in batches of filtersPerThread
*
* So each block does 4x4 region for some number of images/filters.
*
* threadIdx.x determines img idx
* threadIdx.y determines pixel idx
*
* outGrads: (numFilters, imgPixels, numImages)
* denoms: (numFilters, imgPixels, numImages)
* inputs: (numFilters, imgPixels, numImages)
* acts: (numFilters, imgPixels, numImages)
* target: (numFilters, imgPixels, numImages)
*
* B_X one of 8, 16, 32
* imgsPerThread one of 1, 2, 4, 8, 16
*
* B_XximgsPerThread MUST be divisible by 32.
* Number of filters MUST be divisible by filtersPerThread.
*
* numImages must be divisible by B_X*imgsPerThread if checkCaseBounds is false
* numFilters must be divisible by filtersPerThread
*
* Final write-out will not be fully coalesced unless B_X is 32. But there's a lot more
* reading than writing here, and the reading is all coalesced, so it should be OK.
*/
/*
 * Shared-memory variant of kRNormUndo for large regions: each block covers a
 * 4x4 pixel region, staging acts tiles in shared memory so the 16 pixel-rows
 * of threads reuse each coalesced load.
 */
template<int B_X, int imgsPerThread, int filtersPerThread, bool add, bool checkCaseBounds>
__global__ void kRNormUndo2(float* outGrads, float* denoms, float* inputs, float* acts, float* target, const int imgSize, const int numFilters,
                            const int numImages, const int sizeX, const float powScale, const float scaleTargets, const float scaleOutputs) {
    // Tile of pre-scaled acts for one source pixel, all filters x image batch.
    __shared__ float shActs[filtersPerThread][B_X*imgsPerThread];
    const int imgPixels = imgSize * imgSize;
    const int numImgBlocks = DIVUP(numImages, B_X*imgsPerThread);
    const int numFilterBlocks = numFilters/(filtersPerThread);
    const int blockPxX = 4*(blockIdx.x / numImgBlocks);
    const int blockPxY = 4*(blockIdx.y / numFilterBlocks);
    const int blockImgIdx = (blockIdx.x % numImgBlocks) * B_X * imgsPerThread;
    const int blockFilterIdx = (blockIdx.y % numFilterBlocks) * filtersPerThread;
    // Linearized thread id re-split into a 32-wide load pattern for coalescing.
    const int tidx = threadIdx.y * B_X + threadIdx.x;
    const int loadY = tidx / 32, loadX = tidx % 32;
    // Union of the norm windows of all 16 pixels handled by this block.
    const int startPxX = MAX(0, -DIVUP(sizeX,2) + blockPxX + 1);
    const int startPxY = MAX(0, -DIVUP(sizeX,2) + blockPxY + 1);
    const int endPxX = MIN(imgSize, blockPxX + sizeX/2 + 4);
    const int endPxY = MIN(imgSize, blockPxY + sizeX/2 + 4);
    // The single pixel this thread accumulates for (threadIdx.y selects one
    // of the 4x4 region's pixels).
    const int myPxX = blockPxX + threadIdx.y % 4;
    const int myPxY = blockPxY + threadIdx.y / 4;
    const int myPxIdx = myPxY * imgSize + myPxX;
//    const bool doWork = myPxX < imgSize && myPxY < imgSize;
    const int myStartPxY = -DIVUP(sizeX,2) + myPxY + 1;
    const int myStartPxX = -DIVUP(sizeX,2) + myPxX + 1;
    const int myEndPxY = myPxY + sizeX/2 + 1;
    const int myEndPxX = myPxX + sizeX/2 + 1;
    const int imgIdx = blockImgIdx + threadIdx.x;
    // acts is offset for the coalesced load pattern (loadY/loadX); the other
    // pointers are offset for this thread's own pixel/image element.
    acts += (blockFilterIdx + loadY) * imgPixels * numImages + blockImgIdx + loadX;
    denoms += (blockFilterIdx * imgPixels + myPxIdx) * numImages + imgIdx;
    inputs += (blockFilterIdx * imgPixels + myPxIdx) * numImages + imgIdx;
    outGrads += (blockFilterIdx * imgPixels + myPxIdx) * numImages + imgIdx;
    target += (blockFilterIdx * imgPixels + myPxIdx) * numImages + imgIdx;
    float prod[filtersPerThread][imgsPerThread];
    #pragma unroll
    for (int f = 0; f < filtersPerThread; f++) {
        #pragma unroll
        for (int i = 0; i < imgsPerThread; i++) {
            prod[f][i] = 0;
        }
    }
    for (int y = startPxY; y < endPxY; y++) {
        const bool isInY = y >= myStartPxY && y < myEndPxY;
        for (int x = startPxX; x < endPxX; x++) {
            const int px = y * imgSize + x;
            // All the threads load a pixel from memory
            #pragma unroll
            for (int ly = 0; ly < filtersPerThread; ly += B_X/2) {
                if (filtersPerThread % (B_X/2) == 0 || ly + loadY < filtersPerThread) {
                    #pragma unroll
                    for (int lx = 0; lx < B_X*imgsPerThread; lx += 32) {
                        if (!checkCaseBounds || lx + loadX + blockImgIdx < numImages) {
                            shActs[ly + loadY][lx + loadX] = acts[(ly * imgPixels + px) * numImages + lx];
                        }
                    }
                }
            }
            // Barrier: the tile must be fully written before any thread reads it.
            __syncthreads();
            // Each row of threads decides if it's interested in this pixel
            if (isInY && x >= myStartPxX && x < myEndPxX) {
                #pragma unroll
                for (int i = 0; i < imgsPerThread; i++) {
                    if (!checkCaseBounds || imgIdx + i * B_X < numImages) {
                        #pragma unroll
                        for (int f = 0; f < filtersPerThread; f++) {
                            prod[f][i] += shActs[f][threadIdx.x + i * B_X];
                        }
                    }
                }
            }
            // Barrier: all reads must finish before the tile is overwritten.
            __syncthreads();
        }
    }
    // Rewind acts from the load-pattern offset to this thread's own element.
    acts -= (loadY * imgPixels - myPxIdx) * numImages + loadX;
    acts += threadIdx.x;
    if (myPxX < imgSize && myPxY < imgSize) {
        if (!add) {
            // Overwrite mode: target = inp * sum(acts) + outGrad * denom^(-powScale).
            #pragma unroll
            for (int i = 0; i < imgsPerThread; i++) {
                if (!checkCaseBounds || imgIdx + i * B_X < numImages) {
                    #pragma unroll
                    for (int f = 0; f < filtersPerThread; f++) {
                        const float out = outGrads[f * imgPixels * numImages + i * B_X];
                        const float den = denoms[f * imgPixels * numImages + i * B_X];
                        const float inp = inputs[f * imgPixels * numImages + i * B_X];
                        prod[f][i] = inp * prod[f][i] + out * __powf(den, -powScale);
                        target[f * imgPixels * numImages + i * B_X] = prod[f][i];
                    }
                }
            }
        } else {
            // Accumulate mode: blend into target with the given scale factors.
            #pragma unroll
            for (int i = 0; i < imgsPerThread; i++) {
                if (!checkCaseBounds || imgIdx + i * B_X < numImages) {
                    #pragma unroll
                    for (int f = 0; f < filtersPerThread; f++) {
                        const float out = outGrads[f * imgPixels * numImages + i * B_X];
                        const float den = denoms[f * imgPixels * numImages + i * B_X];
                        const float inp = inputs[f * imgPixels * numImages + i * B_X];
                        prod[f][i] = inp * prod[f][i] + out * __powf(den, -powScale);
                        target[f * imgPixels * numImages + i * B_X] = scaleTargets * target[f * imgPixels * numImages + i * B_X] + scaleOutputs * prod[f][i];
                    }
                }
            }
        }
    }
}
/*
 * Convenience overload: max-pooling gradient undo with the default scaling
 * (scaleTargets = 0, scaleOutput = 1), i.e. the target is overwritten.
 */
void convLocalMaxUndo(NVMatrix& images, NVMatrix& maxGrads, NVMatrix& maxActs, NVMatrix& target,
                      int subsX, int startX, int strideX, int outputsX) {
    // Delegate to the general version with overwrite semantics.
    const float scaleTargets = 0;
    const float scaleOutput = 1;
    convLocalMaxUndo(images, maxGrads, maxActs, target, subsX, startX, strideX, outputsX,
                     scaleTargets, scaleOutput);
}
/*
* imgs: (numFilters, imgPixels, numImages)
* maxGrads: (numFilters, numOutputs, numImages)
 * maxActs:    (numFilters, numOutputs, numImages)
* target: (numFilters, imgPixels, numImages)
*/
/*
 * Host-side dispatcher for the max-pooling backward pass: validates matrix
 * layouts, picks imgsPerThread from the image count, and launches the matching
 * kLocalMaxUndo template instantiation
 * <B_Y, B_X, imgsPerThread, filtersPerThread, add, checkCaseBounds>.
 */
void convLocalMaxUndo(NVMatrix& images, NVMatrix& maxGrads, NVMatrix& maxActs, NVMatrix& target,
                      int subsX, int startX, int strideX, int outputsX, float scaleTargets, float scaleOutput) {
    int outputs = outputsX * outputsX;
    int numImages = images.getNumCols();
    int numFilters = maxGrads.getNumRows() / outputs;
    int imgPixels = images.getNumRows() / numFilters;
    assert(images.getNumRows() == numFilters * imgPixels);
    // Images must be square.
    int imgSize = int(sqrt(imgPixels));
    assert(imgSize * imgSize == imgPixels);
    assert(maxGrads.getNumRows() == numFilters * outputs);
    assert(maxGrads.getNumCols() == numImages);
    // The kernels' indexing requires dense, non-transposed matrices.
    assert(!images.isTrans());
    assert(!target.isTrans());
    assert(!maxGrads.isTrans());
    assert(!maxActs.isTrans());
    assert(images.isContiguous());
    assert(maxGrads.isContiguous());
    assert(maxActs.isContiguous());
    assert(maxGrads.isSameDims(maxActs));
    assert(numFilters % 16 == 0);
// assert(numImages % 128 == 0);
    assert(strideX <= subsX);
    target.resize(images);
    assert(target.isContiguous());
    // Use the widest per-thread image batch the case count allows; bounds
    // checks are only needed when numImages isn't a multiple of the batch.
    int imgsPerThread = numImages % 128 == 0 ? 4 : numImages % 64 == 0 ? 2 : 1;
    int checkCaseBounds = numImages % (32*imgsPerThread) != 0;
    dim3 threads(32, 4);
    dim3 blocks(DIVUP(numImages,32*imgsPerThread) * imgSize, (numFilters / (4 * 2)) * imgSize);
    // scaleTargets==0 && scaleOutput==1 selects the cheaper overwrite (add=false) path.
    if (imgsPerThread == 4) {
        if (checkCaseBounds) {
            if (scaleTargets == 0 && scaleOutput == 1) {
                hipLaunchKernelGGL(( kLocalMaxUndo<4, 32, 4, 2, false, true>), dim3(blocks), dim3(threads), 0, 0, images.getDevData(), maxGrads.getDevData(), maxActs.getDevData(), target.getDevData(),
                                                            imgSize, numFilters, numImages, subsX, startX, strideX, outputsX, scaleTargets, scaleOutput);
            } else {
                hipLaunchKernelGGL(( kLocalMaxUndo<4, 32, 4, 2, true, true>), dim3(blocks), dim3(threads), 0, 0, images.getDevData(), maxGrads.getDevData(), maxActs.getDevData(), target.getDevData(),
                                                            imgSize, numFilters, numImages, subsX, startX, strideX, outputsX, scaleTargets, scaleOutput);
            }
        } else {
            if (scaleTargets == 0 && scaleOutput == 1) {
                hipLaunchKernelGGL(( kLocalMaxUndo<4, 32, 4, 2, false, false>), dim3(blocks), dim3(threads), 0, 0, images.getDevData(), maxGrads.getDevData(), maxActs.getDevData(), target.getDevData(),
                                                            imgSize, numFilters, numImages, subsX, startX, strideX, outputsX, scaleTargets, scaleOutput);
            } else {
                hipLaunchKernelGGL(( kLocalMaxUndo<4, 32, 4, 2, true, false>), dim3(blocks), dim3(threads), 0, 0, images.getDevData(), maxGrads.getDevData(), maxActs.getDevData(), target.getDevData(),
                                                            imgSize, numFilters, numImages, subsX, startX, strideX, outputsX, scaleTargets, scaleOutput);
            }
        }
    } else if (imgsPerThread == 2) {
        if (checkCaseBounds) {
            if (scaleTargets == 0 && scaleOutput == 1) {
                hipLaunchKernelGGL(( kLocalMaxUndo<4, 32, 2, 2, false, true>), dim3(blocks), dim3(threads), 0, 0, images.getDevData(), maxGrads.getDevData(), maxActs.getDevData(), target.getDevData(),
                                                            imgSize, numFilters, numImages, subsX, startX, strideX, outputsX, scaleTargets, scaleOutput);
            } else {
                hipLaunchKernelGGL(( kLocalMaxUndo<4, 32, 2, 2, true, true>), dim3(blocks), dim3(threads), 0, 0, images.getDevData(), maxGrads.getDevData(), maxActs.getDevData(), target.getDevData(),
                                                            imgSize, numFilters, numImages, subsX, startX, strideX, outputsX, scaleTargets, scaleOutput);
            }
        } else {
            if (scaleTargets == 0 && scaleOutput == 1) {
                hipLaunchKernelGGL(( kLocalMaxUndo<4, 32, 2, 2, false, false>), dim3(blocks), dim3(threads), 0, 0, images.getDevData(), maxGrads.getDevData(), maxActs.getDevData(), target.getDevData(),
                                                            imgSize, numFilters, numImages, subsX, startX, strideX, outputsX, scaleTargets, scaleOutput);
            } else {
                hipLaunchKernelGGL(( kLocalMaxUndo<4, 32, 2, 2, true, false>), dim3(blocks), dim3(threads), 0, 0, images.getDevData(), maxGrads.getDevData(), maxActs.getDevData(), target.getDevData(),
                                                            imgSize, numFilters, numImages, subsX, startX, strideX, outputsX, scaleTargets, scaleOutput);
            }
        }
    } else {
        if (checkCaseBounds) {
            if (scaleTargets == 0 && scaleOutput == 1) {
                hipLaunchKernelGGL(( kLocalMaxUndo<4, 32, 1, 2, false, true>), dim3(blocks), dim3(threads), 0, 0, images.getDevData(), maxGrads.getDevData(), maxActs.getDevData(), target.getDevData(),
                                                            imgSize, numFilters, numImages, subsX, startX, strideX, outputsX, scaleTargets, scaleOutput);
            } else {
                hipLaunchKernelGGL(( kLocalMaxUndo<4, 32, 1, 2, true, true>), dim3(blocks), dim3(threads), 0, 0, images.getDevData(), maxGrads.getDevData(), maxActs.getDevData(), target.getDevData(),
                                                            imgSize, numFilters, numImages, subsX, startX, strideX, outputsX, scaleTargets, scaleOutput);
            }
        } else {
            if (scaleTargets == 0 && scaleOutput == 1) {
                hipLaunchKernelGGL(( kLocalMaxUndo<4, 32, 1, 2, false, false>), dim3(blocks), dim3(threads), 0, 0, images.getDevData(), maxGrads.getDevData(), maxActs.getDevData(), target.getDevData(),
                                                            imgSize, numFilters, numImages, subsX, startX, strideX, outputsX, scaleTargets, scaleOutput);
            } else {
                hipLaunchKernelGGL(( kLocalMaxUndo<4, 32, 1, 2, true, false>), dim3(blocks), dim3(threads), 0, 0, images.getDevData(), maxGrads.getDevData(), maxActs.getDevData(), target.getDevData(),
                                                            imgSize, numFilters, numImages, subsX, startX, strideX, outputsX, scaleTargets, scaleOutput);
            }
        }
    }
    // Launch errors only surface via getLastCudaError -- keep this check.
    getLastCudaError("convLocalMaxUndo: kernel execution failed");
}
/*
 * Convenience overload: average-pooling gradient undo with the default
 * scaling (scaleTargets = 0, scaleOutput = 1), i.e. the target is overwritten.
 */
void convLocalAvgUndo(NVMatrix& avgGrads, NVMatrix& target, int subsX, int startX, int strideX, int outputsX, int imgSize) {
    // Delegate to the general version with overwrite semantics.
    const float scaleTargets = 0;
    const float scaleOutput = 1;
    convLocalAvgUndo(avgGrads, target, subsX, startX, strideX, outputsX, imgSize,
                     scaleTargets, scaleOutput);
}
/*
* avgGrads: (numFilters, numOutputs, numImages)
* target: (numFilters, imgPixels, numImages)
*/
/*
 * Host-side dispatcher for the average-pooling backward pass. Validates the
 * gradient matrix layout, sizes the target to the input image shape, and
 * launches the matching kLocalAvgUndo template instantiation
 * <B_Y, B_X, imgsPerThread, filtersPerThread, add, checkCaseBounds>.
 */
void convLocalAvgUndo(NVMatrix& avgGrads, NVMatrix& target,
                      int subsX, int startX, int strideX, int outputsX, int imgSize,
                      float scaleTargets, float scaleOutput) {
    int numImages = avgGrads.getNumCols();
    int outputs = outputsX * outputsX;
    int imgPixels = imgSize * imgSize;
    int numFilters = avgGrads.getNumRows() / outputs;
    assert(avgGrads.getNumRows() == numFilters * outputs);
    // The kernels' indexing requires dense, non-transposed matrices.
    assert(!target.isTrans());
    assert(!avgGrads.isTrans());
    assert(avgGrads.isContiguous());
    assert(numFilters % 16 == 0);
// assert(numImages % 128 == 0);
    assert(strideX <= subsX);
    target.resize(numFilters * imgPixels, numImages);
    assert(target.isContiguous());
    // Use the widest per-thread image batch the case count allows; bounds
    // checks are only needed when numImages isn't a multiple of the batch.
    int imgsPerThread = numImages % 128 == 0 ? 4 : numImages % 64 == 0 ? 2 : 1;
    int checkCaseBounds = numImages % (32*imgsPerThread) != 0;
    dim3 threads(32, 4);
    dim3 blocks(DIVUP(numImages,32*imgsPerThread) * imgSize, (numFilters / (4 * 4)) * imgSize);
    // scaleTargets==0 && scaleOutput==1 selects the cheaper overwrite (add=false) path.
    if (imgsPerThread == 4) {
        if (checkCaseBounds) {
            if (scaleTargets == 0 && scaleOutput == 1) {
                hipLaunchKernelGGL(( kLocalAvgUndo<4, 32, 4, 4, false, true>), dim3(blocks), dim3(threads), 0, 0, avgGrads.getDevData(), target.getDevData(),
                                                           imgSize, numFilters, numImages, subsX, startX, strideX,
                                                           outputsX, scaleTargets, scaleOutput);
            } else {
                hipLaunchKernelGGL(( kLocalAvgUndo<4, 32, 4, 4, true, true>), dim3(blocks), dim3(threads), 0, 0, avgGrads.getDevData(), target.getDevData(),
                                                           imgSize, numFilters, numImages, subsX, startX, strideX,
                                                           outputsX, scaleTargets, scaleOutput);
            }
        } else {
            if (scaleTargets == 0 && scaleOutput == 1) {
                hipLaunchKernelGGL(( kLocalAvgUndo<4, 32, 4, 4, false, false>), dim3(blocks), dim3(threads), 0, 0, avgGrads.getDevData(), target.getDevData(),
                                                           imgSize, numFilters, numImages, subsX, startX, strideX,
                                                           outputsX, scaleTargets, scaleOutput);
            } else {
                hipLaunchKernelGGL(( kLocalAvgUndo<4, 32, 4, 4, true, false>), dim3(blocks), dim3(threads), 0, 0, avgGrads.getDevData(), target.getDevData(),
                                                           imgSize, numFilters, numImages, subsX, startX, strideX,
                                                           outputsX, scaleTargets, scaleOutput);
            }
        }
    } else if (imgsPerThread == 2) {
        if (checkCaseBounds) {
            if (scaleTargets == 0 && scaleOutput == 1) {
                hipLaunchKernelGGL(( kLocalAvgUndo<4, 32, 2, 4, false, true>), dim3(blocks), dim3(threads), 0, 0, avgGrads.getDevData(), target.getDevData(),
                                                           imgSize, numFilters, numImages, subsX, startX, strideX,
                                                           outputsX, scaleTargets, scaleOutput);
            } else {
                hipLaunchKernelGGL(( kLocalAvgUndo<4, 32, 2, 4, true, true>), dim3(blocks), dim3(threads), 0, 0, avgGrads.getDevData(), target.getDevData(),
                                                           imgSize, numFilters, numImages, subsX, startX, strideX,
                                                           outputsX, scaleTargets, scaleOutput);
            }
        } else {
            if (scaleTargets == 0 && scaleOutput == 1) {
                hipLaunchKernelGGL(( kLocalAvgUndo<4, 32, 2, 4, false, false>), dim3(blocks), dim3(threads), 0, 0, avgGrads.getDevData(), target.getDevData(),
                                                           imgSize, numFilters, numImages, subsX, startX, strideX,
                                                           outputsX, scaleTargets, scaleOutput);
            } else {
                hipLaunchKernelGGL(( kLocalAvgUndo<4, 32, 2, 4, true, false>), dim3(blocks), dim3(threads), 0, 0, avgGrads.getDevData(), target.getDevData(),
                                                           imgSize, numFilters, numImages, subsX, startX, strideX,
                                                           outputsX, scaleTargets, scaleOutput);
            }
        }
    } else {
        if (checkCaseBounds) {
            if (scaleTargets == 0 && scaleOutput == 1) {
                hipLaunchKernelGGL(( kLocalAvgUndo<4, 32, 1, 4, false, true>), dim3(blocks), dim3(threads), 0, 0, avgGrads.getDevData(), target.getDevData(),
                                                           imgSize, numFilters, numImages, subsX, startX, strideX,
                                                           outputsX, scaleTargets, scaleOutput);
            } else {
                hipLaunchKernelGGL(( kLocalAvgUndo<4, 32, 1, 4, true, true>), dim3(blocks), dim3(threads), 0, 0, avgGrads.getDevData(), target.getDevData(),
                                                           imgSize, numFilters, numImages, subsX, startX, strideX,
                                                           outputsX, scaleTargets, scaleOutput);
            }
        } else {
            if (scaleTargets == 0 && scaleOutput == 1) {
                hipLaunchKernelGGL(( kLocalAvgUndo<4, 32, 1, 4, false, false>), dim3(blocks), dim3(threads), 0, 0, avgGrads.getDevData(), target.getDevData(),
                                                           imgSize, numFilters, numImages, subsX, startX, strideX,
                                                           outputsX, scaleTargets, scaleOutput);
            } else {
                hipLaunchKernelGGL(( kLocalAvgUndo<4, 32, 1, 4, true, false>), dim3(blocks), dim3(threads), 0, 0, avgGrads.getDevData(), target.getDevData(),
                                                           imgSize, numFilters, numImages, subsX, startX, strideX,
                                                           outputsX, scaleTargets, scaleOutput);
            }
        }
    }
    // Launch errors only surface via getLastCudaError -- keep this check.
    getLastCudaError("convLocalAvgUndo: kernel execution failed");
}
/*
 * Response normalization over a sizeX x sizeX neighborhood. Implemented as
 * contrast normalization with the images themselves serving as the
 * mean-difference input.
 */
void convResponseNorm(NVMatrix& images, NVMatrix& denoms, NVMatrix& target, int numFilters, int sizeX, float addScale, float powScale) {
    // For plain response norm, the "mean diffs" are just the images.
    NVMatrix& meanDiffs = images;
    convContrastNorm(images, meanDiffs, denoms, target, numFilters, sizeX, addScale, powScale);
}
/*
* images: (numFilters, imgPixels, numImages)
* meanDiffs: (numFilters, imgPixels, numImages)
* denoms: (numFilters, imgPixels, numImages) (out)
* target: (numFilters, imgPixels, numImages) (out)
*/
/*
 * Host-side dispatcher for (contrast) response normalization. Writes both the
 * per-element denominators (denoms) and the normalized output (target).
 * Chooses among three kernel families:
 *   - kCNorm2        for large regions (sizeX >= 6, numFilters % 4 == 0),
 *   - kCNorm_fewfilter  for numFilters <= 8,
 *   - kCNorm_manyfilter otherwise.
 */
void convContrastNorm(NVMatrix& images, NVMatrix& meanDiffs, NVMatrix& denoms, NVMatrix& target, int numFilters, int sizeX, float addScale, float powScale) {
    int numImages = images.getNumCols();
    int imgPixels = images.getNumRows() / numFilters;
    assert(images.getNumRows() == numFilters * imgPixels);
    // Images must be square.
    int imgSize = int(sqrt(imgPixels));
    assert(imgSize * imgSize == imgPixels);
    assert(meanDiffs.isSameDims(images));
    // The kernels' indexing requires dense, non-transposed matrices.
    assert(!meanDiffs.isTrans());
    assert(!images.isTrans());
    assert(images.isContiguous());
    assert(meanDiffs.isContiguous());
    assert(numFilters % 16 == 0 || numFilters <= 8);
    target.resize(images);
    denoms.resize(images);
    assert(target.isContiguous());
    if (sizeX >= 6 && numFilters % 4 == 0) {
        // This one is faster for large regions (my tests show regions >= 6...)
        int imgsPerThread = 8;
        int filtersPerThread = 4;
        int bx = 8;
        bool checkCaseBounds = numImages % (bx*imgsPerThread) != 0;
        assert((imgsPerThread * bx) % 32 == 0);
        assert(numFilters % filtersPerThread == 0);
        dim3 threads(bx, 16);
        dim3 blocks(DIVUP(imgSize, 4) * DIVUP(numImages, bx*imgsPerThread), DIVUP(imgSize, 4) * numFilters / filtersPerThread);
        if (checkCaseBounds) {
            hipFuncSetCacheConfig(kCNorm2<8, 8, 4, true>, hipFuncCachePreferL1); // L1 faster here
            hipLaunchKernelGGL(( kCNorm2<8, 8, 4, true>), dim3(blocks), dim3(threads), 0, 0, images.getDevData(), meanDiffs.getDevData(), denoms.getDevData(), target.getDevData(),
                                               imgSize, numFilters, numImages, sizeX, addScale, powScale);
        } else {
            hipFuncSetCacheConfig(kCNorm2<8, 8, 4, false>, hipFuncCachePreferL1); // L1 faster here
            hipLaunchKernelGGL(( kCNorm2<8, 8, 4, false>), dim3(blocks), dim3(threads), 0, 0, images.getDevData(), meanDiffs.getDevData(), denoms.getDevData(), target.getDevData(),
                                               imgSize, numFilters, numImages, sizeX, addScale, powScale);
        }
    } else {
        bool checkCaseBounds = numImages % 128 != 0;
        if (numFilters <= 8) {
            // Few-filter path: the filter count is a template parameter, so
            // each count 1..8 must be dispatched to its own instantiation.
            dim3 threads(128);
            dim3 blocks(DIVUP(numImages,128) * imgSize, imgSize);
            if (numFilters == 1) {
                if (checkCaseBounds) {
                    hipFuncSetCacheConfig(kCNorm_fewfilter<1, 1, true>, hipFuncCachePreferL1);
                    hipLaunchKernelGGL(( kCNorm_fewfilter<1, 1, true>), dim3(blocks), dim3(threads), 0, 0, images.getDevData(), meanDiffs.getDevData(), denoms.getDevData(), target.getDevData(),
                                                              imgSize, numImages, sizeX, addScale, powScale);
                } else {
                    hipFuncSetCacheConfig(kCNorm_fewfilter<1, 1, false>, hipFuncCachePreferL1);
                    hipLaunchKernelGGL(( kCNorm_fewfilter<1, 1, false>), dim3(blocks), dim3(threads), 0, 0, images.getDevData(), meanDiffs.getDevData(), denoms.getDevData(), target.getDevData(),
                                                              imgSize, numImages, sizeX, addScale, powScale);
                }
            } else if (numFilters == 2) {
                if (checkCaseBounds) {
                    hipFuncSetCacheConfig(kCNorm_fewfilter<1, 2, true>, hipFuncCachePreferL1);
                    hipLaunchKernelGGL(( kCNorm_fewfilter<1, 2, true>), dim3(blocks), dim3(threads), 0, 0, images.getDevData(), meanDiffs.getDevData(), denoms.getDevData(), target.getDevData(),
                                                              imgSize, numImages, sizeX, addScale, powScale);
                } else {
                    hipFuncSetCacheConfig(kCNorm_fewfilter<1, 2, false>, hipFuncCachePreferL1);
                    hipLaunchKernelGGL(( kCNorm_fewfilter<1, 2, false>), dim3(blocks), dim3(threads), 0, 0, images.getDevData(), meanDiffs.getDevData(), denoms.getDevData(), target.getDevData(),
                                                              imgSize, numImages, sizeX, addScale, powScale);
                }
            } else if (numFilters == 3) {
                if (checkCaseBounds) {
                    hipFuncSetCacheConfig(kCNorm_fewfilter<1, 3, true>, hipFuncCachePreferL1);
                    hipLaunchKernelGGL(( kCNorm_fewfilter<1, 3, true>), dim3(blocks), dim3(threads), 0, 0, images.getDevData(), meanDiffs.getDevData(), denoms.getDevData(), target.getDevData(),
                                                              imgSize, numImages, sizeX, addScale, powScale);
                } else {
                    hipFuncSetCacheConfig(kCNorm_fewfilter<1, 3, false>, hipFuncCachePreferL1);
                    hipLaunchKernelGGL(( kCNorm_fewfilter<1, 3, false>), dim3(blocks), dim3(threads), 0, 0, images.getDevData(), meanDiffs.getDevData(), denoms.getDevData(), target.getDevData(),
                                                              imgSize, numImages, sizeX, addScale, powScale);
                }
            } else if (numFilters == 4) {
                if (checkCaseBounds) {
                    hipFuncSetCacheConfig(kCNorm_fewfilter<1, 4, true>, hipFuncCachePreferL1);
                    hipLaunchKernelGGL(( kCNorm_fewfilter<1, 4, true>), dim3(blocks), dim3(threads), 0, 0, images.getDevData(), meanDiffs.getDevData(), denoms.getDevData(), target.getDevData(),
                                                              imgSize, numImages, sizeX, addScale, powScale);
                } else {
                    hipFuncSetCacheConfig(kCNorm_fewfilter<1, 4, false>, hipFuncCachePreferL1);
                    hipLaunchKernelGGL(( kCNorm_fewfilter<1, 4, false>), dim3(blocks), dim3(threads), 0, 0, images.getDevData(), meanDiffs.getDevData(), denoms.getDevData(), target.getDevData(),
                                                              imgSize, numImages, sizeX, addScale, powScale);
                }
            } else if (numFilters == 5) {
                if (checkCaseBounds) {
                    hipFuncSetCacheConfig(kCNorm_fewfilter<1, 5, true>, hipFuncCachePreferL1);
                    hipLaunchKernelGGL(( kCNorm_fewfilter<1, 5, true>), dim3(blocks), dim3(threads), 0, 0, images.getDevData(), meanDiffs.getDevData(), denoms.getDevData(), target.getDevData(),
                                                              imgSize, numImages, sizeX, addScale, powScale);
                } else {
                    hipFuncSetCacheConfig(kCNorm_fewfilter<1, 5, false>, hipFuncCachePreferL1);
                    hipLaunchKernelGGL(( kCNorm_fewfilter<1, 5, false>), dim3(blocks), dim3(threads), 0, 0, images.getDevData(), meanDiffs.getDevData(), denoms.getDevData(), target.getDevData(),
                                                              imgSize, numImages, sizeX, addScale, powScale);
                }
            } else if (numFilters == 6) {
                if (checkCaseBounds) {
                    hipFuncSetCacheConfig(kCNorm_fewfilter<1, 6, true>, hipFuncCachePreferL1);
                    hipLaunchKernelGGL(( kCNorm_fewfilter<1, 6, true>), dim3(blocks), dim3(threads), 0, 0, images.getDevData(), meanDiffs.getDevData(), denoms.getDevData(), target.getDevData(),
                                                              imgSize, numImages, sizeX, addScale, powScale);
                } else {
                    hipFuncSetCacheConfig(kCNorm_fewfilter<1, 6, false>, hipFuncCachePreferL1);
                    hipLaunchKernelGGL(( kCNorm_fewfilter<1, 6, false>), dim3(blocks), dim3(threads), 0, 0, images.getDevData(), meanDiffs.getDevData(), denoms.getDevData(), target.getDevData(),
                                                              imgSize, numImages, sizeX, addScale, powScale);
                }
            } else if (numFilters == 7) {
                if (checkCaseBounds) {
                    hipFuncSetCacheConfig(kCNorm_fewfilter<1, 7, true>, hipFuncCachePreferL1);
                    hipLaunchKernelGGL(( kCNorm_fewfilter<1, 7, true>), dim3(blocks), dim3(threads), 0, 0, images.getDevData(), meanDiffs.getDevData(), denoms.getDevData(), target.getDevData(),
                                                              imgSize, numImages, sizeX, addScale, powScale);
                } else {
                    hipFuncSetCacheConfig(kCNorm_fewfilter<1, 7, false>, hipFuncCachePreferL1);
                    hipLaunchKernelGGL(( kCNorm_fewfilter<1, 7, false>), dim3(blocks), dim3(threads), 0, 0, images.getDevData(), meanDiffs.getDevData(), denoms.getDevData(), target.getDevData(),
                                                              imgSize, numImages, sizeX, addScale, powScale);
                }
            } else if (numFilters == 8) {
                if (checkCaseBounds) {
                    hipFuncSetCacheConfig(kCNorm_fewfilter<1, 8, true>, hipFuncCachePreferL1);
                    hipLaunchKernelGGL(( kCNorm_fewfilter<1, 8, true>), dim3(blocks), dim3(threads), 0, 0, images.getDevData(), meanDiffs.getDevData(), denoms.getDevData(), target.getDevData(),
                                                              imgSize, numImages, sizeX, addScale, powScale);
                } else {
                    hipFuncSetCacheConfig(kCNorm_fewfilter<1, 8, false>, hipFuncCachePreferL1);
                    hipLaunchKernelGGL(( kCNorm_fewfilter<1, 8, false>), dim3(blocks), dim3(threads), 0, 0, images.getDevData(), meanDiffs.getDevData(), denoms.getDevData(), target.getDevData(),
                                                              imgSize, numImages, sizeX, addScale, powScale);
                }
            }
        } else {
            // Many-filter path: 4x2 filters per thread, 4 images per thread.
            dim3 threads(32, 4);
            dim3 blocks(DIVUP(numImages,32*4) * imgSize, (numFilters / (4 * 2)) * imgSize);
            if (checkCaseBounds) {
                hipFuncSetCacheConfig(kCNorm_manyfilter<4, 32, 4, 2, true>, hipFuncCachePreferL1);
                hipLaunchKernelGGL(( kCNorm_manyfilter<4, 32, 4, 2, true>), dim3(blocks), dim3(threads), 0, 0, images.getDevData(), meanDiffs.getDevData(), denoms.getDevData(), target.getDevData(),
                                                          imgSize, numFilters, numImages, sizeX, addScale, powScale);
            } else {
                hipFuncSetCacheConfig(kCNorm_manyfilter<4, 32, 4, 2, false>, hipFuncCachePreferL1);
                hipLaunchKernelGGL(( kCNorm_manyfilter<4, 32, 4, 2, false>), dim3(blocks), dim3(threads), 0, 0, images.getDevData(), meanDiffs.getDevData(), denoms.getDevData(), target.getDevData(),
                                                          imgSize, numFilters, numImages, sizeX, addScale, powScale);
            }
        }
    }
    // Launch errors only surface via getLastCudaError -- keep this check.
    getLastCudaError("convResponseNorm: kernel execution failed");
}
/*
 * Backward pass of contrast normalization. The gradient math is identical to
 * response normalization's, with the mean differences playing the role of the
 * inputs, so this simply forwards to convResponseNormUndo.
 */
void convContrastNormUndo(NVMatrix& outGrads, NVMatrix& denoms, NVMatrix& meanDiffs, NVMatrix& acts, NVMatrix& target, int numFilters,
                          int sizeX, float addScale, float powScale, float scaleTargets, float scaleOutput) {
    convResponseNormUndo(outGrads, denoms, meanDiffs, acts, target, numFilters, sizeX,
                         addScale, powScale, scaleTargets, scaleOutput);
}
/*
* outGrads: (numFilters, imgPixels, numImages)
* denoms: (numFilters, imgPixels, numImages)
* inputs: (numFilters, imgPixels, numImages)
* acts: (numFilters, imgPixels, numImages)
* target: (numFilters, imgPixels, numImages)
*
* THIS WILL OVERWRITE THE ACTS MATRIX.
*/
void convResponseNormUndo(NVMatrix& outGrads, NVMatrix& denoms, NVMatrix& inputs, NVMatrix& acts, NVMatrix& target, int numFilters,
int sizeX, float addScale, float powScale, float scaleTargets, float scaleOutput) {
int numImages = outGrads.getNumCols();
int imgPixels = outGrads.getNumRows() / numFilters;
int imgSize = int(sqrt(imgPixels));
assert(imgSize * imgSize == imgPixels);
assert(outGrads.getNumRows() == numFilters * imgPixels);
assert(denoms.isSameDims(outGrads));
assert(acts.isSameDims(denoms));
assert(!denoms.isTrans());
assert(!outGrads.isTrans());
assert(!acts.isTrans());
assert(!target.isTrans());
assert(outGrads.isContiguous());
assert(numFilters % 16 == 0);
target.resize(outGrads);
assert(target.isContiguous());
// First do acts := -2 x scale x acts x outGrads / denoms
// so that the main routine only has to do an addition in its inner loop.
int prelimEltsPerThread = 4;
dim3 threads(128);
dim3 blocks(MIN(512, DIVUP(outGrads.getNumElements(),(threads.x * prelimEltsPerThread))));
hipLaunchKernelGGL(( kRNormUndoPrelims<128, 4>), dim3(blocks), dim3(threads), 0, 0, acts.getDevData(), denoms.getDevData(), outGrads.getDevData(), outGrads.getNumElements(), -2*addScale*powScale);
// Now the main routine
if (sizeX >= 6 && numFilters % 4 == 0) {
// This one is faster for large regions (my tests show regions >= 6...)
int imgsPerThread = numImages % 128 == 0 ? 8 : numImages % 64 == 0 ? 4 : 2;
int filtersPerThread = 4;
int bx = 16;
bool checkCaseBounds = numImages % (bx*imgsPerThread) != 0;
assert((imgsPerThread * bx) % 32 == 0);
threads = dim3(bx, 16);
blocks = dim3(DIVUP(imgSize, 4) * DIVUP(numImages, bx*imgsPerThread), DIVUP(imgSize, 4) * numFilters / filtersPerThread);
if (imgsPerThread == 8) {
if (checkCaseBounds) {
if (scaleTargets == 0 && scaleOutput == 1) {
hipFuncSetCacheConfig(kRNormUndo2<16, 8, 4, true, true>, hipFuncCachePreferL1);
hipLaunchKernelGGL(( kRNormUndo2<16, 8, 4, true, true>), dim3(blocks), dim3(threads), 0, 0, outGrads.getDevData(), denoms.getDevData(), inputs.getDevData(), acts.getDevData(),
target.getDevData(), imgSize, numFilters, numImages, sizeX, powScale,
scaleTargets, scaleOutput);
} else {
hipFuncSetCacheConfig(kRNormUndo2<16, 8, 4, false, true>, hipFuncCachePreferL1);
hipLaunchKernelGGL(( kRNormUndo2<16, 8, 4, false, true>), dim3(blocks), dim3(threads), 0, 0, outGrads.getDevData(), denoms.getDevData(), inputs.getDevData(), acts.getDevData(),
target.getDevData(), imgSize, numFilters, numImages, sizeX, powScale,
scaleTargets, scaleOutput);
}
} else {
if (scaleTargets == 0 && scaleOutput == 1) {
hipFuncSetCacheConfig(kRNormUndo2<16, 8, 4, true, false>, hipFuncCachePreferL1);
hipLaunchKernelGGL(( kRNormUndo2<16, 8, 4, true, false>), dim3(blocks), dim3(threads), 0, 0, outGrads.getDevData(), denoms.getDevData(), inputs.getDevData(), acts.getDevData(),
target.getDevData(), imgSize, numFilters, numImages, sizeX, powScale,
scaleTargets, scaleOutput);
} else {
hipFuncSetCacheConfig(kRNormUndo2<16, 8, 4, false, false>, hipFuncCachePreferL1);
hipLaunchKernelGGL(( kRNormUndo2<16, 8, 4, false, false>), dim3(blocks), dim3(threads), 0, 0, outGrads.getDevData(), denoms.getDevData(), inputs.getDevData(), acts.getDevData(),
target.getDevData(), imgSize, numFilters, numImages, sizeX, powScale,
scaleTargets, scaleOutput);
}
}
} else if (imgsPerThread == 4) {
if (checkCaseBounds) {
if (scaleTargets == 0 && scaleOutput == 1) {
hipFuncSetCacheConfig(kRNormUndo2<16, 4, 4, true, true>, hipFuncCachePreferL1);
hipLaunchKernelGGL(( kRNormUndo2<16, 4, 4, true, true>), dim3(blocks), dim3(threads), 0, 0, outGrads.getDevData(), denoms.getDevData(), inputs.getDevData(), acts.getDevData(),
target.getDevData(), imgSize, numFilters, numImages, sizeX, powScale,
scaleTargets, scaleOutput);
} else {
hipFuncSetCacheConfig(kRNormUndo2<16, 4, 4, false, true>, hipFuncCachePreferL1);
hipLaunchKernelGGL(( kRNormUndo2<16, 4, 4, false, true>), dim3(blocks), dim3(threads), 0, 0, outGrads.getDevData(), denoms.getDevData(), inputs.getDevData(), acts.getDevData(),
target.getDevData(), imgSize, numFilters, numImages, sizeX, powScale,
scaleTargets, scaleOutput);
}
} else {
if (scaleTargets == 0 && scaleOutput == 1) {
hipFuncSetCacheConfig(kRNormUndo2<16, 4, 4, true, false>, hipFuncCachePreferL1);
hipLaunchKernelGGL(( kRNormUndo2<16, 4, 4, true, false>), dim3(blocks), dim3(threads), 0, 0, outGrads.getDevData(), denoms.getDevData(), inputs.getDevData(), acts.getDevData(),
target.getDevData(), imgSize, numFilters, numImages, sizeX, powScale,
scaleTargets, scaleOutput);
} else {
hipFuncSetCacheConfig(kRNormUndo2<16, 4, 4, false, false>, hipFuncCachePreferL1);
hipLaunchKernelGGL(( kRNormUndo2<16, 4, 4, false, false>), dim3(blocks), dim3(threads), 0, 0, outGrads.getDevData(), denoms.getDevData(), inputs.getDevData(), acts.getDevData(),
target.getDevData(), imgSize, numFilters, numImages, sizeX, powScale,
scaleTargets, scaleOutput);
}
}
} else {
if (checkCaseBounds) {
if (scaleTargets == 0 && scaleOutput == 1) {
hipFuncSetCacheConfig(kRNormUndo2<16, 2, 4, true, true>, hipFuncCachePreferL1);
hipLaunchKernelGGL(( kRNormUndo2<16, 2, 4, true, true>), dim3(blocks), dim3(threads), 0, 0, outGrads.getDevData(), denoms.getDevData(), inputs.getDevData(), acts.getDevData(),
target.getDevData(), imgSize, numFilters, numImages, sizeX, powScale,
scaleTargets, scaleOutput);
} else {
hipFuncSetCacheConfig(kRNormUndo2<16, 2, 4, false, true>, hipFuncCachePreferL1);
hipLaunchKernelGGL(( kRNormUndo2<16, 2, 4, false, true>), dim3(blocks), dim3(threads), 0, 0, outGrads.getDevData(), denoms.getDevData(), inputs.getDevData(), acts.getDevData(),
target.getDevData(), imgSize, numFilters, numImages, sizeX, powScale,
scaleTargets, scaleOutput);
}
} else {
if (scaleTargets == 0 && scaleOutput == 1) {
hipFuncSetCacheConfig(kRNormUndo2<16, 2, 4, true, false>, hipFuncCachePreferL1);
hipLaunchKernelGGL(( kRNormUndo2<16, 2, 4, true, false>), dim3(blocks), dim3(threads), 0, 0, outGrads.getDevData(), denoms.getDevData(), inputs.getDevData(), acts.getDevData(),
target.getDevData(), imgSize, numFilters, numImages, sizeX, powScale,
scaleTargets, scaleOutput);
} else {
hipFuncSetCacheConfig(kRNormUndo2<16, 2, 4, false, false>, hipFuncCachePreferL1);
hipLaunchKernelGGL(( kRNormUndo2<16, 2, 4, false, false>), dim3(blocks), dim3(threads), 0, 0, outGrads.getDevData(), denoms.getDevData(), inputs.getDevData(), acts.getDevData(),
target.getDevData(), imgSize, numFilters, numImages, sizeX, powScale,
scaleTargets, scaleOutput);
}
}
}
} else {
int imgsPerThread = numImages % 64 == 0 ? 2 : 1;
bool checkCaseBounds = numImages % (32*imgsPerThread) != 0;
threads = dim3(32, 4);
blocks = dim3(DIVUP(numImages,32*imgsPerThread) * imgSize, (numFilters / (4 * 2)) * imgSize);
if (imgsPerThread == 2) {
if (checkCaseBounds) {
if (scaleTargets == 0 && scaleOutput == 1) {
hipFuncSetCacheConfig(kRNormUndo<4, 32, 2, 2, false, true>, hipFuncCachePreferL1);
hipLaunchKernelGGL(( kRNormUndo<4, 32, 2, 2, false, true>), dim3(blocks), dim3(threads), 0, 0, outGrads.getDevData(), denoms.getDevData(), inputs.getDevData(), acts.getDevData(),
target.getDevData(), imgSize, numFilters, numImages, sizeX, powScale,
scaleTargets, scaleOutput);
} else {
hipFuncSetCacheConfig(kRNormUndo<4, 32, 2, 2, true, true>, hipFuncCachePreferL1);
hipLaunchKernelGGL(( kRNormUndo<4, 32, 2, 2, true, true>), dim3(blocks), dim3(threads), 0, 0, outGrads.getDevData(), denoms.getDevData(), inputs.getDevData(), acts.getDevData(),
target.getDevData(), imgSize, numFilters, numImages, sizeX, powScale,
scaleTargets, scaleOutput);
}
} else {
if (scaleTargets == 0 && scaleOutput == 1) {
hipFuncSetCacheConfig(kRNormUndo<4, 32, 2, 2, false, false>, hipFuncCachePreferL1);
hipLaunchKernelGGL(( kRNormUndo<4, 32, 2, 2, false, false>), dim3(blocks), dim3(threads), 0, 0, outGrads.getDevData(), denoms.getDevData(), inputs.getDevData(), acts.getDevData(),
target.getDevData(), imgSize, numFilters, numImages, sizeX, powScale,
scaleTargets, scaleOutput);
} else {
hipFuncSetCacheConfig(kRNormUndo<4, 32, 2, 2, true, false>, hipFuncCachePreferL1);
hipLaunchKernelGGL(( kRNormUndo<4, 32, 2, 2, true, false>), dim3(blocks), dim3(threads), 0, 0, outGrads.getDevData(), denoms.getDevData(), inputs.getDevData(), acts.getDevData(),
target.getDevData(), imgSize, numFilters, numImages, sizeX, powScale,
scaleTargets, scaleOutput);
}
}
} else {
if (checkCaseBounds) {
if (scaleTargets == 0 && scaleOutput == 1) {
hipFuncSetCacheConfig(kRNormUndo<4, 32, 1, 2, false, true>, hipFuncCachePreferL1);
hipLaunchKernelGGL(( kRNormUndo<4, 32, 1, 2, false, true>), dim3(blocks), dim3(threads), 0, 0, outGrads.getDevData(), denoms.getDevData(), inputs.getDevData(), acts.getDevData(),
target.getDevData(), imgSize, numFilters, numImages, sizeX, powScale,
scaleTargets, scaleOutput);
} else {
hipFuncSetCacheConfig(kRNormUndo<4, 32, 1, 2, true, true>, hipFuncCachePreferL1);
hipLaunchKernelGGL(( kRNormUndo<4, 32, 1, 2, true, true>), dim3(blocks), dim3(threads), 0, 0, outGrads.getDevData(), denoms.getDevData(), inputs.getDevData(), acts.getDevData(),
target.getDevData(), imgSize, numFilters, numImages, sizeX, powScale,
scaleTargets, scaleOutput);
}
} else {
if (scaleTargets == 0 && scaleOutput == 1) {
hipFuncSetCacheConfig(kRNormUndo<4, 32, 1, 2, false, false>, hipFuncCachePreferL1);
hipLaunchKernelGGL(( kRNormUndo<4, 32, 1, 2, false, false>), dim3(blocks), dim3(threads), 0, 0, outGrads.getDevData(), denoms.getDevData(), inputs.getDevData(), acts.getDevData(),
target.getDevData(), imgSize, numFilters, numImages, sizeX, powScale,
scaleTargets, scaleOutput);
} else {
hipFuncSetCacheConfig(kRNormUndo<4, 32, 1, 2, true, false>, hipFuncCachePreferL1);
hipLaunchKernelGGL(( kRNormUndo<4, 32, 1, 2, true, false>), dim3(blocks), dim3(threads), 0, 0, outGrads.getDevData(), denoms.getDevData(), inputs.getDevData(), acts.getDevData(),
target.getDevData(), imgSize, numFilters, numImages, sizeX, powScale,
scaleTargets, scaleOutput);
}
}
}
}
getLastCudaError("kRNormUndo: kernel execution failed");
}
/*
 * Resizes every channel of every image with the bilinear-resize kernel.
 *
 * imgs:   (numChannels, imgPixels, numImages) with given imgStride
 * target: (numChannels, tgtPixels, numImages)
 *
 * imgSize = scale * tgtSize
 */
void convResizeBilinear(NVMatrix& images, NVMatrix& target, int imgSize, int tgtSize, float scale) {
    assert(!images.isTrans());
    assert(!target.isTrans());
    int imgPixels = imgSize * imgSize;
    int tgtPixels = tgtSize * tgtSize;
    int numChannels = images.getNumRows() / imgPixels;
    int numImages = images.getNumCols();
    assert(images.getNumRows() == numChannels * imgPixels);
    target.resize(numChannels * tgtPixels, numImages);
    assert(target.isContiguous());
    // The target image is tiled into 4x4 pixel chunks; each block handles one chunk.
    int numChunksX = DIVUP(tgtSize, 4);
    int numChunks = numChunksX * numChunksX;
    // Offset that maps target-pixel coordinates into source coordinates so that
    // the two image centers coincide.
    double imgCenter = imgSize * 0.5;
    double tgtCenter = tgtSize * 0.5;
    double centerScale = imgCenter - tgtCenter * scale;
    // More images per thread when the case count divides evenly; bounds checks
    // only when it does not.
    int imgsPerThread = numImages % 128 == 0 ? 4 : numImages % 64 == 0 ? 2 : 1;
    bool checkCaseBounds = numImages % (32*imgsPerThread) != 0;
    dim3 threads(32, 16);
    dim3 blocks(DIVUP(numImages, imgsPerThread * 32), numChannels * numChunks);
    // Dispatch to the template instantiation matching the launch configuration.
    switch (imgsPerThread) {
      case 4:
        if (checkCaseBounds) {
            hipFuncSetCacheConfig(kResizeBilinear<4, true>, hipFuncCachePreferL1);
            hipLaunchKernelGGL(( kResizeBilinear<4, true>), dim3(blocks), dim3(threads), 0, 0, images.getDevData(), target.getDevData(), imgSize, tgtSize, numImages, images.getStride(), scale, centerScale);
        } else {
            hipFuncSetCacheConfig(kResizeBilinear<4, false>, hipFuncCachePreferL1);
            hipLaunchKernelGGL(( kResizeBilinear<4, false>), dim3(blocks), dim3(threads), 0, 0, images.getDevData(), target.getDevData(), imgSize, tgtSize, numImages, images.getStride(), scale, centerScale);
        }
        break;
      case 2:
        if (checkCaseBounds) {
            hipFuncSetCacheConfig(kResizeBilinear<2, true>, hipFuncCachePreferL1);
            hipLaunchKernelGGL(( kResizeBilinear<2, true>), dim3(blocks), dim3(threads), 0, 0, images.getDevData(), target.getDevData(), imgSize, tgtSize, numImages, images.getStride(), scale, centerScale);
        } else {
            hipFuncSetCacheConfig(kResizeBilinear<2, false>, hipFuncCachePreferL1);
            hipLaunchKernelGGL(( kResizeBilinear<2, false>), dim3(blocks), dim3(threads), 0, 0, images.getDevData(), target.getDevData(), imgSize, tgtSize, numImages, images.getStride(), scale, centerScale);
        }
        break;
      default:
        if (checkCaseBounds) {
            hipFuncSetCacheConfig(kResizeBilinear<1, true>, hipFuncCachePreferL1);
            hipLaunchKernelGGL(( kResizeBilinear<1, true>), dim3(blocks), dim3(threads), 0, 0, images.getDevData(), target.getDevData(), imgSize, tgtSize, numImages, images.getStride(), scale, centerScale);
        } else {
            hipFuncSetCacheConfig(kResizeBilinear<1, false>, hipFuncCachePreferL1);
            hipLaunchKernelGGL(( kResizeBilinear<1, false>), dim3(blocks), dim3(threads), 0, 0, images.getDevData(), target.getDevData(), imgSize, tgtSize, numImages, images.getStride(), scale, centerScale);
        }
        break;
    }
    getLastCudaError("convResizeBilinear: kernel execution failed");
}
/*
 * imgs:   (3, imgPixels, numImages) with given imgStride
 * target: (3, imgPixels, numImages)
 *
 * Converts packed RGB planes to YUV via kRGBToYUV. The kernel is instantiated
 * on (imgsPerThread, checkCaseBounds) to match the launch configuration.
 */
void convRGBToYUV(NVMatrix& images, NVMatrix& target) {
    assert(!images.isTrans());
    assert(!target.isTrans());
    int imgPixels = images.getNumRows() / 3;
    int numImages = images.getNumCols();
    assert(images.getNumRows() == 3 * imgPixels);
    target.resize(3 * imgPixels, numImages);
    assert(target.isContiguous());
    // Wider per-thread image batches when the case count divides evenly.
    int imgsPerThread = numImages % 128 == 0 ? 4 : numImages % 64 == 0 ? 2 : 1;
    bool checkCaseBounds = numImages % (32*imgsPerThread) != 0;
    dim3 threads(32, 4);
    dim3 blocks(DIVUP(numImages, imgsPerThread * 32), DIVUP(imgPixels, 4));
    switch (imgsPerThread) {
      case 4:
        if (checkCaseBounds) {
            hipFuncSetCacheConfig(kRGBToYUV<4, true>, hipFuncCachePreferL1);
            hipLaunchKernelGGL(( kRGBToYUV<4, true>), dim3(blocks), dim3(threads), 0, 0, images.getDevData(), target.getDevData(), imgPixels, numImages, images.getStride());
        } else {
            hipFuncSetCacheConfig(kRGBToYUV<4, false>, hipFuncCachePreferL1);
            hipLaunchKernelGGL(( kRGBToYUV<4, false>), dim3(blocks), dim3(threads), 0, 0, images.getDevData(), target.getDevData(), imgPixels, numImages, images.getStride());
        }
        break;
      case 2:
        if (checkCaseBounds) {
            hipFuncSetCacheConfig(kRGBToYUV<2, true>, hipFuncCachePreferL1);
            hipLaunchKernelGGL(( kRGBToYUV<2, true>), dim3(blocks), dim3(threads), 0, 0, images.getDevData(), target.getDevData(), imgPixels, numImages, images.getStride());
        } else {
            hipFuncSetCacheConfig(kRGBToYUV<2, false>, hipFuncCachePreferL1);
            hipLaunchKernelGGL(( kRGBToYUV<2, false>), dim3(blocks), dim3(threads), 0, 0, images.getDevData(), target.getDevData(), imgPixels, numImages, images.getStride());
        }
        break;
      default:
        if (checkCaseBounds) {
            hipFuncSetCacheConfig(kRGBToYUV<1, true>, hipFuncCachePreferL1);
            hipLaunchKernelGGL(( kRGBToYUV<1, true>), dim3(blocks), dim3(threads), 0, 0, images.getDevData(), target.getDevData(), imgPixels, numImages, images.getStride());
        } else {
            hipFuncSetCacheConfig(kRGBToYUV<1, false>, hipFuncCachePreferL1);
            hipLaunchKernelGGL(( kRGBToYUV<1, false>), dim3(blocks), dim3(threads), 0, 0, images.getDevData(), target.getDevData(), imgPixels, numImages, images.getStride());
        }
        break;
    }
    getLastCudaError("convRGBToYUV: kernel execution failed");
}
/*
* imgs: (3, imgPixels, numImages) with given imgStride
* target: (3, imgPixels, numImages)
*/
// Converts 3-channel RGB images to L*a*b* via the kRGBToLAB kernel.
// `center` is forwarded as a compile-time flag to the kernel; presumably it
// shifts the output channels toward zero mean -- confirm against kRGBToLAB.
void convRGBToLAB(NVMatrix& images, NVMatrix& target, bool center) {
assert(!images.isTrans());
assert(!target.isTrans());
// Rows hold 3 channel planes of imgPixels each.
int imgPixels = images.getNumRows() / 3;
int numImages = images.getNumCols();
assert(images.getNumRows() == 3 * imgPixels);
target.resize(3 * imgPixels, numImages);
assert(target.isContiguous());
// More images per thread when the case count divides evenly; bounds checks
// in the kernel only when it does not.
int imgsPerThread = numImages % 128 == 0 ? 4 : numImages % 64 == 0 ? 2 : 1;
bool checkCaseBounds = numImages % (32*imgsPerThread) != 0;
dim3 threads(32, 4);
dim3 blocks(DIVUP(numImages, imgsPerThread * 32), DIVUP(imgPixels, 4));
// Static dispatch over (imgsPerThread, checkCaseBounds, center); all twelve
// instantiations prefer L1 over shared memory.
if (imgsPerThread == 4) {
if (center) {
if (checkCaseBounds) {
hipFuncSetCacheConfig(kRGBToLAB<4, true, true>, hipFuncCachePreferL1);
hipLaunchKernelGGL(( kRGBToLAB<4, true, true>), dim3(blocks), dim3(threads), 0, 0, images.getDevData(), target.getDevData(), imgPixels, numImages, images.getStride());
} else {
hipFuncSetCacheConfig(kRGBToLAB<4, false, true>, hipFuncCachePreferL1);
hipLaunchKernelGGL(( kRGBToLAB<4, false, true>), dim3(blocks), dim3(threads), 0, 0, images.getDevData(), target.getDevData(), imgPixels, numImages, images.getStride());
}
} else {
if (checkCaseBounds) {
hipFuncSetCacheConfig(kRGBToLAB<4, true, false>, hipFuncCachePreferL1);
hipLaunchKernelGGL(( kRGBToLAB<4, true, false>), dim3(blocks), dim3(threads), 0, 0, images.getDevData(), target.getDevData(), imgPixels, numImages, images.getStride());
} else {
hipFuncSetCacheConfig(kRGBToLAB<4, false, false>, hipFuncCachePreferL1);
hipLaunchKernelGGL(( kRGBToLAB<4, false, false>), dim3(blocks), dim3(threads), 0, 0, images.getDevData(), target.getDevData(), imgPixels, numImages, images.getStride());
}
}
} else if (imgsPerThread == 2) {
if (center) {
if (checkCaseBounds) {
hipFuncSetCacheConfig(kRGBToLAB<2, true, true>, hipFuncCachePreferL1);
hipLaunchKernelGGL(( kRGBToLAB<2, true, true>), dim3(blocks), dim3(threads), 0, 0, images.getDevData(), target.getDevData(), imgPixels, numImages, images.getStride());
} else {
hipFuncSetCacheConfig(kRGBToLAB<2, false, true>, hipFuncCachePreferL1);
hipLaunchKernelGGL(( kRGBToLAB<2, false, true>), dim3(blocks), dim3(threads), 0, 0, images.getDevData(), target.getDevData(), imgPixels, numImages, images.getStride());
}
} else {
if (checkCaseBounds) {
hipFuncSetCacheConfig(kRGBToLAB<2, true, false>, hipFuncCachePreferL1);
hipLaunchKernelGGL(( kRGBToLAB<2, true, false>), dim3(blocks), dim3(threads), 0, 0, images.getDevData(), target.getDevData(), imgPixels, numImages, images.getStride());
} else {
hipFuncSetCacheConfig(kRGBToLAB<2, false, false>, hipFuncCachePreferL1);
hipLaunchKernelGGL(( kRGBToLAB<2, false, false>), dim3(blocks), dim3(threads), 0, 0, images.getDevData(), target.getDevData(), imgPixels, numImages, images.getStride());
}
}
} else {
if (center) {
if (checkCaseBounds) {
hipFuncSetCacheConfig(kRGBToLAB<1, true, true>, hipFuncCachePreferL1);
hipLaunchKernelGGL(( kRGBToLAB<1, true, true>), dim3(blocks), dim3(threads), 0, 0, images.getDevData(), target.getDevData(), imgPixels, numImages, images.getStride());
} else {
hipFuncSetCacheConfig(kRGBToLAB<1, false, true>, hipFuncCachePreferL1);
hipLaunchKernelGGL(( kRGBToLAB<1, false, true>), dim3(blocks), dim3(threads), 0, 0, images.getDevData(), target.getDevData(), imgPixels, numImages, images.getStride());
}
} else {
if (checkCaseBounds) {
hipFuncSetCacheConfig(kRGBToLAB<1, true, false>, hipFuncCachePreferL1);
hipLaunchKernelGGL(( kRGBToLAB<1, true, false>), dim3(blocks), dim3(threads), 0, 0, images.getDevData(), target.getDevData(), imgPixels, numImages, images.getStride());
} else {
hipFuncSetCacheConfig(kRGBToLAB<1, false, false>, hipFuncCachePreferL1);
hipLaunchKernelGGL(( kRGBToLAB<1, false, false>), dim3(blocks), dim3(threads), 0, 0, images.getDevData(), target.getDevData(), imgPixels, numImages, images.getStride());
}
}
}
getLastCudaError("convRGBToLAB: kernel execution failed");
}
/*
 * Copies a tgtSize x tgtSize window starting at (startY, startX) out of every
 * channel of every image.
 *
 * imgs:   (numChannels, imgPixels, numImages) with given imgStride
 * target: (numChannels, tgtPixels, numImages)
 */
void convCrop(NVMatrix& imgs, NVMatrix& target, int imgSize, int tgtSize, int startY, int startX) {
    int numImages = imgs.getNumCols();
    int imgPixels = imgSize * imgSize;
    int tgtPixels = tgtSize * tgtSize;
    int numChannels = imgs.getNumRows() / imgPixels;
    // The crop window must lie entirely inside the source image.
    assert(imgs.getNumRows() == imgPixels * numChannels);
    assert(imgPixels == imgSize * imgSize);
    assert(imgSize - startY >= tgtSize);
    assert(imgSize - startX >= tgtSize);
    assert(startY >= 0);
    assert(startX >= 0);
    target.resize(numChannels * tgtPixels, numImages);
    // Each thread copies imgsPerThread cases; the kernel bounds-checks cases
    // only when numImages is not a multiple of 32*imgsPerThread.
    int imgsPerThread = numImages % 128 == 0 ? 4 : numImages % 64 == 0 ? 2 : 1;
    bool checkCaseBounds = numImages % (32*imgsPerThread) != 0;
    dim3 blocks(DIVUP(numImages, 32 * imgsPerThread), numChannels * DIVUP(tgtPixels, 4));
    dim3 threads(32, 4);
    switch (imgsPerThread) {
      case 4:
        if (checkCaseBounds) {
            hipLaunchKernelGGL(( kCrop<4, true>), dim3(blocks), dim3(threads), 0, 0, imgs.getDevData(), target.getDevData(), numImages, imgs.getStride(), imgSize, tgtSize, startY, startX);
        } else {
            hipLaunchKernelGGL(( kCrop<4, false>), dim3(blocks), dim3(threads), 0, 0, imgs.getDevData(), target.getDevData(), numImages, imgs.getStride(), imgSize, tgtSize, startY, startX);
        }
        break;
      case 2:
        if (checkCaseBounds) {
            hipLaunchKernelGGL(( kCrop<2, true>), dim3(blocks), dim3(threads), 0, 0, imgs.getDevData(), target.getDevData(), numImages, imgs.getStride(), imgSize, tgtSize, startY, startX);
        } else {
            hipLaunchKernelGGL(( kCrop<2, false>), dim3(blocks), dim3(threads), 0, 0, imgs.getDevData(), target.getDevData(), numImages, imgs.getStride(), imgSize, tgtSize, startY, startX);
        }
        break;
      default:
        if (checkCaseBounds) {
            hipLaunchKernelGGL(( kCrop<1, true>), dim3(blocks), dim3(threads), 0, 0, imgs.getDevData(), target.getDevData(), numImages, imgs.getStride(), imgSize, tgtSize, startY, startX);
        } else {
            hipLaunchKernelGGL(( kCrop<1, false>), dim3(blocks), dim3(threads), 0, 0, imgs.getDevData(), target.getDevData(), numImages, imgs.getStride(), imgSize, tgtSize, startY, startX);
        }
        break;
    }
    getLastCudaError("convCrop: kernel execution failed");
}
/*
* images: (numFilters, imgPixels, numImages)
* ticas: (numFilters, imgPixels, numImages)
* target: (numFilters, imgPixels, numImages) (out)
*
* Computes TICA-style gradient for given feature maps
* f(x) = exp(-(sum_i{x_i^2}^(1/2)))
* dlogf(x)/df(x) = -x_i / (sum_i{x_i^2}^(1/2) + eps)
*
* eps added for numerical stability
*/
// Dispatches kTICAGrad_manyfilter to compute the TICA-style gradient described
// in the comment above (uses `ticas` in addition to the feature maps).
// scaleTarget == 0 means `target` is (re)allocated; otherwise it must already
// match `images` and is scaled into the result.
void convTICAGrad(NVMatrix& images, NVMatrix& ticas, NVMatrix& target, int numFilters, int sizeX, float scaleTarget, float scaleOutput) {
int numImages = images.getNumCols();
int imgPixels = images.getNumRows() / numFilters;
assert(images.getNumRows() == numFilters * imgPixels);
// Feature maps must be square.
int imgSize = int(sqrt(imgPixels));
assert(imgSize * imgSize == imgPixels);
assert(!images.isTrans());
assert(images.isContiguous());
assert(numFilters % 16 == 0 || numFilters <= 8);
assert(ticas.isSameDims(images));
assert(ticas.isContiguous());
if (scaleTarget == 0) {
target.resize(images);
} else {
assert(target.isSameDims(images));
}
assert(target.isContiguous());
// TEMPORARY
assert(numFilters > 8);
assert(sizeX < 6);
// Single fixed launch shape: 4 imgs/thread x 32 cases, 4 filters/thread in
// groups of 2; only the case-bounds check is selected at runtime.
dim3 threads(32, 4);
dim3 blocks(DIVUP(numImages, 32*4) * imgSize, (numFilters / (4 * 2)) * imgSize);
bool checkCaseBounds = (numImages % 128) != 0;
if (checkCaseBounds) {
hipFuncSetCacheConfig(kTICAGrad_manyfilter<4, 32, 4, 2, true>, hipFuncCachePreferL1);
hipLaunchKernelGGL(( kTICAGrad_manyfilter<4, 32, 4, 2, true>), dim3(blocks), dim3(threads), 0, 0, images.getDevData(), ticas.getDevData(), target.getDevData(),
imgSize, numFilters, numImages, sizeX, scaleTarget, scaleOutput);
} else {
hipFuncSetCacheConfig(kTICAGrad_manyfilter<4, 32, 4, 2, false>, hipFuncCachePreferL1);
hipLaunchKernelGGL(( kTICAGrad_manyfilter<4, 32, 4, 2, false>), dim3(blocks), dim3(threads), 0, 0, images.getDevData(), ticas.getDevData(), target.getDevData(),
imgSize, numFilters, numImages, sizeX, scaleTarget, scaleOutput);
}
getLastCudaError("convTICAGrad: kernel execution failed");
}
/*
* images: (numFilters, imgPixels, numImages)
* target: (numFilters, imgPixels, numImages) (out)
*
* Computes TICA-style gradient for given feature maps
* f(x) = exp(-(sum_i{x_i^2}^(1/2)))
* dlogf(x)/df(x) = -x_i / (sum_i{x_i^2}^(1/2) + eps)
*
* eps added for numerical stability
*/
// Dispatches kTICA_manyfilter; same setup and launch shape as convTICAGrad
// but computed from the feature maps alone (no `ticas` input).
void convTICA(NVMatrix& images, NVMatrix& target, int numFilters, int sizeX, float scaleTarget, float scaleOutput) {
int numImages = images.getNumCols();
int imgPixels = images.getNumRows() / numFilters;
assert(images.getNumRows() == numFilters * imgPixels);
// Feature maps must be square.
int imgSize = int(sqrt(imgPixels));
assert(imgSize * imgSize == imgPixels);
assert(!images.isTrans());
assert(images.isContiguous());
assert(numFilters % 16 == 0 || numFilters <= 8);
// scaleTarget == 0: allocate target fresh; otherwise blend into existing.
if (scaleTarget == 0) {
target.resize(images);
} else {
assert(target.isSameDims(images));
}
assert(target.isContiguous());
// TEMPORARY
assert(numFilters > 8);
assert(sizeX < 6);
// Single fixed launch shape; only the case-bounds check varies at runtime.
dim3 threads(32, 4);
dim3 blocks(DIVUP(numImages, 32*4) * imgSize, (numFilters / (4 * 2)) * imgSize);
checkCaseBounds = false;
bool checkCaseBounds = (numImages % 128) != 0;
if (checkCaseBounds) {
hipFuncSetCacheConfig(kTICA_manyfilter<4, 32, 4, 2, true>, hipFuncCachePreferL1);
hipLaunchKernelGGL(( kTICA_manyfilter<4, 32, 4, 2, true>), dim3(blocks), dim3(threads), 0, 0, images.getDevData(), target.getDevData(),
imgSize, numFilters, numImages, sizeX, scaleTarget, scaleOutput);
} else {
hipFuncSetCacheConfig(kTICA_manyfilter<4, 32, 4, 2, false>, hipFuncCachePreferL1);
hipLaunchKernelGGL(( kTICA_manyfilter<4, 32, 4, 2, false>), dim3(blocks), dim3(threads), 0, 0, images.getDevData(), target.getDevData(),
imgSize, numFilters, numImages, sizeX, scaleTarget, scaleOutput);
}
getLastCudaError("convTICA: kernel execution failed");
}
/*
* images: (numFilters, imgPixels, numImages)
* meanDiffs: (numFilters, imgPixels, numImages)
* denoms: (numFilters, imgPixels, numImages) (out)
* target: (numFilters, imgPixels, numImages) (out)
* Note: at present, I have no code to compute the meanDiffs. So it should be set
* to be equal to images. In other words, this isn't really doing contrast normalization,
* just response normalization.
*/
// Cross-map response/contrast normalization: dispatches kFCNorm over sizeF
// neighboring filter maps. As noted above, pass images as meanDiffs to get
// plain response normalization. `denoms` receives the per-element divisors
// (needed later by the undo pass); `blocked` selects the blocked variant of
// the filter neighborhood.
void convContrastNormCrossMap(NVMatrix& images, NVMatrix& meanDiffs, NVMatrix& denoms, NVMatrix& target,
int numFilters, int sizeF, float addScale, float powScale, bool blocked) {
int numImages = images.getNumCols();
int imgPixels = images.getNumRows() / numFilters;
assert(images.getNumRows() == numFilters * imgPixels);
// Feature maps must be square.
int imgSize = int(sqrt(imgPixels));
assert(imgSize * imgSize == imgPixels);
assert(meanDiffs.isSameDims(images));
assert(sizeF > 0 && sizeF <= numFilters);
assert(!meanDiffs.isTrans());
assert(!images.isTrans());
assert(images.isContiguous());
assert(meanDiffs.isContiguous());
assert(numFilters % 16 == 0);
target.resize(images);
denoms.resize(images);
assert(target.isContiguous());
// Fixed launch shape: 4 imgs/thread x 32 cases, 4 filters per block row.
bool checkCaseBounds = numImages % 128 != 0;
dim3 threads(32, 4);
dim3 blocks(DIVUP(numImages,32*4) * imgSize, (numFilters / 4) * imgSize);
if (blocked) {
if (checkCaseBounds) {
hipFuncSetCacheConfig(kFCNorm<4, 32, 4, true, true>, hipFuncCachePreferL1);
hipLaunchKernelGGL(( kFCNorm<4, 32, 4, true, true>), dim3(blocks), dim3(threads), 0, 0, images.getDevData(), meanDiffs.getDevData(), denoms.getDevData(), target.getDevData(),
imgSize, numFilters, numImages, sizeF, addScale, powScale);
} else {
hipFuncSetCacheConfig(kFCNorm<4, 32, 4, false, true>, hipFuncCachePreferL1);
hipLaunchKernelGGL(( kFCNorm<4, 32, 4, false, true>), dim3(blocks), dim3(threads), 0, 0, images.getDevData(), meanDiffs.getDevData(), denoms.getDevData(), target.getDevData(),
imgSize, numFilters, numImages, sizeF, addScale, powScale);
}
} else {
if (checkCaseBounds) {
hipFuncSetCacheConfig(kFCNorm<4, 32, 4, true, false>, hipFuncCachePreferL1);
hipLaunchKernelGGL(( kFCNorm<4, 32, 4, true, false>), dim3(blocks), dim3(threads), 0, 0, images.getDevData(), meanDiffs.getDevData(), denoms.getDevData(), target.getDevData(),
imgSize, numFilters, numImages, sizeF, addScale, powScale);
} else {
hipFuncSetCacheConfig(kFCNorm<4, 32, 4, false, false>, hipFuncCachePreferL1);
hipLaunchKernelGGL(( kFCNorm<4, 32, 4, false, false>), dim3(blocks), dim3(threads), 0, 0, images.getDevData(), meanDiffs.getDevData(), denoms.getDevData(), target.getDevData(),
imgSize, numFilters, numImages, sizeF, addScale, powScale);
}
}
getLastCudaError("convContrastNormCrossMap: kernel execution failed");
}
/*
* outGrads: (numFilters, imgPixels, numImages)
* denoms: (numFilters, imgPixels, numImages)
* inputs: (numFilters, imgPixels, numImages)
* acts: (numFilters, imgPixels, numImages)
* target: (numFilters, imgPixels, numImages)
*
* THIS WILL OVERWRITE THE ACTS MATRIX.
*/
// Backward pass of cross-map response normalization. Runs in two phases:
// a prelim kernel that rewrites `acts` (see comment below -- ACTS IS
// OVERWRITTEN), then the main kFRNormUndo dispatch. scaleTargets/scaleOutput
// blend the result into `target`.
void convResponseNormCrossMapUndo(NVMatrix& outGrads, NVMatrix& denoms, NVMatrix& inputs, NVMatrix& acts, NVMatrix& target, int numFilters,
int sizeF, float addScale, float powScale, bool blocked, float scaleTargets, float scaleOutput) {
int numImages = outGrads.getNumCols();
int imgPixels = outGrads.getNumRows() / numFilters;
// Feature maps must be square.
int imgSize = int(sqrt(imgPixels));
assert(imgSize * imgSize == imgPixels);
assert(sizeF > 0 && sizeF <= numFilters);
assert(outGrads.getNumRows() == numFilters * imgPixels);
assert(denoms.isSameDims(outGrads));
assert(acts.isSameDims(denoms));
assert(!denoms.isTrans());
assert(!outGrads.isTrans());
assert(!acts.isTrans());
assert(!target.isTrans());
assert(outGrads.isContiguous());
assert(numFilters % 16 == 0);
target.resize(outGrads);
assert(target.isContiguous());
// First do acts := -2 x scale x acts x outGrads / denoms
// so that the main routine only has to do an addition in its inner loop.
int prelimEltsPerThread = 4;
dim3 threads(128);
// Grid capped at 512 blocks; the prelim kernel covers all elements anyway.
dim3 blocks(MIN(512, DIVUP(outGrads.getNumElements(),(threads.x * prelimEltsPerThread))));
hipLaunchKernelGGL(( kRNormUndoPrelims<128, 4>), dim3(blocks), dim3(threads), 0, 0, acts.getDevData(), denoms.getDevData(), outGrads.getDevData(), outGrads.getNumElements(), -2*addScale*powScale);
// Now the main routine
dim3 threads2 = dim3(32, 4);
dim3 blocks2 = dim3(DIVUP(numImages,32*4) * imgSize, (numFilters / 4) * imgSize);
bool checkCaseBounds = (numImages % 128) != 0;
// Static dispatch over (add-to-target, case-bounds, blocked).
if (blocked) {
if (scaleTargets == 0 && scaleOutput == 1) {
if (checkCaseBounds) {
hipFuncSetCacheConfig(kFRNormUndo<4, 32, 4, false, true, true>, hipFuncCachePreferL1);
hipLaunchKernelGGL(( kFRNormUndo<4, 32, 4, false, true, true>), dim3(blocks2), dim3(threads2), 0, 0, outGrads.getDevData(), denoms.getDevData(), inputs.getDevData(), acts.getDevData(),
target.getDevData(), imgSize, numFilters, numImages, sizeF, powScale,
scaleTargets, scaleOutput);
} else {
hipFuncSetCacheConfig(kFRNormUndo<4, 32, 4, false, false, true>, hipFuncCachePreferL1);
hipLaunchKernelGGL(( kFRNormUndo<4, 32, 4, false, false, true>), dim3(blocks2), dim3(threads2), 0, 0, outGrads.getDevData(), denoms.getDevData(), inputs.getDevData(), acts.getDevData(),
target.getDevData(), imgSize, numFilters, numImages, sizeF, powScale,
scaleTargets, scaleOutput);
}
} else {
if (checkCaseBounds) {
hipFuncSetCacheConfig(kFRNormUndo<4, 32, 4, true, true, true>, hipFuncCachePreferL1);
hipLaunchKernelGGL(( kFRNormUndo<4, 32, 4, true, true, true>), dim3(blocks2), dim3(threads2), 0, 0, outGrads.getDevData(), denoms.getDevData(), inputs.getDevData(), acts.getDevData(),
target.getDevData(), imgSize, numFilters, numImages, sizeF, powScale,
scaleTargets, scaleOutput);
} else {
hipFuncSetCacheConfig(kFRNormUndo<4, 32, 4, true, false, true>, hipFuncCachePreferL1);
hipLaunchKernelGGL(( kFRNormUndo<4, 32, 4, true, false, true>), dim3(blocks2), dim3(threads2), 0, 0, outGrads.getDevData(), denoms.getDevData(), inputs.getDevData(), acts.getDevData(),
target.getDevData(), imgSize, numFilters, numImages, sizeF, powScale,
scaleTargets, scaleOutput);
}
}
} else {
if (scaleTargets == 0 && scaleOutput == 1) {
if (checkCaseBounds) {
hipFuncSetCacheConfig(kFRNormUndo<4, 32, 4, false, true, false>, hipFuncCachePreferL1);
hipLaunchKernelGGL(( kFRNormUndo<4, 32, 4, false, true, false>), dim3(blocks2), dim3(threads2), 0, 0, outGrads.getDevData(), denoms.getDevData(), inputs.getDevData(), acts.getDevData(),
target.getDevData(), imgSize, numFilters, numImages, sizeF, powScale,
scaleTargets, scaleOutput);
} else {
hipFuncSetCacheConfig(kFRNormUndo<4, 32, 4, false, false, false>, hipFuncCachePreferL1);
hipLaunchKernelGGL(( kFRNormUndo<4, 32, 4, false, false, false>), dim3(blocks2), dim3(threads2), 0, 0, outGrads.getDevData(), denoms.getDevData(), inputs.getDevData(), acts.getDevData(),
target.getDevData(), imgSize, numFilters, numImages, sizeF, powScale,
scaleTargets, scaleOutput);
}
} else {
if (checkCaseBounds) {
hipFuncSetCacheConfig(kFRNormUndo<4, 32, 4, true, true, false>, hipFuncCachePreferL1);
hipLaunchKernelGGL(( kFRNormUndo<4, 32, 4, true, true, false>), dim3(blocks2), dim3(threads2), 0, 0, outGrads.getDevData(), denoms.getDevData(), inputs.getDevData(), acts.getDevData(),
target.getDevData(), imgSize, numFilters, numImages, sizeF, powScale,
scaleTargets, scaleOutput);
} else {
hipFuncSetCacheConfig(kFRNormUndo<4, 32, 4, true, false, false>, hipFuncCachePreferL1);
hipLaunchKernelGGL(( kFRNormUndo<4, 32, 4, true, false, false>), dim3(blocks2), dim3(threads2), 0, 0, outGrads.getDevData(), denoms.getDevData(), inputs.getDevData(), acts.getDevData(),
target.getDevData(), imgSize, numFilters, numImages, sizeF, powScale,
scaleTargets, scaleOutput);
}
}
}
getLastCudaError("convResponseNormCrossMapUndo: kernel execution failed");
}
// Cross-map response normalization: contrast normalization with the images
// themselves passed as the "mean differences" input (see the note above
// convContrastNormCrossMap).
void convResponseNormCrossMap(NVMatrix& images, NVMatrix& denoms, NVMatrix& target, int numFilters, int sizeF, float addScale, float powScale, bool blocked) {
convContrastNormCrossMap(images, images, denoms, target, numFilters, sizeF, addScale, powScale, blocked);
}
/* ======================================================================
 * End of the HIP translation above. The remainder of this file is the
 * original CUDA source: d90438c96024f2044821e6e4a8c31b6f91d2e2ad.cu
 * ====================================================================== */
/*
* Copyright (c) 2011, Alex Krizhevsky (akrizhevsky@gmail.com)
* All rights reserved.
*
* Redistribution and use in source and binary forms, with or without modification,
* are permitted provided that the following conditions are met:
*
* - Redistributions of source code must retain the above copyright notice,
* this list of conditions and the following disclaimer.
*
* - Redistributions in binary form must reproduce the above copyright notice,
* this list of conditions and the following disclaimer in the documentation
* and/or other materials provided with the distribution.
*
* THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
* AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
* IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
* ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE
* LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
* DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
* LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND
* ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING
* NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE,
* EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
*/
#include <iostream>
#include <assert.h>
#include <nvmatrix_kernels.cuh>
#include <nvmatrix.cuh>
#include <conv_util.cuh>
using namespace std;
// Device helper: returns a * a.
__device__ inline float square(const float a) {
return a * a;
}
/*
* blockIdx.y determines module in batches of B_Y
* blockIdx.x determines filter in batches of B_X * filtersPerThread
*
* weights: (numModules, numColors, filterPixels, numFilters)
* Not fully coalesced if B_X < 32, so use cache.
*/
template <int B_Y, int B_X, int filtersPerThread>
__global__ void kNormalizeLCWeights(float* weights, const uint numFilters, const int numModules, const uint weightsPerFilter, const float norm) {
// Each thread handles one module row and filtersPerThread filters spaced
// B_X apart (see the layout comment above).
const uint moduleIdx = B_Y * blockIdx.y + threadIdx.y;
const uint filterIdx = B_X * blockIdx.x + threadIdx.x;
// Per-filter accumulator: first the squared L2 norm, later the scale factor.
float prod[filtersPerThread];
#pragma unroll
for (uint i = 0; i < filtersPerThread; ++i) {
prod[i] = 0;
}
if (moduleIdx < numModules) {
weights += moduleIdx * weightsPerFilter * numFilters + filterIdx;
// Pass 1: accumulate the squared norm of each filter.
for (uint p = 0; p < weightsPerFilter; ++p) {
#pragma unroll
for (uint i = 0; i < filtersPerThread; ++i) {
prod[i] += square(weights[p * numFilters + i * B_X]);
}
}
// Turn norms into scale factors: shrink filters whose L2 norm exceeds
// `norm`, leave the rest untouched (factor 1).
#pragma unroll
for (uint i = 0; i < filtersPerThread; ++i) {
prod[i] = sqrtf(prod[i]);
prod[i] = prod[i] > norm ? __fdividef(norm, prod[i]) : 1.0f;
}
// Pass 2: rescale the weights in place.
for (uint p = 0; p < weightsPerFilter; ++p) {
#pragma unroll
for (uint i = 0; i < filtersPerThread; ++i) {
weights[p * numFilters + i * B_X] *= prod[i];
}
}
}
}
/*
 * weights: (numModules, numColors, filterPixels, numFilters)
 *
 * Caps the L2 norm of each local (per-module) filter at `norm` by dispatching
 * kNormalizeLCWeights with a block shape matched to numFilters.
 */
void normalizeLocalWeights(NVMatrix& weights, int numModules, float norm) {
    int numFilters = weights.getNumCols();
    int weightsPerFilter = weights.getNumRows() / numModules;
    assert(numModules * weightsPerFilter == weights.getNumRows());
    assert(!weights.isTrans());
    assert(weights.isContiguous());
    assert(numFilters % 16 == 0);
    // 32-wide blocks when numFilters divides evenly; otherwise fall back to
    // 16-wide blocks with more rows per block.
    int bx = numFilters % 32 == 0 ? 32 : 16;
    int by = bx == 32 ? 4 : 8;
    int filtersPerThread = numFilters % 128 == 0 ? 4 : numFilters % 64 == 0 ? 2 : 1;
    dim3 blocks(numFilters / (bx * filtersPerThread), DIVUP(numModules, by));
    dim3 threads(bx, by);
    if (filtersPerThread == 4) {
        cudaFuncSetCacheConfig(kNormalizeLCWeights<4, 32, 4>, cudaFuncCachePreferL1);
        kNormalizeLCWeights<4, 32, 4><<<blocks, threads>>>(weights.getDevData(), numFilters, numModules, weightsPerFilter, norm);
    } else if (filtersPerThread == 2) {
        cudaFuncSetCacheConfig(kNormalizeLCWeights<4, 32, 2>, cudaFuncCachePreferL1);
        kNormalizeLCWeights<4, 32, 2><<<blocks, threads>>>(weights.getDevData(), numFilters, numModules, weightsPerFilter, norm);
    } else {
        if (numFilters % 32 == 0) {
            cudaFuncSetCacheConfig(kNormalizeLCWeights<4, 32, 1>, cudaFuncCachePreferL1);
            kNormalizeLCWeights<4, 32, 1><<<blocks, threads>>>(weights.getDevData(), numFilters, numModules, weightsPerFilter, norm);
        } else {
            cudaFuncSetCacheConfig(kNormalizeLCWeights<8, 16, 1>, cudaFuncCachePreferL1);
            kNormalizeLCWeights<8, 16, 1><<<blocks, threads>>>(weights.getDevData(), numFilters, numModules, weightsPerFilter, norm);
        }
    }
    // Every other dispatcher in this file checks for launch failures; this one
    // previously did not, so bad configurations went undetected.
    getLastCudaError("normalizeLocalWeights: kernel execution failed");
}
/*
* Block size 4x32
* blockIdx.x determines img idx in batches of 32*imgsPerThread
* blockIdx.y determines channel idx, pixel idx in batches of 4
*
* threadIdx.x determins case idx
* threadIdx.y determines pixel idx
*
* imgs: (numChannels, imgPixels, numImages) with given imgStride
* target: (numChannels, tgtPixels, numImages)
*/
template <int imgsPerThread, bool checkCaseBounds>
__global__ void kCrop(float* imgs, float* target, const uint numImages, const int imgStride,
const uint imgSize, const uint tgtSize, const uint startY, const uint startX) {
const uint imgPixels = imgSize * imgSize;
const uint tgtPixels = tgtSize * tgtSize;
// Case (image) index handled by this thread; each thread copies
// imgsPerThread cases spaced 32 apart.
const uint caseIdx = blockIdx.x * 32 * imgsPerThread + threadIdx.x;
// blockIdx.y packs (channel, 4-pixel chunk of the target).
const uint blockChanIdx = blockIdx.y / DIVUP(tgtPixels, 4);
const uint tgtPixelIdx = 4*(blockIdx.y % DIVUP(tgtPixels, 4)) + threadIdx.y;
const uint tgtPxY = tgtPixelIdx / tgtSize;
const uint tgtPxX = tgtPixelIdx % tgtSize;
// Corresponding pixel inside the source image, offset by the crop origin.
const uint srcPixelIdx = (startY + tgtPxY) * imgSize + startX + tgtPxX;
// Guard against the partial chunk at the end of the target image.
if (tgtPixelIdx < tgtPixels) {
imgs += (blockChanIdx * imgPixels + srcPixelIdx) * imgStride + caseIdx;
target += (blockChanIdx * tgtPixels + tgtPixelIdx) * numImages + caseIdx;
#pragma unroll
for (uint i = 0; i < imgsPerThread; ++i) {
if (!checkCaseBounds || (caseIdx + 32 * i < numImages)) {
target[i * 32] = imgs[i * 32];
}
}
}
}
/*
* Block size 4x32
* blockIdx.y determines pixel idx in batches of 4
* blockIdx.x determines case idx in batches of 32*imgsPerThread
* threadIdx.y determines pixel idx
* threadIdx.x determines case idx
*
* imgs: (3, imgPixels, numImages) with given imgStride
* target: (3, imgPixels, numImages)
*
* Each thread produces (y,u,v) values for a particular (r,g,b) pixel
*
* The RGB --> YUV transform is (http://en.wikipedia.org/wiki/YUV):
*
* [Y] [0.2126 0.7152 0.0722 ][R]
* [U] = [-0.09991 -0.33609 0.436 ][G]
* [V] [0.615 -0.55861 -0.05639][B]
*/
template <int imgsPerThread, bool checkCaseBounds>
__global__ void kRGBToYUV(float* imgs, float* target, const int imgPixels, const int numImages, const int imgStride) {
    // Per-pixel linear RGB -> YUV transform (coefficients in the header comment above).
    // Each thread converts one pixel of imgsPerThread cases.
    const int caseIdx = blockIdx.x * 32 * imgsPerThread + threadIdx.x;
    const int pxIdx = blockIdx.y * 4 + threadIdx.y;
    if (pxIdx >= imgPixels) {
        return;
    }
    const int srcChanStride = imgPixels * imgStride;   // stride between R/G/B planes
    const int dstChanStride = imgPixels * numImages;   // stride between Y/U/V planes
    float* src = imgs + pxIdx * imgStride + caseIdx;
    float* dst = target + pxIdx * numImages + caseIdx;
    #pragma unroll
    for (int i = 0; i < imgsPerThread; ++i) {
        if (checkCaseBounds && caseIdx + i * 32 >= numImages) {
            continue;
        }
        const float R = src[0 * srcChanStride + i * 32];
        const float G = src[1 * srcChanStride + i * 32];
        const float B = src[2 * srcChanStride + i * 32];
        dst[0 * dstChanStride + i * 32] = 0.2126f * R + 0.7152f * G + 0.0722f * B;    // Y
        dst[1 * dstChanStride + i * 32] = -0.09991f * R + -0.33609f * G + 0.436f * B; // U
        dst[2 * dstChanStride + i * 32] = 0.615f * R + -0.55861f * G + -0.05639f * B; // V
    }
}
// Nonlinearity of the XYZ -> L*a*b* forward transform (used by kRGBToLAB):
// cube root above the threshold, linear segment below it.
__device__ inline float labf(const float x) {
    return x > 0.0088564517f ? __powf(x, 0.3333f)
                             : 7.787037f * x + 0.13793103f;
}
/*
* Block size 4x32
* blockIdx.y determines pixel idx in batches of 4
* blockIdx.x determines case idx in batches of 32*imgsPerThread
* threadIdx.y determines pixel idx
* threadIdx.x determines case idx
*
* imgs: (3, imgPixels, numImages) with given imgStride
* target: (3, imgPixels, numImages)
*
* This proceeds in two steps.
*
* - First, RGB values are linearly transformed to XYZ as per
* http://en.wikipedia.org/wiki/CIE_XYZ_color_space
* - Second, XYZ values are nonlinearly transformed to L*a*b* as per
* http://en.wikipedia.org/wiki/Lab_color_space#The_forward_transformation
*
* Each thread produces (L*,a*,b*) values for a particular (r,g,b) pixel
*
* The RGB --> XYZ transform is:
*
* [X] [0.49 0.31 0.2 ][R]
* [Y] = 5.6506753 * [0.17697 0.8124 0.01063 ][G]
* [Z] [0 0.01 0.99 ][B]
*
* NOTE: The input should be in the range 0-1. Don't do mean-subtraction beforehand.
*
* Then X_max, Y_max, Z_max = 5.6506753.
*
* The range of the L* values is [0, 100].
* If the center flag is given, the range will be [-50, 50].
*
*/
template <int imgsPerThread, bool checkCaseBounds, bool center>
__global__ void kRGBToLAB(float* imgs, float* target, const int imgPixels, const int numImages, const int imgStride) {
    // RGB -> CIE L*a*b*, per pixel.  Same thread layout as kRGBToYUV:
    // blockIdx.x/threadIdx.x pick the case, blockIdx.y/threadIdx.y pick the pixel.
    // Inputs are expected in [0, 1]; see the header comment above for the matrices.
    const int caseIdx = blockIdx.x * 32 * imgsPerThread + threadIdx.x;
    const int pxIdx = blockIdx.y * 4 + threadIdx.y;
    if (pxIdx >= imgPixels) {
        return;
    }
    const int srcChanStride = imgPixels * imgStride;
    const int dstChanStride = imgPixels * numImages;
    float* src = imgs + pxIdx * imgStride + caseIdx;
    float* dst = target + pxIdx * numImages + caseIdx;
    #pragma unroll
    for (int i = 0; i < imgsPerThread; ++i) {
        if (checkCaseBounds && caseIdx + i * 32 >= numImages) {
            continue;
        }
        const float R = src[0 * srcChanStride + i * 32];
        const float G = src[1 * srcChanStride + i * 32];
        const float B = src[2 * srcChanStride + i * 32];
        // Linear RGB -> XYZ, then the labf() nonlinearity of each component.
        const float labX = labf(0.49f * R + 0.31f * G + 0.2f * B);
        const float labY = labf(0.17697f * R + 0.8124f * G + 0.01063f * B);
        const float labZ = labf(0.01f * G + 0.99f * B);
        dst[0 * dstChanStride + i * 32] = 116.0f * labY - 16.0f - (center ? 50.0f : 0); // L*
        dst[1 * dstChanStride + i * 32] = 500.0f * (labX - labY);                       // a*
        dst[2 * dstChanStride + i * 32] = 200.0f * (labY - labZ);                       // b*
    }
}
/*
* Block size 16x32.
* Each block produces a 4x4 chunk of the output image.
* threadIdx.y determines pixel idx in 4x4 chunk.
* threadIdx.x determines case idx.
* blockIdx.x determines case idx in batches of 32*imgsPerThread.
* blockIdx.y determines 4x4 chunk idx, channel idx.
*
* imgs: (numChannels, imgPixels, numImages) with given imgStride
* target: (numChannels, tgtPixels, numImages)
*
* imgSize = scale * tgtSize (roughly)
*
* This is a rather naive kernel that relies on cache for speed. But all it's doing
* is basic texture manipulation, which is very local in nature, so it should be ok.
* Also, it will in practice be a tiny fraction of the runtime of a large convnet.
*
* So that is my justification for being lazy here.
*/
template <int imgsPerThread, bool checkCaseBounds>
__global__ void kResizeBilinear(float* imgs, float* target, const int imgSize, const int tgtSize,
                                const int numImages, const int imgStride, const float scale,
                                const float centerScale) {
    // Bilinear resize: each block produces a 4x4 chunk of one channel of the
    // target for 32*imgsPerThread cases (layout in the header comment above).
    const int numChunksX = DIVUP(tgtSize, 4);
    const int numChunks = numChunksX * numChunksX;
    const int channelIdx = blockIdx.y / numChunks;
    const int chunkIdx = blockIdx.y % numChunks;
    const int chunkIdxX = chunkIdx % numChunksX;
    const int chunkIdxY = chunkIdx / numChunksX;
    const int caseIdx = blockIdx.x * 32 * imgsPerThread + threadIdx.x;
    const int imgPixels = imgSize * imgSize;
    const int tgtPixels = tgtSize * tgtSize;
    // This thread's target pixel within the 4x4 chunk.
    const int pxX = 4 * chunkIdxX + threadIdx.y % 4;
    const int pxY = 4 * chunkIdxY + threadIdx.y / 4;
    if (pxY < tgtSize && pxX < tgtSize) {
        const int pxIdx = pxY * tgtSize + pxX;
        imgs += channelIdx * imgPixels * imgStride + caseIdx;
        target += channelIdx * tgtPixels * numImages + pxIdx * numImages + caseIdx;
        // Source coordinate for this target pixel, clamped so that the +1
        // neighbors below stay inside the image.
        // This will cause slight distortions at the edges when upsampling in some cases.
        // But I think that's not a big deal.
        const float srcPxX = fmaxf(0.0f, fminf(__int2float_rn(imgSize) - 1.01f, __int2float_rn(pxX) * scale + centerScale));
        const float srcPxY = fmaxf(0.0f, fminf(__int2float_rn(imgSize) - 1.01f, __int2float_rn(pxY) * scale + centerScale));
        // u = weight of the LEFT column; w = weight of the BOTTOM row.
        const float u = floorf(srcPxX + 1) - srcPxX;
        const float w = srcPxY - floorf(srcPxY);
        // Consider doing max(0, min(imgSize, x)) here
        const int srcPx0 = (__float2int_rd(srcPxY) * imgSize + __float2int_rd(srcPxX)); // top-left
        const int srcPx1 = srcPx0 + 1; // top-right
        const int srcPx2 = srcPx0 + imgSize; // bottom-left
        const int srcPx3 = srcPx2 + 1; // bottom-right
        #pragma unroll
        for (int c = 0; c < imgsPerThread; ++c) {
            if (!checkCaseBounds || caseIdx + c * 32 < numImages) {
                const float val0 = imgs[srcPx0 * imgStride + c * 32];
                const float val1 = imgs[srcPx1 * imgStride + c * 32];
                const float val2 = imgs[srcPx2 * imgStride + c * 32];
                const float val3 = imgs[srcPx3 * imgStride + c * 32];
                // Interpolate horizontally along top (c0) and bottom (c1) rows...
                const float c0 = u * (val0 - val1) + val1;
                const float c1 = u * (val2 - val3) + val3;
                // ...then vertically between them.
                target[32 * c] = w * (c1 - c0) + c0;
            }
        }
    }
}
/*
* Block size B_YxB_X.
* B_X*imgsPerThread*blockIdx.x + threadIdx.x determines img idx
* B_Y*blockIdx.y + threadIdx.y determines img row (col if !horiz), channel idx
*
* imgs: (numChannels, imgPixels, numImages) with given imgStride
* filter: (1, 2*radius + 1)
* target: (numChannels, imgPixels, numImages)
*
* target can be the same matrix as imgs.
* radius must be one of 3, 5, 7, 9.
*
* Tried imgsPerThread, slower.
*/
/*
 * 1-D separable blur pass (horizontal or vertical, selected by `horiz`); each
 * thread streams one image's pixels along one (channel, row) pair, keeping a
 * ring of partially-accumulated outputs for the filterWidth-1 still-open windows.
 *
 * BUG FIX: shFilter must hold 2*radius (= filterWidth - 1) taps.  The load
 * below writes indices [0, filterWidth-2], and the accumulation loops read
 * shFilter[radius + col - r] (up to 2*radius - 1) and shFilter[r] for
 * r < radius*2.  The previous declaration shFilter[radius] overflowed
 * shared memory.  Requires B_X >= filterWidth - 1 (holds for radius <= 4
 * with B_X = 32, as launched by convGaussianBlur).
 */
template<int B_Y, int B_X, int radius>
__global__ void kGaussianBlur(float* imgs, float* filter, float* target, const int imgSize,
                              const int numImages, const int imgStride,
                              const bool horiz,
                              const float scaleTargets, const float scaleOutputs) {
    __shared__ float shFilter[2 * radius];  // first filterWidth-1 filter taps
    const int imgPixels = imgSize * imgSize;
    const int ty = B_Y * blockIdx.y + threadIdx.y;
    const int channelIdx = ty / imgSize;    // channel this thread's row belongs to
    const int rowIdx = ty % imgSize;        // row (column if !horiz) within the image
    const int imgIdx = B_X*blockIdx.x + threadIdx.x;
    const int filterWidth = 2*radius+1;
//    const int tidx = B_Y * threadIdx.y + threadIdx.x;
    if (horiz) {
        imgs += channelIdx * imgPixels * imgStride + rowIdx * imgSize * imgStride + imgIdx;
        target += channelIdx * imgPixels * numImages + rowIdx * imgSize * numImages + imgIdx;
    } else {
        imgs += channelIdx * imgPixels * imgStride + rowIdx * imgStride + imgIdx;
        target += channelIdx * imgPixels * numImages + rowIdx * numImages + imgIdx;
    }
    float outputs[filterWidth-1];
    #pragma unroll
    for (int r = 0; r < filterWidth-1; r++) {
        outputs[r] = 0;
    }
    if (threadIdx.x < filterWidth-1) {
        shFilter[threadIdx.x] = filter[threadIdx.x];
    }
    __syncthreads();
    if (imgIdx < numImages) {
        // Prologue: open the first `radius` windows.
        // This writes radius*2 = filterWidth - 1 values to outputs
        #pragma unroll
        for (int col = 0; col < radius; col++) {
            float px = imgs[0];
            #pragma unroll
            for (int r = 0; r < radius + 1 + col; r++) {
                outputs[r] += px * shFilter[radius + col - r];
            }
            imgs += horiz ? imgStride : imgStride * imgSize;
        }
        // Steady state: each new pixel completes one window (written out) and
        // contributes to the remaining open ones, which shift down by one.
        // Unfortunately this has to be at this level of granularity
        if (scaleTargets != 0) {
            for (int col = radius; col < imgSize ; col++) { // loop over img columns
                float px = imgs[0];
                target[0] = scaleTargets * target[0] + scaleOutputs * (outputs[0] + px * shFilter[0]);
                #pragma unroll
                for (int r = 1; r < radius*2; r++) {
                    outputs[r-1] = outputs[r] + px * shFilter[r];
                }
                outputs[filterWidth - 2] = px * shFilter[0];
                imgs += horiz ? imgStride : imgStride * imgSize;
                target += horiz ? numImages : numImages * imgSize;
            }
            // Epilogue: flush the last `radius` (border-truncated) windows.
            #pragma unroll
            for (int r = 0; r < radius; r++) {
                float* t = &target[0];
                t[0] = scaleTargets * t[0] + scaleOutputs * outputs[r];
                target += horiz ? numImages : numImages * imgSize;
            }
        } else {
            for (int col = radius; col < imgSize ; col++) { // loop over img columns
                float px = imgs[0];
                target[0] = scaleOutputs * (outputs[0] + px * shFilter[0]);
                #pragma unroll
                for (int r = 1; r < radius*2; r++) {
                    outputs[r-1] = outputs[r] + px * shFilter[r];
                }
                outputs[filterWidth - 2] = px * shFilter[0];
                imgs += horiz ? imgStride : imgStride * imgSize;
                target += horiz ? numImages : numImages * imgSize;
            }
            #pragma unroll
            for (int r = 0; r < radius; r++) {
                target[0] = scaleOutputs * outputs[r];
                target += horiz ? numImages : numImages * imgSize;
            }
        }
    }
}
/*
* Block size B_YxB_X
* blockIdx.x determines output.x, image idx in batches of B_X*imgsPerThread
* blockIdx.y determines output.y, filter idx in batches of B_Y*filtersPerThread
*
* So each block does one output for some number of images/filters.
*
* threadIdx.x determines img idx
* threadIdx.y determines filter idx
*
* imgs: (numChannels, imgPixels, numImages)
* target: (numChannels, numOutputs, numImages)
*
* numImages must be divisible by B_X*imgsPerThread if checkCaseBounds is false
* numFilters must be divisible by filtersPerThread
*/
/*
 * Strided "bed of nails" subsampling and its reverse.  Forward (!reverse):
 * copies pixel (startX + outY*strideX, startX + outX*strideX) of each channel
 * into output position (outY, outX).  Reverse: writes values from `target`
 * back into those image pixels.  scaleTargets != 0 selects the accumulating
 * variants.
 */
template<int B_Y, int B_X, int imgsPerThread, int chansPerThread, bool checkCaseBounds>
__global__ void kBedOfNails(float* imgs, float* target, const int imgSize, const int numChannels,
                            const int numImages, const int startX, const int strideX, const int outputsX,
                            const bool reverse, const float scaleTargets, const float scaleOutput) {
    const int numImgBlocks = DIVUP(numImages,B_X*imgsPerThread);
    const int numChanBlocks = DIVUP(numChannels, B_Y*chansPerThread);
    const int outputIdxX = blockIdx.x / numImgBlocks;
    const int outputIdxY = blockIdx.y / numChanBlocks;
    const int blockImgIdx = (blockIdx.x % numImgBlocks) * B_X * imgsPerThread;
    const int blockChanIdx = (blockIdx.y % numChanBlocks) * B_Y * chansPerThread;
    const int myChanIdx = (blockChanIdx + threadIdx.y*chansPerThread);
    if (myChanIdx >= numChannels) {
        return; // trailing threads when numChannels doesn't fill the last block
    }
//    if (blockIdx.x != 0 || blockIdx.y != 0) {
//        return;
//    }
    const int outputIdx = outputIdxY * outputsX + outputIdxX;
    const int numOutputs = outputsX * outputsX;
    const int imgPixels = imgSize * imgSize;
    // Image pixel sampled by this output position.
    const int startImgPxX = startX + outputIdxX * strideX;
    const int startImgPxY = startX + outputIdxY * strideX;
    const int imgIdx = blockImgIdx + threadIdx.x;
    const int imgPx = startImgPxY * imgSize + startImgPxX;
    imgs += myChanIdx * imgPixels * numImages + imgPx * numImages + imgIdx;
    target += (myChanIdx * numOutputs + outputIdx) * numImages + imgIdx;
    if (scaleTargets != 0) {
        if (!reverse) {
            // Forward, accumulating into target.
            #pragma unroll
            for (int i = 0; i < imgsPerThread; i++) {
                if (!checkCaseBounds || imgIdx + i * B_X < numImages) {
                    #pragma unroll
                    for (int c = 0; c < chansPerThread; c++) {
                        target[c * numOutputs * numImages + i * B_X] = scaleTargets * target[c * numOutputs * numImages + i * B_X] + scaleOutput * imgs[c * imgPixels * numImages + i * B_X];
                    }
                }
            }
        } else {
            // Reverse, accumulating into imgs.
            #pragma unroll
            for (int i = 0; i < imgsPerThread; i++) {
                if (!checkCaseBounds || imgIdx + i * B_X < numImages) {
                    #pragma unroll
                    for (int c = 0; c < chansPerThread; c++) {
                        imgs[c * imgPixels * numImages + i * B_X] = scaleTargets * imgs[c * imgPixels * numImages + i * B_X] + scaleOutput * target[c * numOutputs * numImages + i * B_X];
                    }
                }
            }
        }
    } else {
        if (!reverse) {
            // Forward, overwriting target.
            #pragma unroll
            for (int i = 0; i < imgsPerThread; i++) {
                if (!checkCaseBounds || imgIdx + i * B_X < numImages) {
                    #pragma unroll
                    for (int c = 0; c < chansPerThread; c++) {
                        target[c * numOutputs * numImages + i * B_X] = scaleOutput * imgs[c * imgPixels * numImages + i * B_X];
                    }
                }
            }
        } else {
            // Reverse, overwriting imgs (only the strided pixels are written;
            // the host side zeroes the rest -- see _convBedOfNails).
            #pragma unroll
            for (int i = 0; i < imgsPerThread; i++) {
                if (!checkCaseBounds || imgIdx + i * B_X < numImages) {
                    #pragma unroll
                    for (int c = 0; c < chansPerThread; c++) {
                        imgs[c * imgPixels * numImages + i * B_X] = scaleOutput * target[c * numOutputs * numImages + i * B_X];
                    }
                }
            }
        }
    }
}
/*
* imgs: (numChannels, imgPixels, numImages)
* target: (numChannels, outputs, numImages)
*/
/*
 * Host-side dispatcher for kBedOfNails.
 * images: (numChannels, imgPixels, numImages); target: (numChannels, outputs, numImages).
 * reverse == false: subsample images into target.
 * reverse == true:  scatter target (e.g. gradients) back into images.
 * Picks imgsPerThread / chansPerThread / bounds-check template variants from
 * the runtime sizes and launches the matching instantiation.
 */
void _convBedOfNails(NVMatrix& images, NVMatrix& target, int numChannels, int imgSize, int startX, int strideX,
                     bool reverse, float scaleTargets, float scaleOutput) {
    int numImages = reverse ? target.getNumCols() : images.getNumCols();
    int imgPixels = imgSize * imgSize;
    assert(!images.isTrans());
    assert(!target.isTrans());
    assert(images.isContiguous());
    assert(target.isContiguous());
    assert(strideX > 1);
    int outputsX = DIVUP(imgSize, strideX);
    int outputs = outputsX * outputsX;
    if (reverse) {
        assert(target.getNumRows() == numChannels * outputs);
    } else {
        assert(images.getNumRows() == numChannels * imgPixels);
    }
    if (scaleTargets == 0) {
        if (reverse) {
            // The reverse kernel only writes the strided pixels, so the full
            // image buffer must be zeroed first.
            images.resize(numChannels * imgPixels, numImages);
            images.apply(NVMatrixOps::Zero());
        } else {
            target.resize(numChannels*outputs, numImages);
        }
    } else {
        if (reverse) {
            // NOTE(review): shouldn't this be numChannels * imgPixels?  In
            // reverse mode `images` holds full images -- verify this assert.
            assert(images.getNumRows() == numChannels * outputs);
            assert(images.getNumCols() == numImages);
        } else {
            assert(target.getNumRows() == numChannels * outputs);
            assert(target.getNumCols() == numImages);
        }
    }
    // Template-parameter selection; kernel is kBedOfNails<B_Y, B_X, imgsPerThread, chansPerThread, checkCaseBounds>.
    int imgsPerThread = numImages % 128 == 0 ? 4 : numImages % 64 == 0 ? 2 : 1;
    bool checkCaseBounds = numImages % (32*imgsPerThread) != 0;
    int chansPerThread = numChannels % 8 == 0 ? 2 : 1;
    dim3 threads(32, 4);
    dim3 blocks(DIVUP(numImages,32*imgsPerThread) * outputsX, DIVUP(numChannels, 4 * chansPerThread) * outputsX);
    if (imgsPerThread == 4) {
        if (chansPerThread == 1) {
            if (checkCaseBounds) {
                cudaFuncSetCacheConfig(kBedOfNails<4, 32, 4, 1, true>, cudaFuncCachePreferL1);
                kBedOfNails<4, 32, 4, 1, true><<<blocks, threads>>>(images.getDevData(), target.getDevData(),
                                                                    imgSize, numChannels, numImages, startX, strideX, outputsX,
                                                                    reverse, scaleTargets, scaleOutput);
            } else {
                cudaFuncSetCacheConfig(kBedOfNails<4, 32, 4, 1, false>, cudaFuncCachePreferL1);
                kBedOfNails<4, 32, 4, 1, false><<<blocks, threads>>>(images.getDevData(), target.getDevData(),
                                                                     imgSize, numChannels, numImages, startX, strideX, outputsX,
                                                                     reverse, scaleTargets, scaleOutput);
            }
        } else {
            if (checkCaseBounds) {
                cudaFuncSetCacheConfig(kBedOfNails<4, 32, 4, 2, true>, cudaFuncCachePreferL1);
                kBedOfNails<4, 32, 4, 2, true><<<blocks, threads>>>(images.getDevData(), target.getDevData(),
                                                                    imgSize, numChannels, numImages, startX, strideX, outputsX,
                                                                    reverse, scaleTargets, scaleOutput);
            } else {
                cudaFuncSetCacheConfig(kBedOfNails<4, 32, 4, 2, false>, cudaFuncCachePreferL1);
                kBedOfNails<4, 32, 4, 2, false><<<blocks, threads>>>(images.getDevData(), target.getDevData(),
                                                                     imgSize, numChannels, numImages, startX, strideX, outputsX,
                                                                     reverse, scaleTargets, scaleOutput);
            }
        }
    } else if (imgsPerThread == 2) {
        if (chansPerThread == 1) {
            if (checkCaseBounds) {
                cudaFuncSetCacheConfig(kBedOfNails<4, 32, 2, 1, true>, cudaFuncCachePreferL1);
                kBedOfNails<4, 32, 2, 1, true><<<blocks, threads>>>(images.getDevData(), target.getDevData(),
                                                                    imgSize, numChannels, numImages, startX, strideX, outputsX,
                                                                    reverse, scaleTargets, scaleOutput);
            } else {
                cudaFuncSetCacheConfig(kBedOfNails<4, 32, 2, 1, false>, cudaFuncCachePreferL1);
                kBedOfNails<4, 32, 2, 1, false><<<blocks, threads>>>(images.getDevData(), target.getDevData(),
                                                                     imgSize, numChannels, numImages, startX, strideX, outputsX,
                                                                     reverse, scaleTargets, scaleOutput);
            }
        } else {
            if (checkCaseBounds) {
                cudaFuncSetCacheConfig(kBedOfNails<4, 32, 2, 2, true>, cudaFuncCachePreferL1);
                kBedOfNails<4, 32, 2, 2, true><<<blocks, threads>>>(images.getDevData(), target.getDevData(),
                                                                    imgSize, numChannels, numImages, startX, strideX, outputsX,
                                                                    reverse, scaleTargets, scaleOutput);
            } else {
                cudaFuncSetCacheConfig(kBedOfNails<4, 32, 2, 2, false>, cudaFuncCachePreferL1);
                kBedOfNails<4, 32, 2, 2, false><<<blocks, threads>>>(images.getDevData(), target.getDevData(),
                                                                     imgSize, numChannels, numImages, startX, strideX, outputsX,
                                                                     reverse, scaleTargets, scaleOutput);
            }
        }
    } else {
        if (chansPerThread == 1) {
            if (checkCaseBounds) {
                cudaFuncSetCacheConfig(kBedOfNails<4, 32, 1, 1, true>, cudaFuncCachePreferL1);
                kBedOfNails<4, 32, 1, 1, true><<<blocks, threads>>>(images.getDevData(), target.getDevData(),
                                                                    imgSize, numChannels, numImages, startX, strideX, outputsX,
                                                                    reverse, scaleTargets, scaleOutput);
            } else {
                cudaFuncSetCacheConfig(kBedOfNails<4, 32, 1, 1, false>, cudaFuncCachePreferL1);
                kBedOfNails<4, 32, 1, 1, false><<<blocks, threads>>>(images.getDevData(), target.getDevData(),
                                                                     imgSize, numChannels, numImages, startX, strideX, outputsX,
                                                                     reverse, scaleTargets, scaleOutput);
            }
        } else {
            if (checkCaseBounds) {
                cudaFuncSetCacheConfig(kBedOfNails<4, 32, 1, 2, true>, cudaFuncCachePreferL1);
                kBedOfNails<4, 32, 1, 2, true><<<blocks, threads>>>(images.getDevData(), target.getDevData(),
                                                                    imgSize, numChannels, numImages, startX, strideX, outputsX,
                                                                    reverse, scaleTargets, scaleOutput);
            } else {
                cudaFuncSetCacheConfig(kBedOfNails<4, 32, 1, 2, false>, cudaFuncCachePreferL1);
                kBedOfNails<4, 32, 1, 2, false><<<blocks, threads>>>(images.getDevData(), target.getDevData(),
                                                                     imgSize, numChannels, numImages, startX, strideX, outputsX,
                                                                     reverse, scaleTargets, scaleOutput);
            }
        }
    }
}
/*
 * Forward "bed of nails" subsampling: copies every strideX-th pixel
 * (starting at startX) of each channel into target.
 */
void convBedOfNails(NVMatrix& images, NVMatrix& target, int numChannels, int imgSize, int startX,
                    int strideX, float scaleTargets, float scaleOutput) {
    const bool reverse = false;
    _convBedOfNails(images, target, numChannels, imgSize, startX, strideX, reverse,
                    scaleTargets, scaleOutput);
}
/*
 * Backward pass of convBedOfNails.  Note the swapped argument order: in
 * reverse mode _convBedOfNails writes into its first argument (the images)
 * reading from its second (the gradients).
 */
void convBedOfNailsUndo(NVMatrix& actsGrad, NVMatrix& target, int numChannels, int imgSize,
                        int startX, int strideX, float scaleTargets, float scaleOutput) {
    const bool reverse = true;
    _convBedOfNails(target, actsGrad, numChannels, imgSize, startX, strideX, reverse,
                    scaleTargets, scaleOutput);
}
/*
* imgs: (numChannels, imgPixels, numImages) with given imgStride
* filter: (1, 2*radius + 1)
* target: (numChannels, imgPixels, numImages)
*/
/*
 * Host-side dispatcher for kGaussianBlur: applies the 1-D (2*radius+1)-tap
 * filter either horizontally or vertically to every channel of every image.
 * target may be the same matrix as images (per the kernel's header comment).
 * Only radius 1..4 are instantiated.
 */
void convGaussianBlur(NVMatrix& images, NVMatrix& filter, NVMatrix& target, bool horiz, int numChannels,
                      float scaleTargets, float scaleOutputs) {
    int numImages = images.getNumCols();
    int radius = filter.getNumCols() / 2;   // filter is (1, 2*radius + 1)
    int imgPixels = images.getNumRows() / numChannels;
    int imgSize = int(sqrt(imgPixels));
    assert(imgPixels == imgSize * imgSize); // images must be square
    assert(radius >= 1 && radius <= 4);     // only these template variants exist below
    assert(imgSize >= 2 * radius + 1);
    assert(filter.getNumRows() == 1);
    assert(images.getNumRows() == numChannels * imgPixels);
    assert(!images.isTrans());
    assert(!filter.isTrans());
    assert(!target.isTrans());
    assert(target.isContiguous());
    if (scaleTargets == 0) {
        target.resize(images);
    } else {
        assert(target.isSameDims(images));
    }
    // One thread per (image, channel-row) pair; the kernel streams along the row.
    dim3 threads(32, 4);
    dim3 blocks(DIVUP(numImages, threads.x), DIVUP(numChannels*imgSize, threads.y));
    if (radius == 1) {
        cudaFuncSetCacheConfig(kGaussianBlur<4, 32, 1>, cudaFuncCachePreferL1);
        kGaussianBlur<4, 32, 1><<<blocks, threads>>>(images.getDevData(), filter.getDevData(), target.getDevData(),
                                                     imgSize, numImages, images.getStride(), horiz, scaleTargets, scaleOutputs);
    } else if (radius == 2) {
        cudaFuncSetCacheConfig(kGaussianBlur<4, 32, 2>, cudaFuncCachePreferL1);
        kGaussianBlur<4, 32, 2><<<blocks, threads>>>(images.getDevData(), filter.getDevData(), target.getDevData(),
                                                     imgSize, numImages, images.getStride(), horiz, scaleTargets, scaleOutputs);
    } else if (radius == 3) {
        cudaFuncSetCacheConfig(kGaussianBlur<4, 32, 3>, cudaFuncCachePreferL1);
        kGaussianBlur<4, 32, 3><<<blocks, threads>>>(images.getDevData(), filter.getDevData(), target.getDevData(),
                                                     imgSize, numImages, images.getStride(), horiz, scaleTargets, scaleOutputs);
    } else if (radius == 4) {
        cudaFuncSetCacheConfig(kGaussianBlur<4, 32, 4>, cudaFuncCachePreferL1);
        kGaussianBlur<4, 32, 4><<<blocks, threads>>>(images.getDevData(), filter.getDevData(), target.getDevData(),
                                                     imgSize, numImages, images.getStride(), horiz, scaleTargets, scaleOutputs);
    }
}
/*
* Block size 1x128
* blockIdx.x determines pixel.x, image idx in batches of 128*imgsPerThread
* blockIdx.y determines pixel.y
*
 * So each block does one output for some number of images and all the filters.
*
* threadIdx.x determines img idx
*
* imgs: (numFilters, imgPixels, numImages)
* meanDiffs: (numFilters, imgPixels, numImages)
* denoms: (numFilters, imgPixels, numImages) (out)
* target: (numFilters, imgPixels, numImages) (out)
*
* numImages must be divisible by B_X*imgsPerThread if checkCaseBounds is false
* numFilters must be divisible by B_Y*filtersPerThread
*/
/*
 * Contrast/response normalization for a small compile-time filter count.
 * For each filter map independently: sums square(meanDiffs) over a
 * sizeX x sizeX spatial window centered on the pixel (clipped at borders),
 * then denom = 1 + addScale * sum and target = img * denom^-powScale.
 */
template<int imgsPerThread, int numFilters, bool checkCaseBounds>
__global__ void kCNorm_fewfilter(float* imgs, float* meanDiffs, float* denoms, float* target, const int imgSize,
                                 const int numImages, const int sizeX, const float addScale, const float powScale) {
    const int imgPixels = imgSize * imgSize;
    const int numImgBlocks = DIVUP(numImages, 128*imgsPerThread);
    const int pxIdxX = blockIdx.x / numImgBlocks;
    const int pxIdxY = blockIdx.y;
    const int blockImgIdx = (blockIdx.x % numImgBlocks) * 128 * imgsPerThread;
    const int pxIdx = pxIdxY * imgSize + pxIdxX;
    // Top-left corner of the (possibly out-of-bounds) normalization window.
    const int startPxX = -sizeX/2 + pxIdxX;
    const int startPxY = -sizeX/2 + pxIdxY;
    const int imgIdx = blockImgIdx + threadIdx.x;
    imgs += pxIdx * numImages + imgIdx;
    denoms += pxIdx * numImages + imgIdx;
    meanDiffs += imgIdx;
    target += pxIdx * numImages + imgIdx;
    float prod[numFilters][imgsPerThread];   // per-filter window sums of squares
    #pragma unroll
    for (int i = 0; i < imgsPerThread; i++) {
        if (!checkCaseBounds || imgIdx + i * 128 < numImages) {
            #pragma unroll
            for (int f = 0; f < numFilters; f++) {
                prod[f][i] = 0;
            }
        }
    }
    // Clip the window to the image bounds.
    const int loopStartY = MAX(0, startPxY);
    const int loopStartX = MAX(0, startPxX);
    const int loopEndY = MIN(imgSize, startPxY + sizeX);
    const int loopEndX = MIN(imgSize, startPxX + sizeX);
    for (int y = loopStartY; y < loopEndY; y++) {
        for (int x = loopStartX; x < loopEndX; x++) {
            const int imgPx = y * imgSize + x;
            #pragma unroll
            for (int i = 0; i < imgsPerThread; i++) {
                if (!checkCaseBounds || imgIdx + i * 128 < numImages) {
                    #pragma unroll
                    for (int f = 0; f < numFilters; f++) {
                        prod[f][i] += square(meanDiffs[(f * imgPixels + imgPx) * numImages + i * 128]);
                    }
                }
            }
        }
    }
    // Write denominators and normalized outputs.
    #pragma unroll
    for (int i = 0; i < imgsPerThread; i++) {
        if (!checkCaseBounds || imgIdx + i * 128 < numImages) {
            #pragma unroll
            for (int f = 0; f < numFilters; f++) {
                prod[f][i] = 1 + addScale * prod[f][i];
                denoms[f * imgPixels * numImages + i * 128] = prod[f][i];
                target[f * imgPixels * numImages + i * 128] = imgs[f * imgPixels * numImages + i * 128] * __powf(prod[f][i], -powScale);
            }
        }
    }
}
/*
* Block size B_YxB_X
* blockIdx.x determines pixel.x, image idx in batches of B_X*imgsPerThread
* blockIdx.y determines pixel.y, filter idx in batches of B_Y*filtersPerThread
*
* So each block does one pixel for some number of images/filters.
*
* threadIdx.x determines img idx
* threadIdx.y determines filter idx
*
* imgs: (numFilters, imgPixels, numImages)
* means: (numFilters, imgPixels, numImages)
* denoms: (numFilters, imgPixels, numImages) (out)
* target: (numFilters, imgPixels, numImages) (out)
*
* numImages must be divisible by B_X*imgsPerThread if checkCaseBounds is false
* numFilters must be divisible by B_Y*filtersPerThread
*/
/*
 * Contrast/response normalization for many filters: same math as
 * kCNorm_fewfilter (per-filter sum of square(meanDiffs) over a clipped
 * sizeX x sizeX spatial window; denom = 1 + addScale*sum;
 * target = img * denom^-powScale), but filters are tiled across
 * threadIdx.y / blockIdx.y in batches of B_Y*filtersPerThread.
 */
template<int B_Y, int B_X, int imgsPerThread, int filtersPerThread, bool checkCaseBounds>
__global__ void kCNorm_manyfilter(float* imgs, float* meanDiffs, float* denoms, float* target, const int imgSize,
                                  const int numFilters, const int numImages, const int sizeX,
                                  const float addScale, const float powScale) {
    const int imgPixels = imgSize * imgSize;
    const int numImgBlocks = DIVUP(numImages, B_X*imgsPerThread);
    const int numFilterBlocks = numFilters/(B_Y*filtersPerThread);
    const int pxIdxX = blockIdx.x / numImgBlocks;
    const int pxIdxY = blockIdx.y / numFilterBlocks;
    const int blockImgIdx = (blockIdx.x % numImgBlocks) * B_X * imgsPerThread;
    const int blockFilterIdx = (blockIdx.y % numFilterBlocks) * B_Y * filtersPerThread;
    const int pxIdx = pxIdxY * imgSize + pxIdxX;
    // Top-left corner of the normalization window (clipped below).
    const int startPxX = -sizeX/2 + pxIdxX;
    const int startPxY = -sizeX/2 + pxIdxY;
    const int imgIdx = blockImgIdx + threadIdx.x;
    // Each thread's filtersPerThread maps are B_Y apart in the filter dimension.
    imgs += ((blockFilterIdx + threadIdx.y) * imgPixels + pxIdx) * numImages + imgIdx;
    meanDiffs += (blockFilterIdx + threadIdx.y) * imgPixels * numImages + imgIdx;
    denoms += ((blockFilterIdx + threadIdx.y) * imgPixels + pxIdx) * numImages + imgIdx;
    target += ((blockFilterIdx + threadIdx.y) * imgPixels + pxIdx) * numImages + imgIdx;
    float prod[filtersPerThread][imgsPerThread];   // per-filter window sums of squares
    #pragma unroll
    for (int i = 0; i < imgsPerThread; i++) {
        if (!checkCaseBounds || imgIdx + i * B_X < numImages) {
            #pragma unroll
            for (int f = 0; f < filtersPerThread; f++) {
                prod[f][i] = 0;
            }
        }
    }
    // Clip the window to the image bounds.
    const int loopStartY = MAX(0, startPxY);
    const int loopStartX = MAX(0, startPxX);
    const int loopEndY = MIN(imgSize, startPxY + sizeX);
    const int loopEndX = MIN(imgSize, startPxX + sizeX);
    for (int y = loopStartY; y < loopEndY; y++) {
        for (int x = loopStartX; x < loopEndX; x++) {
            const int imgPx = y * imgSize + x;
            #pragma unroll
            for (int i = 0; i < imgsPerThread; i++) {
                if (!checkCaseBounds || imgIdx + i * B_X < numImages) {
                    #pragma unroll
                    for (int f = 0; f < filtersPerThread; f++) {
                        prod[f][i] += square(meanDiffs[(f * B_Y * imgPixels + imgPx) * numImages + i * B_X]);
                    }
                }
            }
        }
    }
    // Write denominators and normalized outputs.
    #pragma unroll
    for (int i = 0; i < imgsPerThread; i++) {
        if (!checkCaseBounds || imgIdx + i * B_X < numImages) {
            #pragma unroll
            for (int f = 0; f < filtersPerThread; f++) {
                prod[f][i] = 1 + addScale * prod[f][i];
                denoms[f * B_Y * imgPixels * numImages + i * B_X] = prod[f][i];
                target[f * B_Y * imgPixels * numImages + i * B_X] = imgs[f * B_Y * imgPixels * numImages + i * B_X] * __powf(prod[f][i], -powScale);
            }
        }
    }
}
/*
* Block size 16xB_X
* blockIdx.x determines 4x4 pixel.x region, image idx in batches of B_X*imgsPerThread
* blockIdx.y determines 4x4 pixel.y region, filter idx in batches of filtersPerThread
*
* So each block does 4x4 region of pixels for some number of images/filters.
*
* threadIdx.x determines img idx
* threadIdx.y determines pixel idx
*
* imgs: (numFilters, imgPixels, numImages)
* means: (numFilters, imgPixels, numImages)
* denoms: (numFilters, imgPixels, numImages) (out)
* target: (numFilters, imgPixels, numImages) (out)
*
* B_X one of 8, 16, 32
* imgsPerThread one of 1, 2, 4, 8, 16
*
* B_XximgsPerThread MUST be divisible by 32.
* Number of filters MUST be divisible by filtersPerThread.
*
* numImages must be divisible by B_X*imgsPerThread if checkCaseBounds is false
* numFilters must be divisible by filtersPerThread
*
* Final write-out will not be fully coalesced unless B_X is 32. But there's a lot more
* reading than writing here, and the reading is all coalesced, so it should be OK.
*/
/*
 * Contrast/response normalization where each block computes a 4x4 pixel
 * region, staging meanDiffs pixels through shared memory so that the 16
 * overlapping windows share their loads.  Same per-filter math as the other
 * kCNorm kernels: denom = 1 + addScale * (window sum of square(meanDiffs));
 * target = img * denom^-powScale.
 */
template<int B_X, int imgsPerThread, int filtersPerThread, bool checkCaseBounds>
__global__ void kCNorm2(float* imgs, float* meanDiffs, float* denoms, float* target, const int imgSize,
                        const int numFilters, const int numImages, const int sizeX, const float addScale, const float powScale) {
    __shared__ float shDiffs[filtersPerThread][B_X*imgsPerThread];  // one staged pixel, all filters/cases
    const int imgPixels = imgSize * imgSize;
    const int numImgBlocks = DIVUP(numImages, B_X*imgsPerThread);
    const int numFilterBlocks = numFilters/(filtersPerThread);
    // Top-left pixel of this block's 4x4 output region.
    const int blockPxX = 4*(blockIdx.x / numImgBlocks);
    const int blockPxY = 4*(blockIdx.y / numFilterBlocks);
    const int blockImgIdx = (blockIdx.x % numImgBlocks) * B_X * imgsPerThread;
    const int blockFilterIdx = (blockIdx.y % numFilterBlocks) * filtersPerThread;
    // Flattened thread index used for the cooperative shared-memory loads.
    const int tidx = threadIdx.y * B_X + threadIdx.x;
    const int loadY = tidx / 32, loadX = tidx % 32;
    // Union of the 16 windows, clipped to the image.
    const int startPxX = MAX(0, -sizeX/2 + blockPxX);
    const int startPxY = MAX(0, -sizeX/2 + blockPxY);
    const int endPxX = MIN(imgSize, blockPxX + DIVUP(sizeX, 2) + 3);
    const int endPxY = MIN(imgSize, blockPxY + DIVUP(sizeX, 2) + 3);
    // This thread's own output pixel within the 4x4 region.
    const int myPxX = blockPxX + threadIdx.y % 4;
    const int myPxY = blockPxY + threadIdx.y / 4;
    const int myPxIdx = myPxY * imgSize + myPxX;
//    const bool doWork = myPxX < imgSize && myPxY < imgSize;
    // This thread's own (unclipped) window bounds.
    const int myStartPxY = -sizeX/2 + myPxY;
    const int myStartPxX = -sizeX/2 + myPxX;
    const int myEndPxY = myPxY + DIVUP(sizeX, 2);
    const int myEndPxX = myPxX + DIVUP(sizeX, 2);
    const int imgIdx = blockImgIdx + threadIdx.x;
    imgs += (blockFilterIdx * imgPixels + myPxIdx) * numImages + imgIdx;
    meanDiffs += (blockFilterIdx + loadY) * imgPixels * numImages + blockImgIdx + loadX;
    denoms += (blockFilterIdx * imgPixels + myPxIdx) * numImages + imgIdx;
    target += (blockFilterIdx * imgPixels + myPxIdx) * numImages + imgIdx;
    float prod[filtersPerThread][imgsPerThread];   // per-filter window sums of squares
    #pragma unroll
    for (int i = 0; i < imgsPerThread; i++) {
        if (!checkCaseBounds || imgIdx + i * B_X < numImages) {
            #pragma unroll
            for (int f = 0; f < filtersPerThread; f++) {
                prod[f][i] = 0;
            }
        }
    }
    for (int y = startPxY; y < endPxY; y++) {
        const bool isInY = y >= myStartPxY && y < myEndPxY;
        for (int x = startPxX; x < endPxX; x++) {
            const int px = y * imgSize + x;
            // All the threads load a pixel from memory
            #pragma unroll
            for (int ly = 0; ly < filtersPerThread; ly += B_X/2) {
                if (filtersPerThread % (B_X/2) == 0 || ly + loadY < filtersPerThread) {
                    #pragma unroll
                    for (int lx = 0; lx < B_X*imgsPerThread; lx += 32) {
                        if (!checkCaseBounds || lx + loadX + blockImgIdx < numImages) {
                            shDiffs[ly + loadY][lx + loadX] = meanDiffs[(ly * imgPixels + px) * numImages + lx];
                        }
                    }
                }
            }
            __syncthreads();   // tile fully written before anyone reads it
            // Each row of threads decides if it's interested in this pixel
            if (isInY && x >= myStartPxX && x < myEndPxX) {
                #pragma unroll
                for (int i = 0; i < imgsPerThread; i++) {
                    if (!checkCaseBounds || imgIdx + i * B_X < numImages) {
                        #pragma unroll
                        for (int f = 0; f < filtersPerThread; f++) {
                            prod[f][i] += square(shDiffs[f][threadIdx.x + i * B_X]);
                        }
                    }
                }
            }
            __syncthreads();   // done reading before the next pixel overwrites the tile
        }
    }
//    imgs -= (loadY * imgPixels - myPxIdx) * numImages + loadX;
//    imgs += threadIdx.x;
    if (myPxX < imgSize && myPxY < imgSize) {
        #pragma unroll
        for (int i = 0; i < imgsPerThread; i++) {
            if (!checkCaseBounds || imgIdx + i * B_X < numImages) {
                #pragma unroll
                for (int f = 0; f < filtersPerThread; f++) {
                    prod[f][i] = 1 + addScale * prod[f][i];
                    denoms[f * imgPixels * numImages + i * B_X] = prod[f][i];
                    target[f * imgPixels * numImages + i * B_X] = imgs[f * imgPixels * numImages + i * B_X] * __powf(prod[f][i], -powScale);
                }
            }
        }
    }
}
/*
* Block size B_YxB_X
* blockIdx.x determines pixel.x, image idx in batches of B_X*imgsPerThread
* blockIdx.y determines pixel.y, filter idx in batches of B_Y
*
* So each block does one pixel for some number of images/filters.
*
* threadIdx.x determines img idx
* threadIdx.y determines filter idx
*
* imgs: (numFilters, imgPixels, numImages)
* meanDiffs: (numFilters, imgPixels, numImages)
* denoms: (numFilters, imgPixels, numImages) (out)
* target: (numFilters, imgPixels, numImages) (out)
*
* numImages must be divisible by B_X*imgsPerThread if checkCaseBounds is false
* numFilters must be divisible by B_Y
*/
template<int B_Y, int B_X, int imgsPerThread, bool checkCaseBounds, bool blocked>
__global__ void kFCNorm(float* imgs, float* meanDiffs, float* denoms, float* target, const int imgSize,
                        const int numFilters, const int numImages, const int sizeF,
                        const float addScale, const float powScale) {
    const int imgPixels = imgSize * imgSize;
    const int numImgBlocks = DIVUP(numImages, B_X*imgsPerThread);
    const int numFilterBlocks = numFilters/B_Y;
    // Decode the packed 2D grid: blockIdx.x carries (pixel.x, image block),
    // blockIdx.y carries (pixel.y, filter block).
    const int pxIdxX = blockIdx.x / numImgBlocks;
    const int pxIdxY = blockIdx.y / numFilterBlocks;
    const int blockImgIdx = (blockIdx.x % numImgBlocks) * B_X * imgsPerThread;
    const int filterIdx = (blockIdx.y % numFilterBlocks) * B_Y + threadIdx.y;
    const int pxIdx = pxIdxY * imgSize + pxIdxX;
    const int imgIdx = blockImgIdx + threadIdx.x;
    // Offset pointers to this thread's (filter, pixel, image) element.
    // meanDiffs is offset by pixel/image only: its filter dimension is walked
    // explicitly in the f-loop below.
    imgs += ((filterIdx) * imgPixels + pxIdx) * numImages + imgIdx;
    meanDiffs += pxIdx * numImages + imgIdx;
    denoms += ((filterIdx) * imgPixels + pxIdx) * numImages + imgIdx;
    target += ((filterIdx) * imgPixels + pxIdx) * numImages + imgIdx;
    // Per-image running sum of squared meanDiffs over the filter window.
    float prod[imgsPerThread];
    #pragma unroll
    for (int i = 0; i < imgsPerThread; i++) {
        if (!checkCaseBounds || imgIdx + i * B_X < numImages) {
            prod[i] = 0;
        }
    }
    // Filter window: in blocked mode, the fixed sizeF-aligned block containing
    // filterIdx; otherwise a window of sizeF roughly centered on filterIdx,
    // clamped to [0, numFilters).
    const int startF = blocked ? (filterIdx / sizeF) * sizeF : -sizeF/2 + filterIdx;
    const int loopStartF = blocked ? startF : MAX(0, startF);
    const int loopEndF = MIN(numFilters, startF + sizeF);
    for (int f = loopStartF; f < loopEndF; ++f) {
        #pragma unroll
        for (int i = 0; i < imgsPerThread; i++) {
            if (!checkCaseBounds || imgIdx + i * B_X < numImages) {
                prod[i] += square(meanDiffs[f * imgPixels * numImages + i * B_X]);
            }
        }
    }
    // denom = 1 + addScale * sum(diff^2);  output = img * denom^(-powScale).
    #pragma unroll
    for (int i = 0; i < imgsPerThread; i++) {
        if (!checkCaseBounds || imgIdx + i * B_X < numImages) {
            prod[i] = 1 + addScale * prod[i];
            denoms[i * B_X] = prod[i];
            target[i * B_X] = imgs[i * B_X] * __powf(prod[i], -powScale);
        }
    }
}
/*
* Block size B_YxB_X
* blockIdx.x determines pixel.x, image idx in batches of B_X*imgsPerThread
* blockIdx.y determines pixel.y, filter idx in batches of B_Y
*
* So each block does one output pixel for some number of images/filters.
*
* threadIdx.x determines img idx
* threadIdx.y determines filter idx
*
* outGrads: (numFilters, imgPixels, numImages)
* denoms: (numFilters, imgPixels, numImages)
* inputs: (numFilters, imgPixels, numImages)
* acts: (numFilters, imgPixels, numImages)
* target: (numFilters, imgPixels, numImages)
*
* numImages must be divisible by B_X*imgsPerThread
* numFilters must be divisible by B_Y
*
* TODO: this isn't really ideal
*/
template<int B_Y, int B_X, int imgsPerThread, bool add, bool checkCaseBounds, bool blocked>
__global__ void kFRNormUndo(float* outGrads, float* denoms, float* inputs, float* acts, float* target, const int imgSize, const int numFilters,
                            const int numImages, const int sizeF, const float powScale, const float scaleTargets, const float scaleOutputs) {
    const int numImgBlocks = DIVUP(numImages,B_X*imgsPerThread);
    const int numFilterBlocks = numFilters/B_Y;
    // Decode the packed 2D grid: blockIdx.x carries (pixel.x, image block),
    // blockIdx.y carries (pixel.y, filter block).
    const int pxIdxX = blockIdx.x / numImgBlocks;
    const int pxIdxY = blockIdx.y / numFilterBlocks;
    const int blockImgIdx = (blockIdx.x % numImgBlocks) * B_X * imgsPerThread;
    const int filterIdx = (blockIdx.y % numFilterBlocks) * B_Y + threadIdx.y;
    const int imgPixels = imgSize * imgSize;
    const int pxIdx = pxIdxY * imgSize + pxIdxX;
    const int imgIdx = blockImgIdx + threadIdx.x;
    // acts is offset by pixel/image only: its filter dimension is walked in the
    // f-loop below. The other pointers land on this thread's own element.
    acts += pxIdx * numImages + imgIdx;
    inputs += ((filterIdx) * imgPixels + pxIdx) * numImages + imgIdx;
    denoms += ((filterIdx) * imgPixels + pxIdx) * numImages + imgIdx;
    outGrads += ((filterIdx) * imgPixels + pxIdx) * numImages + imgIdx;
    target += ((filterIdx) * imgPixels + pxIdx) * numImages + imgIdx;
    // Per-image accumulator of acts over the filter window.
    float prod[imgsPerThread];
    // if (imgIdx != 0 || pxIdx != 0 || filterIdx != 0) {
    // return;
    // }
    #pragma unroll
    for (int i = 0; i < imgsPerThread; i++) {
        prod[i] = 0;
    }
    // Window of filters whose forward-pass (kFCNorm) windows include filterIdx —
    // the transpose of the forward window. Blocked mode reuses the fixed block.
    const int startF = blocked ? (filterIdx / sizeF) * sizeF : -sizeF + sizeF/2 + 1 + filterIdx;
    const int loopStartF = blocked ? startF : MAX(0, startF);
    const int loopEndF = MIN(numFilters, startF + sizeF);
    for (int f = loopStartF; f < loopEndF; ++f) {
        #pragma unroll
        for (int i = 0; i < imgsPerThread; i++) {
            if (!checkCaseBounds || imgIdx + i * B_X < numImages) {
                prod[i] += acts[f * imgPixels * numImages + i * B_X];
            }
        }
    }
    // printf("gpu f start: %d, end: %d\n", loopStartF, loopEndF);
    // grad = input * sum(acts over window) + outGrad * denom^(-powScale);
    // either overwrite target (!add) or blend with the existing value.
    if (!add) {
        #pragma unroll
        for (int i = 0; i < imgsPerThread; i++) {
            if (!checkCaseBounds || imgIdx + i * B_X < numImages) {
                const float inp = inputs[i * B_X];
                const float out = outGrads[i * B_X];
                const float den = denoms[i * B_X];
                prod[i] = inp * prod[i] + out * __powf(den, -powScale);
                target[i * B_X] = prod[i];
            }
        }
    } else {
        #pragma unroll
        for (int i = 0; i < imgsPerThread; i++) {
            if (!checkCaseBounds || imgIdx + i * B_X < numImages) {
                const float inp = inputs[i * B_X];
                const float out = outGrads[i * B_X];
                const float den = denoms[i * B_X];
                prod[i] = inp * prod[i] + out * __powf(den, -powScale);
                target[i * B_X] = scaleTargets * target[i * B_X] + scaleOutputs * prod[i];
            }
        }
    }
}
/*
* Block size B_YxB_X
* blockIdx.x determines pixel.x, image idx in batches of B_X*imgsPerThread
* blockIdx.y determines pixel.y, filter idx in batches of B_Y*filtersPerThread
*
* So each block does one pixel for some number of images/filters.
*
* threadIdx.x determines img idx
* threadIdx.y determines filter idx
*
* imgs: (numFilters, imgPixels, numImages)
* target: (numFilters, imgPixels, numImages)
*
* numImages must be divisible by B_X*imgsPerThread if checkCaseBounds is false
* numFilters must be divisible by B_Y*filtersPerThread
*
* sizeX should be something like 3 or 5 for this function. Not much more.
* TODO: write variant where each block does 4x4 region or so (this'll be based on kCNorm2).
*/
template<int B_Y, int B_X, int imgsPerThread, int filtersPerThread, bool checkCaseBounds>
__global__ void kTICA_manyfilter(float* imgs, float* target, const int imgSize,
                                 const int numFilters, const int numImages, const int sizeX,
                                 const float scaleTarget, const float scaleOutput) {
    // Per (filter, pixel, image) computes
    //   out = scaleOutput * 1 / (0.001f + sqrt(sum of squared imgs over a
    //         sizeX x sizeX spatial window centered on the pixel)),
    // optionally blended with the existing target when scaleTarget != 0.
    const int imgPixels = imgSize * imgSize;
    const int numImgBlocks = DIVUP(numImages, B_X*imgsPerThread);
    const int numFilterBlocks = numFilters/(B_Y*filtersPerThread);
    // Decode the packed 2D grid: blockIdx.x carries (pixel.x, image block),
    // blockIdx.y carries (pixel.y, filter block).
    const int pxIdxX = blockIdx.x / numImgBlocks;
    const int pxIdxY = blockIdx.y / numFilterBlocks;
    const int blockImgIdx = (blockIdx.x % numImgBlocks) * B_X * imgsPerThread;
    const int blockFilterIdx = (blockIdx.y % numFilterBlocks) * B_Y * filtersPerThread;
    const int pxIdx = pxIdxY * imgSize + pxIdxX;
    const int startPxX = -sizeX/2 + pxIdxX;
    const int startPxY = -sizeX/2 + pxIdxY;
    const int imgIdx = blockImgIdx + threadIdx.x;
    // imgs is offset by filter/image only: the spatial window is walked
    // explicitly in the y/x loops below.
    imgs += ((blockFilterIdx + threadIdx.y) * imgPixels) * numImages + imgIdx;
    target += ((blockFilterIdx + threadIdx.y) * imgPixels + pxIdx) * numImages + imgIdx;
    // Per-(filter, image) running sum of squares over the window.
    float prod[filtersPerThread][imgsPerThread];
    #pragma unroll
    for (int i = 0; i < imgsPerThread; i++) {
        if (!checkCaseBounds || imgIdx + i * B_X < numImages) {
            #pragma unroll
            for (int f = 0; f < filtersPerThread; f++) {
                prod[f][i] = 0;
            }
        }
    }
    // Clamp the spatial window to the image borders.
    const int loopStartY = MAX(0, startPxY);
    const int loopStartX = MAX(0, startPxX);
    const int loopEndY = MIN(imgSize, startPxY + sizeX);
    const int loopEndX = MIN(imgSize, startPxX + sizeX);
    for (int y = loopStartY; y < loopEndY; y++) {
        for (int x = loopStartX; x < loopEndX; x++) {
            const int imgPx = y * imgSize + x;
            #pragma unroll
            for (int i = 0; i < imgsPerThread; i++) {
                if (!checkCaseBounds || imgIdx + i * B_X < numImages) {
                    #pragma unroll
                    for (int f = 0; f < filtersPerThread; f++) {
                        prod[f][i] += square(imgs[(f * B_Y * imgPixels + imgPx) * numImages + i * B_X]);
                    }
                }
            }
        }
    }
    imgs += pxIdx * numImages;  // NOTE(review): imgs is not read after this point
    if (scaleTarget == 0) {
        #pragma unroll
        for (int i = 0; i < imgsPerThread; i++) {
            if (!checkCaseBounds || imgIdx + i * B_X < numImages) {
                #pragma unroll
                for (int f = 0; f < filtersPerThread; f++) {
                    // 0.001f guards against division by zero; the float suffix keeps
                    // the addition in single precision (a bare 0.001 would promote
                    // the expression to double in this float kernel).
                    target[f * B_Y * imgPixels * numImages + i * B_X] = scaleOutput * __fdividef(1.0f, 0.001f + sqrtf(prod[f][i]));
                }
            }
        }
    } else {
        #pragma unroll
        for (int i = 0; i < imgsPerThread; i++) {
            if (!checkCaseBounds || imgIdx + i * B_X < numImages) {
                #pragma unroll
                for (int f = 0; f < filtersPerThread; f++) {
                    target[f * B_Y * imgPixels * numImages + i * B_X] = scaleTarget * target[f * B_Y * imgPixels * numImages + i * B_X] + scaleOutput * __fdividef(1.0f, 0.001f + sqrtf(prod[f][i]));
                }
            }
        }
    }
}
/*
* Block size B_YxB_X
* blockIdx.x determines pixel.x, image idx in batches of B_X*imgsPerThread
* blockIdx.y determines pixel.y, filter idx in batches of B_Y*filtersPerThread
*
* So each block does one pixel for some number of images/filters.
*
* threadIdx.x determines img idx
* threadIdx.y determines filter idx
*
* imgs: (numFilters, imgPixels, numImages)
* ticas: (numFilters, imgPixels, numImages)
* target: (numFilters, imgPixels, numImages)
*
* numImages must be divisible by B_X*imgsPerThread if checkCaseBounds is false
* numFilters must be divisible by B_Y*filtersPerThread
*
* sizeX should be something like 3 or 5 for this function. Not much more.
* TODO: write variant where each block does 4x4 region or so (this'll be based on kCNorm2).
*/
template<int B_Y, int B_X, int imgsPerThread, int filtersPerThread, bool checkCaseBounds>
__global__ void kTICAGrad_manyfilter(float* imgs, float* ticas, float* target, const int imgSize,
                                    const int numFilters, const int numImages, const int sizeX,
                                    const float scaleTarget, const float scaleOutput) {
    const int imgPixels = imgSize * imgSize;
    const int numImgBlocks = DIVUP(numImages, B_X*imgsPerThread);
    const int numFilterBlocks = numFilters/(B_Y*filtersPerThread);
    // Decode the packed 2D grid: blockIdx.x carries (pixel.x, image block),
    // blockIdx.y carries (pixel.y, filter block).
    const int pxIdxX = blockIdx.x / numImgBlocks;
    const int pxIdxY = blockIdx.y / numFilterBlocks;
    const int blockImgIdx = (blockIdx.x % numImgBlocks) * B_X * imgsPerThread;
    const int blockFilterIdx = (blockIdx.y % numFilterBlocks) * B_Y * filtersPerThread;
    const int pxIdx = pxIdxY * imgSize + pxIdxX;
    const int startPxX = -sizeX/2 + pxIdxX;
    const int startPxY = -sizeX/2 + pxIdxY;
    const int imgIdx = blockImgIdx + threadIdx.x;
    // ticas is offset by filter/image only: its spatial dimension is walked in
    // the y/x loops below. imgs/target land on this thread's own pixel.
    imgs += ((blockFilterIdx + threadIdx.y) * imgPixels + pxIdx) * numImages + imgIdx;
    ticas += ((blockFilterIdx + threadIdx.y) * imgPixels) * numImages + imgIdx;
    target += ((blockFilterIdx + threadIdx.y) * imgPixels + pxIdx) * numImages + imgIdx;
    // Per-(filter, image) sum of the tica values over the spatial window.
    float prod[filtersPerThread][imgsPerThread];
    #pragma unroll
    for (int i = 0; i < imgsPerThread; i++) {
        if (!checkCaseBounds || imgIdx + i * B_X < numImages) {
            #pragma unroll
            for (int f = 0; f < filtersPerThread; f++) {
                prod[f][i] = 0;
            }
        }
    }
    // Clamp the spatial window to the image borders.
    const int loopStartY = MAX(0, startPxY);
    const int loopStartX = MAX(0, startPxX);
    const int loopEndY = MIN(imgSize, startPxY + sizeX);
    const int loopEndX = MIN(imgSize, startPxX + sizeX);
    for (int y = loopStartY; y < loopEndY; y++) {
        for (int x = loopStartX; x < loopEndX; x++) {
            const int imgPx = y * imgSize + x;
            #pragma unroll
            for (int i = 0; i < imgsPerThread; i++) {
                if (!checkCaseBounds || imgIdx + i * B_X < numImages) {
                    #pragma unroll
                    for (int f = 0; f < filtersPerThread; f++) {
                        // adding 1/S values
                        prod[f][i] += ticas[(f * B_Y * imgPixels + imgPx) * numImages + i * B_X];
                    }
                }
            }
        }
    }
    if (scaleTarget == 0) {
        #pragma unroll
        for (int i = 0; i < imgsPerThread; i++) {
            if (!checkCaseBounds || imgIdx + i * B_X < numImages) {
                #pragma unroll
                for (int f = 0; f < filtersPerThread; f++) {
                    // NOTE(review): this branch uses prod[f][i] directly while the
                    // else-branch below applies sqrtf(prod[f][i]) — confirm the
                    // asymmetry between the two branches is intentional.
                    target[f * B_Y * imgPixels * numImages + i * B_X] = scaleOutput * -imgs[f * B_Y * imgPixels * numImages + i * B_X] * prod[f][i];
                }
            }
        }
    } else {
        #pragma unroll
        for (int i = 0; i < imgsPerThread; i++) {
            if (!checkCaseBounds || imgIdx + i * B_X < numImages) {
                #pragma unroll
                for (int f = 0; f < filtersPerThread; f++) {
                    target[f * B_Y * imgPixels * numImages + i * B_X] = scaleTarget * target[f * B_Y * imgPixels * numImages + i * B_X] + scaleOutput * -imgs[f * B_Y * imgPixels * numImages + i * B_X] * sqrtf(prod[f][i]);
                }
            }
        }
    }
}
/*
* Block size B_YxB_X
* blockIdx.x determines pixel.x, image idx in batches of B_X*imgsPerThread
* blockIdx.y determines pixel.y, filter idx in batches of B_Y*filtersPerThread
*
* So each block does one output pixel for some number of images/filters.
*
* threadIdx.x determines img idx
* threadIdx.y determines filter idx
*
 * avgGrads:    (numFilters, numOutputs, numImages)
 * target:      (numFilters, imgPixels, numImages) (out)
*
* numImages must be divisible by B_X*imgsPerThread
* numFilters must be divisible by B_Y*filtersPerThread
*/
template<int B_Y, int B_X, int imgsPerThread, int filtersPerThread, bool add, bool checkCaseBounds>
__global__ void kLocalAvgUndo(float* avgGrads, float* target, const int imgSize, const int numFilters,
                              const int numImages, const int subsX, const int startX, const int strideX, const int outputsX,
                              const float scaleTargets, const float scaleOutputs) {
    const int numImgBlocks = DIVUP(numImages,B_X*imgsPerThread);
    // Decode the packed 2D grid: blockIdx.x carries (pixel.x, image block),
    // blockIdx.y carries (pixel.y, filter block).
    const int blockPxX = blockIdx.x / numImgBlocks;
    const int blockPxY = blockIdx.y / (numFilters/(B_Y*filtersPerThread));
    const int blockImgIdx = (blockIdx.x % numImgBlocks) * B_X * imgsPerThread;
    const int blockFilterIdx = (blockIdx.y % (numFilters/(B_Y*filtersPerThread))) * B_Y * filtersPerThread;
    const int blockPx = blockPxY * imgSize + blockPxX;
    const int numOutputs = outputsX * outputsX;
    const int imgPixels = imgSize * imgSize;
    // Range of pooling outputs whose subsX-wide window covered this input pixel.
    const int startOutputY = blockPxY - startX < subsX ? 0 : 1 + (blockPxY - startX - subsX) / strideX;
    const int endOutputY = MIN(outputsX, 1 + (blockPxY - startX) / strideX);
    const int startOutputX = blockPxX - startX < subsX ? 0 : 1 + (blockPxX - startX - subsX) / strideX;
    const int endOutputX = MIN(outputsX, 1 + (blockPxX - startX) / strideX);
    const int imgIdx = blockImgIdx + threadIdx.x;
    avgGrads += ((blockFilterIdx + threadIdx.y) * numOutputs) * numImages + imgIdx;
    target += ((blockFilterIdx + threadIdx.y) * imgPixels + blockPx) * numImages + imgIdx;
    // Per-(filter, image) accumulated gradient for this input pixel.
    float prod[filtersPerThread][imgsPerThread];
    #pragma unroll
    for (int f = 0; f < filtersPerThread; f++) {
        #pragma unroll
        for (int i = 0; i < imgsPerThread; i++) {
            prod[f][i] = 0;
        }
    }
    // Pixels outside the overall pooled region receive zero gradient.
    if (blockPxX >= startX && blockPxX < startX + strideX * (outputsX-1) + subsX
        && blockPxY >= startX && blockPxY < startX + strideX * (outputsX-1) + subsX) {
        for (int my = startOutputY; my < endOutputY; my++) {
            // Clamped extent of output (my, mx)'s window; used to normalize by
            // the actual (possibly border-truncated) region area.
            const float regionStartY = fmaxf(0, startX + my * strideX);
            const float regionEndY = fminf(imgSize, startX + my * strideX + subsX);
            const float regionSizeY = regionEndY - regionStartY;
            for (int mx = startOutputX; mx < endOutputX; mx++) {
                const int outputIdx = my * outputsX + mx;
                const float regionStartX = fmaxf(0, startX + mx * strideX);
                const float regionEndX = fminf(imgSize, startX + mx * strideX + subsX);
                const float regionSizeX = regionEndX - regionStartX;
                // It's important to do the division here, because pushing division into the below
                // loops makes the code 4x slower.
                const float regionSizeInv = 1.0f / (regionSizeX * regionSizeY);
                #pragma unroll
                for (int i = 0; i < imgsPerThread; i++) {
                    if (!checkCaseBounds || imgIdx + i * B_X < numImages) {
                        #pragma unroll
                        for (int f = 0; f < filtersPerThread; f++) {
                            prod[f][i] += avgGrads[(f * B_Y * numOutputs + outputIdx) * numImages + i * B_X] * regionSizeInv;
                        }
                    }
                }
            }
        }
    }
    // Write out: overwrite target (!add) or blend with the existing value.
    if (!add) {
        #pragma unroll
        for (int i = 0; i < imgsPerThread; i++) {
            if (!checkCaseBounds || imgIdx + i * B_X < numImages) {
                #pragma unroll
                for (int f = 0; f < filtersPerThread; f++) {
                    target[f * B_Y * imgPixels * numImages + i * B_X] = prod[f][i];
                }
            }
        }
    } else {
        #pragma unroll
        for (int i = 0; i < imgsPerThread; i++) {
            if (!checkCaseBounds || imgIdx + i * B_X < numImages) {
                #pragma unroll
                for (int f = 0; f < filtersPerThread; f++) {
                    target[f * B_Y * imgPixels * numImages + i * B_X] = scaleTargets * target[f * B_Y * imgPixels * numImages + i * B_X] + scaleOutputs * prod[f][i];
                }
            }
        }
    }
}
/*
* Block size B_YxB_X
* blockIdx.x determines pixel.x, image idx in batches of B_X*imgsPerThread
* blockIdx.y determines pixel.y, filter idx in batches of B_Y*filtersPerThread
*
* So each block does one output pixel for some number of images/filters.
*
* threadIdx.x determines img idx
* threadIdx.y determines filter idx
*
* imgs: (numFilters, imgPixels, numImages)
* maxGrads: (numFilters, numOutputs, numImages)
* maxActs: (numFilters, numOutputs, numImages)
* target: (numFilters, imgPixels, numImages)
*
* numImages must be divisible by B_X*imgsPerThread
* numFilters must be divisible by B_Y*filtersPerThread
*/
template<int B_Y, int B_X, int imgsPerThread, int filtersPerThread, bool add, bool checkCaseBounds>
__global__ void kLocalMaxUndo(float* imgs, float* maxGrads, float* maxActs, float* target, const int imgSize, const int numFilters,
                              const int numImages, const int subsX, const int startX, const int strideX, const int outputsX,
                              const float scaleTargets, const float scaleOutputs) {
    // Cache this block's input-pixel values so the comparison loop below
    // re-reads them from shared memory instead of global memory.
    __shared__ float shImgs[B_Y*filtersPerThread][B_X*imgsPerThread];
    const int numImgBlocks = DIVUP(numImages,B_X*imgsPerThread);
    // Decode the packed 2D grid: blockIdx.x carries (pixel.x, image block),
    // blockIdx.y carries (pixel.y, filter block).
    const int blockPxX = blockIdx.x / numImgBlocks;
    const int blockPxY = blockIdx.y / (numFilters/(B_Y*filtersPerThread));
    const int blockImgIdx = (blockIdx.x % numImgBlocks) * B_X * imgsPerThread;
    const int blockFilterIdx = (blockIdx.y % (numFilters/(B_Y*filtersPerThread))) * B_Y * filtersPerThread;
    const int blockPx = blockPxY * imgSize + blockPxX;
    const int numOutputs = outputsX * outputsX;
    const int imgPixels = imgSize * imgSize;
    // Range of pooling outputs whose subsX-wide window covered this input pixel.
    const int startOutputY = blockPxY - startX < subsX ? 0 : 1 + (blockPxY - startX - subsX) / strideX;
    const int endOutputY = MIN(outputsX, 1 + (blockPxY - startX) / strideX);
    const int startOutputX = blockPxX - startX < subsX ? 0 : 1 + (blockPxX - startX - subsX) / strideX;
    const int endOutputX = MIN(outputsX, 1 + (blockPxX - startX) / strideX);
    const int imgIdx = blockImgIdx + threadIdx.x;
    imgs += ((blockFilterIdx + threadIdx.y) * imgPixels + blockPx) * numImages + imgIdx;
    maxGrads += ((blockFilterIdx + threadIdx.y) * numOutputs) * numImages
            + imgIdx;
    maxActs += ((blockFilterIdx + threadIdx.y) * numOutputs) * numImages
            + imgIdx;
    target += ((blockFilterIdx + threadIdx.y) * imgPixels + blockPx) * numImages + imgIdx;
    // Per-(filter, image) accumulated gradient for this input pixel.
    float prod[filtersPerThread][imgsPerThread];
    #pragma unroll
    for (int f = 0; f < filtersPerThread; f++) {
        #pragma unroll
        for (int i = 0; i < imgsPerThread; i++) {
            prod[f][i] = 0;
        }
    }
    // Pixels outside the overall pooled region receive zero gradient.
    if (blockPxX >= startX && blockPxX < startX + strideX * (outputsX-1) + subsX
        && blockPxY >= startX && blockPxY < startX + strideX * (outputsX-1) + subsX) {
        #pragma unroll
        for (int i = 0; i < imgsPerThread; i++) {
            if (!checkCaseBounds || imgIdx + i * B_X < numImages) {
                #pragma unroll
                for (int f = 0; f < filtersPerThread; f++) {
                    shImgs[threadIdx.y + B_Y * f][threadIdx.x + B_X * i] = imgs[f * B_Y * imgPixels * numImages + i * B_X];
                }
            }
        }
        // NOTE(review): no __syncthreads() between the shared write and the reads
        // below; each thread only reads back its own slots, so this is safe here.
        for (int my = startOutputY; my < endOutputY; my++) {
            for (int mx = startOutputX; mx < endOutputX; mx++) {
                const int outputIdx = my * outputsX + mx;
                #pragma unroll
                for (int i = 0; i < imgsPerThread; i++) {
                    if (!checkCaseBounds || imgIdx + i * B_X < numImages) {
                        #pragma unroll
                        for (int f = 0; f < filtersPerThread; f++) {
                            const float ma = maxActs[(f * B_Y * numOutputs + outputIdx) * numImages + i * B_X];
                            const float mg = maxGrads[(f * B_Y * numOutputs + outputIdx) * numImages + i * B_X];
                            const float img = shImgs[threadIdx.y + B_Y * f][threadIdx.x + B_X * i];
                            // Gradient flows only where this pixel equaled the pooled max.
                            prod[f][i] += (img == ma) * mg;
                        }
                    }
                }
            }
        }
    }
    // Write out: overwrite target (!add) or blend with the existing value.
    if (!add) {
        #pragma unroll
        for (int i = 0; i < imgsPerThread; i++) {
            if (!checkCaseBounds || imgIdx + i * B_X < numImages) {
                #pragma unroll
                for (int f = 0; f < filtersPerThread; f++) {
                    target[f * B_Y * imgPixels * numImages + i * B_X] = prod[f][i];
                }
            }
        }
    } else {
        #pragma unroll
        for (int i = 0; i < imgsPerThread; i++) {
            if (!checkCaseBounds || imgIdx + i * B_X < numImages) {
                #pragma unroll
                for (int f = 0; f < filtersPerThread; f++) {
                    target[f * B_Y * imgPixels * numImages + i * B_X] = scaleTargets * target[f * B_Y * imgPixels * numImages + i * B_X] + scaleOutputs * prod[f][i];
                }
            }
        }
    }
}
/*
* acts := -2 x scale x acts x outGrads / denoms
*/
template<int B_X, int eltsPerThread>
__global__ void kRNormUndoPrelims(float* acts, float* denoms, float* outGrads,
                                  const uint numElements, const float scale) {
    // Strided elementwise update: each thread handles eltsPerThread elements,
    // B_X apart, per tile; the grid strides over the whole array so any launch
    // configuration covers all numElements entries.
    const uint threadsTotal = B_X * gridDim.x;
    const uint firstIdx = B_X * blockIdx.x * eltsPerThread + threadIdx.x;
    for (uint base = firstIdx; base < numElements; base += threadsTotal * eltsPerThread) {
        #pragma unroll
        for (uint sub = 0; sub < eltsPerThread; sub++) {
            const uint idx = base + sub * B_X;
            if (idx < numElements) {  // tail guard for the last partial tile
                acts[idx] = __fdividef(scale * outGrads[idx] * acts[idx], denoms[idx]);
            }
        }
    }
}
/*
* Block size B_YxB_X
* blockIdx.x determines pixel.x, image idx in batches of B_X*imgsPerThread
* blockIdx.y determines pixel.y, filter idx in batches of B_Y*filtersPerThread
*
* So each block does one output pixel for some number of images/filters.
*
* threadIdx.x determines img idx
* threadIdx.y determines filter idx
*
* outGrads: (numFilters, imgPixels, numImages)
* denoms: (numFilters, imgPixels, numImages)
* inputs: (numFilters, imgPixels, numImages)
* acts: (numFilters, imgPixels, numImages)
* target: (numFilters, imgPixels, numImages)
*
* numImages must be divisible by B_X*imgsPerThread
* numFilters must be divisible by B_Y*filtersPerThread
*
* TODO: this isn't really ideal
*/
template<int B_Y, int B_X, int imgsPerThread, int filtersPerThread, bool add, bool checkCaseBounds>
__global__ void kRNormUndo(float* outGrads, float* denoms, float* inputs, float* acts, float* target, const int imgSize, const int numFilters,
                           const int numImages, const int sizeX, const float powScale, const float scaleTargets, const float scaleOutputs) {
    const int numImgBlocks = DIVUP(numImages,B_X*imgsPerThread);
    const int numFilterBlocks = numFilters/(B_Y*filtersPerThread);
    // Decode the packed 2D grid: blockIdx.x carries (pixel.x, image block),
    // blockIdx.y carries (pixel.y, filter block).
    const int blockPxX = blockIdx.x / numImgBlocks;
    const int blockPxY = blockIdx.y / numFilterBlocks;
    const int blockImgIdx = (blockIdx.x % numImgBlocks) * B_X * imgsPerThread;
    const int blockFilterIdx = (blockIdx.y % numFilterBlocks) * B_Y * filtersPerThread;
    const int blockPx = blockPxY * imgSize + blockPxX;
    const int imgPixels = imgSize * imgSize;
    // Spatial window of pixels whose forward-pass normalization windows
    // included this pixel (transpose of the forward window), clamped to the image.
    const int startY = MAX(0, blockPxY + sizeX/2 - sizeX + 1);
    const int startX = MAX(0, blockPxX + sizeX/2 - sizeX + 1);
    const int endY = MIN(imgSize, blockPxY + sizeX/2 + 1);
    const int endX = MIN(imgSize, blockPxX + sizeX/2 + 1);
    const int imgIdx = blockImgIdx + threadIdx.x;
    // acts is offset by filter/image only: its spatial dimension is walked in
    // the sy/sx loops below. The other pointers land on this thread's pixel.
    acts += ((blockFilterIdx + threadIdx.y) * imgPixels) * numImages + imgIdx;
    inputs += ((blockFilterIdx + threadIdx.y) * imgPixels + blockPx) * numImages + imgIdx;
    denoms += ((blockFilterIdx + threadIdx.y) * imgPixels + blockPx) * numImages + imgIdx;
    outGrads += ((blockFilterIdx + threadIdx.y) * imgPixels + blockPx) * numImages + imgIdx;
    target += ((blockFilterIdx + threadIdx.y) * imgPixels + blockPx) * numImages + imgIdx;
    // Per-(filter, image) sum of acts over the spatial window.
    float prod[filtersPerThread][imgsPerThread];
    #pragma unroll
    for (int f = 0; f < filtersPerThread; f++) {
        #pragma unroll
        for (int i = 0; i < imgsPerThread; i++) {
            prod[f][i] = 0;
        }
    }
    for (int sy = startY; sy < endY; sy++) {
        for (int sx = startX; sx < endX; sx++) {
            const int outPx = sy * imgSize + sx;
            #pragma unroll
            for (int i = 0; i < imgsPerThread; i++) {
                if (!checkCaseBounds || imgIdx + i * B_X < numImages) {
                    #pragma unroll
                    for (int f = 0; f < filtersPerThread; f++) {
                        prod[f][i] += acts[(f * B_Y * imgPixels + outPx) * numImages + i * B_X];
                    }
                }
            }
        }
    }
    // outGrads += blockPx * numImages;
    // grad = input * sum(acts over window) + outGrad * denom^(-powScale);
    // either overwrite target (!add) or blend with the existing value.
    if (!add) {
        #pragma unroll
        for (int i = 0; i < imgsPerThread; i++) {
            if (!checkCaseBounds || imgIdx + i * B_X < numImages) {
                #pragma unroll
                for (int f = 0; f < filtersPerThread; f++) {
                    const float inp = inputs[(f * B_Y * imgPixels) * numImages + i * B_X];
                    const float out = outGrads[(f * B_Y * imgPixels) * numImages + i * B_X];
                    const float den = denoms[(f * B_Y * imgPixels) * numImages + i * B_X];
                    prod[f][i] = inp * prod[f][i] + out * __powf(den, -powScale);
                    target[f * B_Y * imgPixels * numImages + i * B_X] = prod[f][i];
                }
            }
        }
    } else {
        #pragma unroll
        for (int i = 0; i < imgsPerThread; i++) {
            if (!checkCaseBounds || imgIdx + i * B_X < numImages) {
                #pragma unroll
                for (int f = 0; f < filtersPerThread; f++) {
                    const float inp = inputs[(f * B_Y * imgPixels) * numImages + i * B_X];
                    const float out = outGrads[(f * B_Y * imgPixels) * numImages + i * B_X];
                    const float den = denoms[(f * B_Y * imgPixels) * numImages + i * B_X];
                    prod[f][i] = inp * prod[f][i] + out * __powf(den, -powScale);
                    target[f * B_Y * imgPixels * numImages + i * B_X] =
                        scaleTargets * target[f * B_Y * imgPixels * numImages + i * B_X]
                        + scaleOutputs * prod[f][i];
                }
            }
        }
    }
}
/*
* Block size 16xB_X
* blockIdx.x determines 4x4 pixel.x region, image idx in batches of B_X*imgsPerThread
* blockIdx.y determines 4x4 pixel.y region, filter idx in batches of filtersPerThread
*
* So each block does 4x4 region for some number of images/filters.
*
* threadIdx.x determines img idx
* threadIdx.y determines pixel idx
*
* outGrads: (numFilters, imgPixels, numImages)
* denoms: (numFilters, imgPixels, numImages)
* inputs: (numFilters, imgPixels, numImages)
* acts: (numFilters, imgPixels, numImages)
* target: (numFilters, imgPixels, numImages)
*
* B_X one of 8, 16, 32
* imgsPerThread one of 1, 2, 4, 8, 16
*
* B_XximgsPerThread MUST be divisible by 32.
* Number of filters MUST be divisible by filtersPerThread.
*
* numImages must be divisible by B_X*imgsPerThread if checkCaseBounds is false
* numFilters must be divisible by filtersPerThread
*
* Final write-out will not be fully coalesced unless B_X is 32. But there's a lot more
* reading than writing here, and the reading is all coalesced, so it should be OK.
*/
template<int B_X, int imgsPerThread, int filtersPerThread, bool add, bool checkCaseBounds>
__global__ void kRNormUndo2(float* outGrads, float* denoms, float* inputs, float* acts, float* target, const int imgSize, const int numFilters,
                            const int numImages, const int sizeX, const float powScale, const float scaleTargets, const float scaleOutputs) {
    // One row of acts values (all filters handled by this block) for the pixel
    // currently being streamed in; loaded cooperatively by the whole block.
    __shared__ float shActs[filtersPerThread][B_X*imgsPerThread];
    const int imgPixels = imgSize * imgSize;
    const int numImgBlocks = DIVUP(numImages, B_X*imgsPerThread);
    const int numFilterBlocks = numFilters/(filtersPerThread);
    // Each block covers a 4x4 region of output pixels.
    const int blockPxX = 4*(blockIdx.x / numImgBlocks);
    const int blockPxY = 4*(blockIdx.y / numFilterBlocks);
    const int blockImgIdx = (blockIdx.x % numImgBlocks) * B_X * imgsPerThread;
    const int blockFilterIdx = (blockIdx.y % numFilterBlocks) * filtersPerThread;
    // Flat thread id decomposed into a 32-wide load pattern for coalesced reads.
    const int tidx = threadIdx.y * B_X + threadIdx.x;
    const int loadY = tidx / 32, loadX = tidx % 32;
    // Union of the windows of all 16 pixels in the block's 4x4 region, clamped.
    const int startPxX = MAX(0, -DIVUP(sizeX,2) + blockPxX + 1);
    const int startPxY = MAX(0, -DIVUP(sizeX,2) + blockPxY + 1);
    const int endPxX = MIN(imgSize, blockPxX + sizeX/2 + 4);
    const int endPxY = MIN(imgSize, blockPxY + sizeX/2 + 4);
    // threadIdx.y (0..15) selects which of the 16 pixels this thread row owns.
    const int myPxX = blockPxX + threadIdx.y % 4;
    const int myPxY = blockPxY + threadIdx.y / 4;
    const int myPxIdx = myPxY * imgSize + myPxX;
    // const bool doWork = myPxX < imgSize && myPxY < imgSize;
    // This thread's own (unclamped) window bounds.
    const int myStartPxY = -DIVUP(sizeX,2) + myPxY + 1;
    const int myStartPxX = -DIVUP(sizeX,2) + myPxX + 1;
    const int myEndPxY = myPxY + sizeX/2 + 1;
    const int myEndPxX = myPxX + sizeX/2 + 1;
    const int imgIdx = blockImgIdx + threadIdx.x;
    // acts is positioned for the cooperative loads (loadY/loadX layout); the
    // remaining pointers land on this thread's own (pixel, image) element.
    acts += (blockFilterIdx + loadY) * imgPixels * numImages + blockImgIdx + loadX;
    denoms += (blockFilterIdx * imgPixels + myPxIdx) * numImages + imgIdx;
    inputs += (blockFilterIdx * imgPixels + myPxIdx) * numImages + imgIdx;
    outGrads += (blockFilterIdx * imgPixels + myPxIdx) * numImages + imgIdx;
    target += (blockFilterIdx * imgPixels + myPxIdx) * numImages + imgIdx;
    // Per-(filter, image) sum of acts over this thread's window.
    float prod[filtersPerThread][imgsPerThread];
    #pragma unroll
    for (int f = 0; f < filtersPerThread; f++) {
        #pragma unroll
        for (int i = 0; i < imgsPerThread; i++) {
            prod[f][i] = 0;
        }
    }
    for (int y = startPxY; y < endPxY; y++) {
        const bool isInY = y >= myStartPxY && y < myEndPxY;
        for (int x = startPxX; x < endPxX; x++) {
            const int px = y * imgSize + x;
            // All the threads load a pixel from memory
            #pragma unroll
            for (int ly = 0; ly < filtersPerThread; ly += B_X/2) {
                if (filtersPerThread % (B_X/2) == 0 || ly + loadY < filtersPerThread) {
                    #pragma unroll
                    for (int lx = 0; lx < B_X*imgsPerThread; lx += 32) {
                        if (!checkCaseBounds || lx + loadX + blockImgIdx < numImages) {
                            shActs[ly + loadY][lx + loadX] = acts[(ly * imgPixels + px) * numImages + lx];
                        }
                    }
                }
            }
            __syncthreads();
            // Each row of threads decides if it's interested in this pixel
            if (isInY && x >= myStartPxX && x < myEndPxX) {
                #pragma unroll
                for (int i = 0; i < imgsPerThread; i++) {
                    if (!checkCaseBounds || imgIdx + i * B_X < numImages) {
                        #pragma unroll
                        for (int f = 0; f < filtersPerThread; f++) {
                            prod[f][i] += shActs[f][threadIdx.x + i * B_X];
                        }
                    }
                }
            }
            __syncthreads();  // protect shActs before the next iteration overwrites it
        }
    }
    // Rebase acts from the cooperative-load layout to this thread's own element.
    acts -= (loadY * imgPixels - myPxIdx) * numImages + loadX;
    acts += threadIdx.x;
    // Guard against pixels past the image edge (the 4x4 region may overhang).
    if (myPxX < imgSize && myPxY < imgSize) {
        // grad = input * sum(acts over window) + outGrad * denom^(-powScale);
        // either overwrite target (!add) or blend with the existing value.
        if (!add) {
            #pragma unroll
            for (int i = 0; i < imgsPerThread; i++) {
                if (!checkCaseBounds || imgIdx + i * B_X < numImages) {
                    #pragma unroll
                    for (int f = 0; f < filtersPerThread; f++) {
                        const float out = outGrads[f * imgPixels * numImages + i * B_X];
                        const float den = denoms[f * imgPixels * numImages + i * B_X];
                        const float inp = inputs[f * imgPixels * numImages + i * B_X];
                        prod[f][i] = inp * prod[f][i] + out * __powf(den, -powScale);
                        target[f * imgPixels * numImages + i * B_X] = prod[f][i];
                    }
                }
            }
        } else {
            #pragma unroll
            for (int i = 0; i < imgsPerThread; i++) {
                if (!checkCaseBounds || imgIdx + i * B_X < numImages) {
                    #pragma unroll
                    for (int f = 0; f < filtersPerThread; f++) {
                        const float out = outGrads[f * imgPixels * numImages + i * B_X];
                        const float den = denoms[f * imgPixels * numImages + i * B_X];
                        const float inp = inputs[f * imgPixels * numImages + i * B_X];
                        prod[f][i] = inp * prod[f][i] + out * __powf(den, -powScale);
                        target[f * imgPixels * numImages + i * B_X] = scaleTargets * target[f * imgPixels * numImages + i * B_X] + scaleOutputs * prod[f][i];
                    }
                }
            }
        }
    }
}
/*
 * Convenience overload: max-pooling gradient backprop that simply overwrites
 * target (no blending with its previous contents).
 */
void convLocalMaxUndo(NVMatrix& images, NVMatrix& maxGrads, NVMatrix& maxActs, NVMatrix& target,
                      int subsX, int startX, int strideX, int outputsX) {
    // Overwrite semantics: scaleTargets = 0 discards target, scaleOutput = 1
    // writes the raw gradient.
    const float scaleTargets = 0;
    const float scaleOutput = 1;
    convLocalMaxUndo(images, maxGrads, maxActs, target, subsX, startX, strideX, outputsX, scaleTargets, scaleOutput);
}
/*
* imgs: (numFilters, imgPixels, numImages)
* maxGrads: (numFilters, numOutputs, numImages)
 * maxActs:     (numFilters, numOutputs, numImages)
* target: (numFilters, imgPixels, numImages)
*/
/*
 * Backward pass of local max-pooling.
 *
 * images:   (numFilters, imgPixels, numImages)  -- layer inputs
 * maxGrads: (numFilters, numOutputs, numImages) -- gradients w.r.t. pooled outputs
 * maxActs:  (numFilters, numOutputs, numImages) -- pooled outputs (the maxima)
 * target:   (numFilters, imgPixels, numImages)  -- receives routed gradients (out)
 *
 * target := scaleTargets * target + scaleOutput * (routed gradient), as applied
 * by the kLocalMaxUndo kernels selected below. Host-side dispatcher only:
 * validates layouts, then picks a template instantiation based on how many
 * images each thread can process and whether bounds checks are required.
 */
void convLocalMaxUndo(NVMatrix& images, NVMatrix& maxGrads, NVMatrix& maxActs, NVMatrix& target,
                      int subsX, int startX, int strideX, int outputsX, float scaleTargets, float scaleOutput) {
    int outputs = outputsX * outputsX;
    int numImages = images.getNumCols();
    int numFilters = maxGrads.getNumRows() / outputs;
    int imgPixels = images.getNumRows() / numFilters;
    assert(images.getNumRows() == numFilters * imgPixels);
    int imgSize = int(sqrt(imgPixels));
    assert(imgSize * imgSize == imgPixels); // images must be square
    assert(maxGrads.getNumRows() == numFilters * outputs);
    assert(maxGrads.getNumCols() == numImages);
    // All operands must be row-major (not transposed) and contiguous.
    assert(!images.isTrans());
    assert(!target.isTrans());
    assert(!maxGrads.isTrans());
    assert(!maxActs.isTrans());
    assert(images.isContiguous());
    assert(maxGrads.isContiguous());
    assert(maxActs.isContiguous());
    assert(maxGrads.isSameDims(maxActs));
    assert(numFilters % 16 == 0);
//    assert(numImages % 128 == 0);
    assert(strideX <= subsX); // overlapping or abutting pooling windows only
    target.resize(images);
    assert(target.isContiguous());
    // 4/2/1 images per thread depending on divisibility of numImages; bounds
    // checks are compiled in only when the last 32*imgsPerThread-wide chunk
    // is partially filled.
    // NOTE(review): checkCaseBounds is declared int but used as a bool; bool
    // would be clearer (behavior is the same).
    int imgsPerThread = numImages % 128 == 0 ? 4 : numImages % 64 == 0 ? 2 : 1;
    int checkCaseBounds = numImages % (32*imgsPerThread) != 0;
    dim3 threads(32, 4);
    dim3 blocks(DIVUP(numImages,32*imgsPerThread) * imgSize, (numFilters / (4 * 2)) * imgSize);
    // Dispatch tree: each (imgsPerThread, scaling, bounds-check) combination is
    // a distinct compile-time template instantiation of kLocalMaxUndo.
    if (imgsPerThread == 4) {
        if (checkCaseBounds) {
            if (scaleTargets == 0 && scaleOutput == 1) {
                kLocalMaxUndo<4, 32, 4, 2, false, true><<<blocks, threads>>>(images.getDevData(), maxGrads.getDevData(), maxActs.getDevData(), target.getDevData(),
                                                                  imgSize, numFilters, numImages, subsX, startX, strideX, outputsX, scaleTargets, scaleOutput);
            } else {
                kLocalMaxUndo<4, 32, 4, 2, true, true><<<blocks, threads>>>(images.getDevData(), maxGrads.getDevData(), maxActs.getDevData(), target.getDevData(),
                                                                  imgSize, numFilters, numImages, subsX, startX, strideX, outputsX, scaleTargets, scaleOutput);
            }
        } else {
            if (scaleTargets == 0 && scaleOutput == 1) {
                kLocalMaxUndo<4, 32, 4, 2, false, false><<<blocks, threads>>>(images.getDevData(), maxGrads.getDevData(), maxActs.getDevData(), target.getDevData(),
                                                                  imgSize, numFilters, numImages, subsX, startX, strideX, outputsX, scaleTargets, scaleOutput);
            } else {
                kLocalMaxUndo<4, 32, 4, 2, true, false><<<blocks, threads>>>(images.getDevData(), maxGrads.getDevData(), maxActs.getDevData(), target.getDevData(),
                                                                  imgSize, numFilters, numImages, subsX, startX, strideX, outputsX, scaleTargets, scaleOutput);
            }
        }
    } else if (imgsPerThread == 2) {
        if (checkCaseBounds) {
            if (scaleTargets == 0 && scaleOutput == 1) {
                kLocalMaxUndo<4, 32, 2, 2, false, true><<<blocks, threads>>>(images.getDevData(), maxGrads.getDevData(), maxActs.getDevData(), target.getDevData(),
                                                                  imgSize, numFilters, numImages, subsX, startX, strideX, outputsX, scaleTargets, scaleOutput);
            } else {
                kLocalMaxUndo<4, 32, 2, 2, true, true><<<blocks, threads>>>(images.getDevData(), maxGrads.getDevData(), maxActs.getDevData(), target.getDevData(),
                                                                  imgSize, numFilters, numImages, subsX, startX, strideX, outputsX, scaleTargets, scaleOutput);
            }
        } else {
            if (scaleTargets == 0 && scaleOutput == 1) {
                kLocalMaxUndo<4, 32, 2, 2, false, false><<<blocks, threads>>>(images.getDevData(), maxGrads.getDevData(), maxActs.getDevData(), target.getDevData(),
                                                                  imgSize, numFilters, numImages, subsX, startX, strideX, outputsX, scaleTargets, scaleOutput);
            } else {
                kLocalMaxUndo<4, 32, 2, 2, true, false><<<blocks, threads>>>(images.getDevData(), maxGrads.getDevData(), maxActs.getDevData(), target.getDevData(),
                                                                  imgSize, numFilters, numImages, subsX, startX, strideX, outputsX, scaleTargets, scaleOutput);
            }
        }
    } else {
        if (checkCaseBounds) {
            if (scaleTargets == 0 && scaleOutput == 1) {
                kLocalMaxUndo<4, 32, 1, 2, false, true><<<blocks, threads>>>(images.getDevData(), maxGrads.getDevData(), maxActs.getDevData(), target.getDevData(),
                                                                  imgSize, numFilters, numImages, subsX, startX, strideX, outputsX, scaleTargets, scaleOutput);
            } else {
                kLocalMaxUndo<4, 32, 1, 2, true, true><<<blocks, threads>>>(images.getDevData(), maxGrads.getDevData(), maxActs.getDevData(), target.getDevData(),
                                                                  imgSize, numFilters, numImages, subsX, startX, strideX, outputsX, scaleTargets, scaleOutput);
            }
        } else {
            if (scaleTargets == 0 && scaleOutput == 1) {
                kLocalMaxUndo<4, 32, 1, 2, false, false><<<blocks, threads>>>(images.getDevData(), maxGrads.getDevData(), maxActs.getDevData(), target.getDevData(),
                                                                  imgSize, numFilters, numImages, subsX, startX, strideX, outputsX, scaleTargets, scaleOutput);
            } else {
                kLocalMaxUndo<4, 32, 1, 2, true, false><<<blocks, threads>>>(images.getDevData(), maxGrads.getDevData(), maxActs.getDevData(), target.getDevData(),
                                                                  imgSize, numFilters, numImages, subsX, startX, strideX, outputsX, scaleTargets, scaleOutput);
            }
        }
    }
    // Surfaces launch-configuration errors from the kernel launch above.
    getLastCudaError("convLocalMaxUndo: kernel execution failed");
}
/*
 * Convenience overload of convLocalAvgUndo: propagates average-pooling
 * gradients with no accumulation into the target (scaleTargets = 0) and
 * unscaled output (scaleOutput = 1).
 */
void convLocalAvgUndo(NVMatrix& avgGrads, NVMatrix& target, int subsX, int startX, int strideX, int outputsX, int imgSize) {
    convLocalAvgUndo(avgGrads, target,
                     subsX, startX, strideX, outputsX, imgSize,
                     0, 1);
}
/*
* avgGrads: (numFilters, numOutputs, numImages)
* target: (numFilters, imgPixels, numImages)
*/
/*
 * Backward pass of local average-pooling.
 *
 * avgGrads: (numFilters, numOutputs, numImages) -- gradients w.r.t. pooled outputs
 * target:   (numFilters, imgPixels, numImages)  -- receives distributed gradients (out)
 *
 * target := scaleTargets * target + scaleOutput * (distributed gradient), as
 * applied by the kLocalAvgUndo kernels selected below. Host-side dispatcher:
 * validates layouts, then selects a template instantiation by images-per-thread
 * and whether bounds checks are needed.
 */
void convLocalAvgUndo(NVMatrix& avgGrads, NVMatrix& target,
                      int subsX, int startX, int strideX, int outputsX, int imgSize,
                      float scaleTargets, float scaleOutput) {
    int numImages = avgGrads.getNumCols();
    int outputs = outputsX * outputsX;
    int imgPixels = imgSize * imgSize;
    int numFilters = avgGrads.getNumRows() / outputs;
    assert(avgGrads.getNumRows() == numFilters * outputs);
    // Operands must be row-major and contiguous.
    assert(!target.isTrans());
    assert(!avgGrads.isTrans());
    assert(avgGrads.isContiguous());
    assert(numFilters % 16 == 0);
//    assert(numImages % 128 == 0);
    assert(strideX <= subsX); // overlapping or abutting pooling windows only
    target.resize(numFilters * imgPixels, numImages);
    assert(target.isContiguous());
    // 4/2/1 images per thread depending on divisibility of numImages.
    // NOTE(review): checkCaseBounds is declared int but used as a bool.
    int imgsPerThread = numImages % 128 == 0 ? 4 : numImages % 64 == 0 ? 2 : 1;
    int checkCaseBounds = numImages % (32*imgsPerThread) != 0;
    dim3 threads(32, 4);
    // 4 filters per thread here (vs 2 in convLocalMaxUndo), hence /(4*4).
    dim3 blocks(DIVUP(numImages,32*imgsPerThread) * imgSize, (numFilters / (4 * 4)) * imgSize);
    if (imgsPerThread == 4) {
        if (checkCaseBounds) {
            if (scaleTargets == 0 && scaleOutput == 1) {
                kLocalAvgUndo<4, 32, 4, 4, false, true><<<blocks, threads>>>(avgGrads.getDevData(), target.getDevData(),
                                                                   imgSize, numFilters, numImages, subsX, startX, strideX,
                                                                   outputsX, scaleTargets, scaleOutput);
            } else {
                kLocalAvgUndo<4, 32, 4, 4, true, true><<<blocks, threads>>>(avgGrads.getDevData(), target.getDevData(),
                                                                   imgSize, numFilters, numImages, subsX, startX, strideX,
                                                                   outputsX, scaleTargets, scaleOutput);
            }
        } else {
            if (scaleTargets == 0 && scaleOutput == 1) {
                kLocalAvgUndo<4, 32, 4, 4, false, false><<<blocks, threads>>>(avgGrads.getDevData(), target.getDevData(),
                                                                   imgSize, numFilters, numImages, subsX, startX, strideX,
                                                                   outputsX, scaleTargets, scaleOutput);
            } else {
                kLocalAvgUndo<4, 32, 4, 4, true, false><<<blocks, threads>>>(avgGrads.getDevData(), target.getDevData(),
                                                                   imgSize, numFilters, numImages, subsX, startX, strideX,
                                                                   outputsX, scaleTargets, scaleOutput);
            }
        }
    } else if (imgsPerThread == 2) {
        if (checkCaseBounds) {
            if (scaleTargets == 0 && scaleOutput == 1) {
                kLocalAvgUndo<4, 32, 2, 4, false, true><<<blocks, threads>>>(avgGrads.getDevData(), target.getDevData(),
                                                                   imgSize, numFilters, numImages, subsX, startX, strideX,
                                                                   outputsX, scaleTargets, scaleOutput);
            } else {
                kLocalAvgUndo<4, 32, 2, 4, true, true><<<blocks, threads>>>(avgGrads.getDevData(), target.getDevData(),
                                                                   imgSize, numFilters, numImages, subsX, startX, strideX,
                                                                   outputsX, scaleTargets, scaleOutput);
            }
        } else {
            if (scaleTargets == 0 && scaleOutput == 1) {
                kLocalAvgUndo<4, 32, 2, 4, false, false><<<blocks, threads>>>(avgGrads.getDevData(), target.getDevData(),
                                                                   imgSize, numFilters, numImages, subsX, startX, strideX,
                                                                   outputsX, scaleTargets, scaleOutput);
            } else {
                kLocalAvgUndo<4, 32, 2, 4, true, false><<<blocks, threads>>>(avgGrads.getDevData(), target.getDevData(),
                                                                   imgSize, numFilters, numImages, subsX, startX, strideX,
                                                                   outputsX, scaleTargets, scaleOutput);
            }
        }
    } else {
        if (checkCaseBounds) {
            if (scaleTargets == 0 && scaleOutput == 1) {
                kLocalAvgUndo<4, 32, 1, 4, false, true><<<blocks, threads>>>(avgGrads.getDevData(), target.getDevData(),
                                                                   imgSize, numFilters, numImages, subsX, startX, strideX,
                                                                   outputsX, scaleTargets, scaleOutput);
            } else {
                kLocalAvgUndo<4, 32, 1, 4, true, true><<<blocks, threads>>>(avgGrads.getDevData(), target.getDevData(),
                                                                   imgSize, numFilters, numImages, subsX, startX, strideX,
                                                                   outputsX, scaleTargets, scaleOutput);
            }
        } else {
            if (scaleTargets == 0 && scaleOutput == 1) {
                kLocalAvgUndo<4, 32, 1, 4, false, false><<<blocks, threads>>>(avgGrads.getDevData(), target.getDevData(),
                                                                   imgSize, numFilters, numImages, subsX, startX, strideX,
                                                                   outputsX, scaleTargets, scaleOutput);
            } else {
                kLocalAvgUndo<4, 32, 1, 4, true, false><<<blocks, threads>>>(avgGrads.getDevData(), target.getDevData(),
                                                                   imgSize, numFilters, numImages, subsX, startX, strideX,
                                                                   outputsX, scaleTargets, scaleOutput);
            }
        }
    }
    getLastCudaError("convLocalAvgUndo: kernel execution failed");
}
/*
 * Cross-map response normalization forward pass. Delegates to
 * convContrastNorm, passing the images themselves in the meanDiffs slot --
 * response normalization is the special case of contrast normalization where
 * the "mean differences" are the raw inputs.
 */
void convResponseNorm(NVMatrix& images, NVMatrix& denoms, NVMatrix& target, int numFilters, int sizeX, float addScale, float powScale) {
    convContrastNorm(images, images, denoms, target,
                     numFilters, sizeX, addScale, powScale);
}
/*
* images: (numFilters, imgPixels, numImages)
* meanDiffs: (numFilters, imgPixels, numImages)
* denoms: (numFilters, imgPixels, numImages) (out)
* target: (numFilters, imgPixels, numImages) (out)
*/
/*
 * Contrast/response normalization forward pass.
 *
 * images:    (numFilters, imgPixels, numImages)
 * meanDiffs: (numFilters, imgPixels, numImages) -- equals images when called
 *            via convResponseNorm
 * denoms:    (numFilters, imgPixels, numImages) (out) -- normalization denominators
 * target:    (numFilters, imgPixels, numImages) (out) -- normalized activities
 *
 * Host-side dispatcher: validates layouts, then selects one of three kernel
 * families -- kCNorm2 for large regions, kCNorm_fewfilter for <= 8 filters
 * (one instantiation per filter count), or kCNorm_manyfilter otherwise.
 */
void convContrastNorm(NVMatrix& images, NVMatrix& meanDiffs, NVMatrix& denoms, NVMatrix& target, int numFilters, int sizeX, float addScale, float powScale) {
    int numImages = images.getNumCols();
    int imgPixels = images.getNumRows() / numFilters;
    assert(images.getNumRows() == numFilters * imgPixels);
    int imgSize = int(sqrt(imgPixels));
    assert(imgSize * imgSize == imgPixels); // images must be square
    assert(meanDiffs.isSameDims(images));
    // Operands must be row-major and contiguous.
    assert(!meanDiffs.isTrans());
    assert(!images.isTrans());
    assert(images.isContiguous());
    assert(meanDiffs.isContiguous());
    assert(numFilters % 16 == 0 || numFilters <= 8);
    target.resize(images);
    denoms.resize(images);
    assert(target.isContiguous());
    if (sizeX >= 6 && numFilters % 4 == 0) {
        // This one is faster for large regions (my tests show regions >= 6...)
        int imgsPerThread = 8;
        int filtersPerThread = 4;
        int bx = 8;
        bool checkCaseBounds = numImages % (bx*imgsPerThread) != 0;
        assert((imgsPerThread * bx) % 32 == 0);
        assert(numFilters % filtersPerThread == 0);
        dim3 threads(bx, 16);
        dim3 blocks(DIVUP(imgSize, 4) * DIVUP(numImages, bx*imgsPerThread), DIVUP(imgSize, 4) * numFilters / filtersPerThread);
        if (checkCaseBounds) {
            cudaFuncSetCacheConfig(kCNorm2<8, 8, 4, true>, cudaFuncCachePreferL1); // L1 faster here
            kCNorm2<8, 8, 4, true><<<blocks, threads>>>(images.getDevData(), meanDiffs.getDevData(), denoms.getDevData(), target.getDevData(),
                                                    imgSize, numFilters, numImages, sizeX, addScale, powScale);
        } else {
            cudaFuncSetCacheConfig(kCNorm2<8, 8, 4, false>, cudaFuncCachePreferL1); // L1 faster here
            kCNorm2<8, 8, 4, false><<<blocks, threads>>>(images.getDevData(), meanDiffs.getDevData(), denoms.getDevData(), target.getDevData(),
                                                    imgSize, numFilters, numImages, sizeX, addScale, powScale);
        }
    } else {
        bool checkCaseBounds = numImages % 128 != 0;
        if (numFilters <= 8) {
            // Few-filter path: a dedicated template instantiation per filter
            // count, so filtersPerThread is a compile-time constant.
            dim3 threads(128);
            dim3 blocks(DIVUP(numImages,128) * imgSize, imgSize);
            if (numFilters == 1) {
                if (checkCaseBounds) {
                    cudaFuncSetCacheConfig(kCNorm_fewfilter<1, 1, true>, cudaFuncCachePreferL1);
                    kCNorm_fewfilter<1, 1, true><<<blocks, threads>>>(images.getDevData(), meanDiffs.getDevData(), denoms.getDevData(), target.getDevData(),
                                                                      imgSize, numImages, sizeX, addScale, powScale);
                } else {
                    cudaFuncSetCacheConfig(kCNorm_fewfilter<1, 1, false>, cudaFuncCachePreferL1);
                    kCNorm_fewfilter<1, 1, false><<<blocks, threads>>>(images.getDevData(), meanDiffs.getDevData(), denoms.getDevData(), target.getDevData(),
                                                                      imgSize, numImages, sizeX, addScale, powScale);
                }
            } else if (numFilters == 2) {
                if (checkCaseBounds) {
                    cudaFuncSetCacheConfig(kCNorm_fewfilter<1, 2, true>, cudaFuncCachePreferL1);
                    kCNorm_fewfilter<1, 2, true><<<blocks, threads>>>(images.getDevData(), meanDiffs.getDevData(), denoms.getDevData(), target.getDevData(),
                                                                      imgSize, numImages, sizeX, addScale, powScale);
                } else {
                    cudaFuncSetCacheConfig(kCNorm_fewfilter<1, 2, false>, cudaFuncCachePreferL1);
                    kCNorm_fewfilter<1, 2, false><<<blocks, threads>>>(images.getDevData(), meanDiffs.getDevData(), denoms.getDevData(), target.getDevData(),
                                                                      imgSize, numImages, sizeX, addScale, powScale);
                }
            } else if (numFilters == 3) {
                if (checkCaseBounds) {
                    cudaFuncSetCacheConfig(kCNorm_fewfilter<1, 3, true>, cudaFuncCachePreferL1);
                    kCNorm_fewfilter<1, 3, true><<<blocks, threads>>>(images.getDevData(), meanDiffs.getDevData(), denoms.getDevData(), target.getDevData(),
                                                                      imgSize, numImages, sizeX, addScale, powScale);
                } else {
                    cudaFuncSetCacheConfig(kCNorm_fewfilter<1, 3, false>, cudaFuncCachePreferL1);
                    kCNorm_fewfilter<1, 3, false><<<blocks, threads>>>(images.getDevData(), meanDiffs.getDevData(), denoms.getDevData(), target.getDevData(),
                                                                      imgSize, numImages, sizeX, addScale, powScale);
                }
            } else if (numFilters == 4) {
                if (checkCaseBounds) {
                    cudaFuncSetCacheConfig(kCNorm_fewfilter<1, 4, true>, cudaFuncCachePreferL1);
                    kCNorm_fewfilter<1, 4, true><<<blocks, threads>>>(images.getDevData(), meanDiffs.getDevData(), denoms.getDevData(), target.getDevData(),
                                                                      imgSize, numImages, sizeX, addScale, powScale);
                } else {
                    cudaFuncSetCacheConfig(kCNorm_fewfilter<1, 4, false>, cudaFuncCachePreferL1);
                    kCNorm_fewfilter<1, 4, false><<<blocks, threads>>>(images.getDevData(), meanDiffs.getDevData(), denoms.getDevData(), target.getDevData(),
                                                                      imgSize, numImages, sizeX, addScale, powScale);
                }
            } else if (numFilters == 5) {
                if (checkCaseBounds) {
                    cudaFuncSetCacheConfig(kCNorm_fewfilter<1, 5, true>, cudaFuncCachePreferL1);
                    kCNorm_fewfilter<1, 5, true><<<blocks, threads>>>(images.getDevData(), meanDiffs.getDevData(), denoms.getDevData(), target.getDevData(),
                                                                      imgSize, numImages, sizeX, addScale, powScale);
                } else {
                    cudaFuncSetCacheConfig(kCNorm_fewfilter<1, 5, false>, cudaFuncCachePreferL1);
                    kCNorm_fewfilter<1, 5, false><<<blocks, threads>>>(images.getDevData(), meanDiffs.getDevData(), denoms.getDevData(), target.getDevData(),
                                                                      imgSize, numImages, sizeX, addScale, powScale);
                }
            } else if (numFilters == 6) {
                if (checkCaseBounds) {
                    cudaFuncSetCacheConfig(kCNorm_fewfilter<1, 6, true>, cudaFuncCachePreferL1);
                    kCNorm_fewfilter<1, 6, true><<<blocks, threads>>>(images.getDevData(), meanDiffs.getDevData(), denoms.getDevData(), target.getDevData(),
                                                                      imgSize, numImages, sizeX, addScale, powScale);
                } else {
                    cudaFuncSetCacheConfig(kCNorm_fewfilter<1, 6, false>, cudaFuncCachePreferL1);
                    kCNorm_fewfilter<1, 6, false><<<blocks, threads>>>(images.getDevData(), meanDiffs.getDevData(), denoms.getDevData(), target.getDevData(),
                                                                      imgSize, numImages, sizeX, addScale, powScale);
                }
            } else if (numFilters == 7) {
                if (checkCaseBounds) {
                    cudaFuncSetCacheConfig(kCNorm_fewfilter<1, 7, true>, cudaFuncCachePreferL1);
                    kCNorm_fewfilter<1, 7, true><<<blocks, threads>>>(images.getDevData(), meanDiffs.getDevData(), denoms.getDevData(), target.getDevData(),
                                                                      imgSize, numImages, sizeX, addScale, powScale);
                } else {
                    cudaFuncSetCacheConfig(kCNorm_fewfilter<1, 7, false>, cudaFuncCachePreferL1);
                    kCNorm_fewfilter<1, 7, false><<<blocks, threads>>>(images.getDevData(), meanDiffs.getDevData(), denoms.getDevData(), target.getDevData(),
                                                                      imgSize, numImages, sizeX, addScale, powScale);
                }
            } else if (numFilters == 8) {
                if (checkCaseBounds) {
                    cudaFuncSetCacheConfig(kCNorm_fewfilter<1, 8, true>, cudaFuncCachePreferL1);
                    kCNorm_fewfilter<1, 8, true><<<blocks, threads>>>(images.getDevData(), meanDiffs.getDevData(), denoms.getDevData(), target.getDevData(),
                                                                      imgSize, numImages, sizeX, addScale, powScale);
                } else {
                    cudaFuncSetCacheConfig(kCNorm_fewfilter<1, 8, false>, cudaFuncCachePreferL1);
                    kCNorm_fewfilter<1, 8, false><<<blocks, threads>>>(images.getDevData(), meanDiffs.getDevData(), denoms.getDevData(), target.getDevData(),
                                                                      imgSize, numImages, sizeX, addScale, powScale);
                }
            }
        } else {
            // General many-filter path.
            dim3 threads(32, 4);
            dim3 blocks(DIVUP(numImages,32*4) * imgSize, (numFilters / (4 * 2)) * imgSize);
            if (checkCaseBounds) {
                cudaFuncSetCacheConfig(kCNorm_manyfilter<4, 32, 4, 2, true>, cudaFuncCachePreferL1);
                kCNorm_manyfilter<4, 32, 4, 2, true><<<blocks, threads>>>(images.getDevData(), meanDiffs.getDevData(), denoms.getDevData(), target.getDevData(),
                                                                  imgSize, numFilters, numImages, sizeX, addScale, powScale);
            } else {
                cudaFuncSetCacheConfig(kCNorm_manyfilter<4, 32, 4, 2, false>, cudaFuncCachePreferL1);
                kCNorm_manyfilter<4, 32, 4, 2, false><<<blocks, threads>>>(images.getDevData(), meanDiffs.getDevData(), denoms.getDevData(), target.getDevData(),
                                                                  imgSize, numFilters, numImages, sizeX, addScale, powScale);
            }
        }
    }
    getLastCudaError("convResponseNorm: kernel execution failed");
}
/*
 * Contrast-normalization backward pass. The gradient math is identical to
 * that of response normalization, so this simply forwards to
 * convResponseNormUndo with meanDiffs supplied as the "inputs" matrix.
 */
void convContrastNormUndo(NVMatrix& outGrads, NVMatrix& denoms, NVMatrix& meanDiffs, NVMatrix& acts, NVMatrix& target, int numFilters,
                          int sizeX, float addScale, float powScale, float scaleTargets, float scaleOutput) {
    convResponseNormUndo(outGrads, denoms, meanDiffs, acts, target,
                         numFilters, sizeX, addScale, powScale,
                         scaleTargets, scaleOutput);
}
/*
* outGrads: (numFilters, imgPixels, numImages)
* denoms: (numFilters, imgPixels, numImages)
* inputs: (numFilters, imgPixels, numImages)
* acts: (numFilters, imgPixels, numImages)
* target: (numFilters, imgPixels, numImages)
*
* THIS WILL OVERWRITE THE ACTS MATRIX.
*/
/*
 * Backward pass of response normalization.
 *
 * outGrads, denoms, inputs, acts, target: all (numFilters, imgPixels, numImages).
 * WARNING: acts is overwritten in place by the kRNormUndoPrelims launch below.
 *
 * Host-side dispatcher: runs a preliminary kernel that folds constants into
 * acts, then selects the main kernel family by region size and image-count
 * divisibility.
 */
void convResponseNormUndo(NVMatrix& outGrads, NVMatrix& denoms, NVMatrix& inputs, NVMatrix& acts, NVMatrix& target, int numFilters,
                          int sizeX, float addScale, float powScale, float scaleTargets, float scaleOutput) {
    int numImages = outGrads.getNumCols();
    int imgPixels = outGrads.getNumRows() / numFilters;
    int imgSize = int(sqrt(imgPixels));
    assert(imgSize * imgSize == imgPixels); // images must be square
    assert(outGrads.getNumRows() == numFilters * imgPixels);
    assert(denoms.isSameDims(outGrads));
    assert(acts.isSameDims(denoms));
    // Operands must be row-major; outGrads additionally contiguous.
    assert(!denoms.isTrans());
    assert(!outGrads.isTrans())
;    assert(!acts.isTrans());
    assert(!target.isTrans());
    assert(outGrads.isContiguous());
    assert(numFilters % 16 == 0);
    target.resize(outGrads);
    assert(target.isContiguous());
    // First do acts := -2 x scale x acts x outGrads / denoms
    // so that the main routine only has to do an addition in its inner loop.
    int prelimEltsPerThread = 4;
    dim3 threads(128);
    dim3 blocks(MIN(512, DIVUP(outGrads.getNumElements(),(threads.x * prelimEltsPerThread))));
    kRNormUndoPrelims<128, 4><<<blocks, threads>>>(acts.getDevData(), denoms.getDevData(), outGrads.getDevData(), outGrads.getNumElements(), -2*addScale*powScale);
    // Now the main routine
    if (sizeX >= 6 && numFilters % 4 == 0) {
        // This one is faster for large regions (my tests show regions >= 6...)
        int imgsPerThread = numImages % 128 == 0 ? 8 : numImages % 64 == 0 ? 4 : 2;
        int filtersPerThread = 4;
        int bx = 16;
        bool checkCaseBounds = numImages % (bx*imgsPerThread) != 0;
        assert((imgsPerThread * bx) % 32 == 0);
        threads = dim3(bx, 16);
        blocks = dim3(DIVUP(imgSize, 4) * DIVUP(numImages, bx*imgsPerThread), DIVUP(imgSize, 4) * numFilters / filtersPerThread);
        // NOTE(review): in this kRNormUndo2 dispatch the scaleTargets==0 &&
        // scaleOutput==1 case instantiates the 4th template argument as true,
        // whereas kRNormUndo below uses false for the same case -- presumably
        // the boolean has opposite meaning between the two kernels; confirm
        // against the kernel template definitions.
        if (imgsPerThread == 8) {
            if (checkCaseBounds) {
                if (scaleTargets == 0 && scaleOutput == 1) {
                    cudaFuncSetCacheConfig(kRNormUndo2<16, 8, 4, true, true>, cudaFuncCachePreferL1);
                    kRNormUndo2<16, 8, 4, true, true><<<blocks, threads>>>(outGrads.getDevData(), denoms.getDevData(), inputs.getDevData(), acts.getDevData(),
                                                                              target.getDevData(), imgSize, numFilters, numImages, sizeX, powScale,
                                                                              scaleTargets, scaleOutput);
                } else {
                    cudaFuncSetCacheConfig(kRNormUndo2<16, 8, 4, false, true>, cudaFuncCachePreferL1);
                    kRNormUndo2<16, 8, 4, false, true><<<blocks, threads>>>(outGrads.getDevData(), denoms.getDevData(), inputs.getDevData(), acts.getDevData(),
                                                                              target.getDevData(), imgSize, numFilters, numImages, sizeX, powScale,
                                                                              scaleTargets, scaleOutput);
                }
            } else {
                if (scaleTargets == 0 && scaleOutput == 1) {
                    cudaFuncSetCacheConfig(kRNormUndo2<16, 8, 4, true, false>, cudaFuncCachePreferL1);
                    kRNormUndo2<16, 8, 4, true, false><<<blocks, threads>>>(outGrads.getDevData(), denoms.getDevData(), inputs.getDevData(), acts.getDevData(),
                                                                              target.getDevData(), imgSize, numFilters, numImages, sizeX, powScale,
                                                                              scaleTargets, scaleOutput);
                } else {
                    cudaFuncSetCacheConfig(kRNormUndo2<16, 8, 4, false, false>, cudaFuncCachePreferL1);
                    kRNormUndo2<16, 8, 4, false, false><<<blocks, threads>>>(outGrads.getDevData(), denoms.getDevData(), inputs.getDevData(), acts.getDevData(),
                                                                              target.getDevData(), imgSize, numFilters, numImages, sizeX, powScale,
                                                                              scaleTargets, scaleOutput);
                }
            }
        } else if (imgsPerThread == 4) {
            if (checkCaseBounds) {
                if (scaleTargets == 0 && scaleOutput == 1) {
                    cudaFuncSetCacheConfig(kRNormUndo2<16, 4, 4, true, true>, cudaFuncCachePreferL1);
                    kRNormUndo2<16, 4, 4, true, true><<<blocks, threads>>>(outGrads.getDevData(), denoms.getDevData(), inputs.getDevData(), acts.getDevData(),
                                                                              target.getDevData(), imgSize, numFilters, numImages, sizeX, powScale,
                                                                              scaleTargets, scaleOutput);
                } else {
                    cudaFuncSetCacheConfig(kRNormUndo2<16, 4, 4, false, true>, cudaFuncCachePreferL1);
                    kRNormUndo2<16, 4, 4, false, true><<<blocks, threads>>>(outGrads.getDevData(), denoms.getDevData(), inputs.getDevData(), acts.getDevData(),
                                                                              target.getDevData(), imgSize, numFilters, numImages, sizeX, powScale,
                                                                              scaleTargets, scaleOutput);
                }
            } else {
                if (scaleTargets == 0 && scaleOutput == 1) {
                    cudaFuncSetCacheConfig(kRNormUndo2<16, 4, 4, true, false>, cudaFuncCachePreferL1);
                    kRNormUndo2<16, 4, 4, true, false><<<blocks, threads>>>(outGrads.getDevData(), denoms.getDevData(), inputs.getDevData(), acts.getDevData(),
                                                                              target.getDevData(), imgSize, numFilters, numImages, sizeX, powScale,
                                                                              scaleTargets, scaleOutput);
                } else {
                    cudaFuncSetCacheConfig(kRNormUndo2<16, 4, 4, false, false>, cudaFuncCachePreferL1);
                    kRNormUndo2<16, 4, 4, false, false><<<blocks, threads>>>(outGrads.getDevData(), denoms.getDevData(), inputs.getDevData(), acts.getDevData(),
                                                                              target.getDevData(), imgSize, numFilters, numImages, sizeX, powScale,
                                                                              scaleTargets, scaleOutput);
                }
            }
        } else {
            if (checkCaseBounds) {
                if (scaleTargets == 0 && scaleOutput == 1) {
                    cudaFuncSetCacheConfig(kRNormUndo2<16, 2, 4, true, true>, cudaFuncCachePreferL1);
                    kRNormUndo2<16, 2, 4, true, true><<<blocks, threads>>>(outGrads.getDevData(), denoms.getDevData(), inputs.getDevData(), acts.getDevData(),
                                                                              target.getDevData(), imgSize, numFilters, numImages, sizeX, powScale,
                                                                              scaleTargets, scaleOutput);
                } else {
                    cudaFuncSetCacheConfig(kRNormUndo2<16, 2, 4, false, true>, cudaFuncCachePreferL1);
                    kRNormUndo2<16, 2, 4, false, true><<<blocks, threads>>>(outGrads.getDevData(), denoms.getDevData(), inputs.getDevData(), acts.getDevData(),
                                                                              target.getDevData(), imgSize, numFilters, numImages, sizeX, powScale,
                                                                              scaleTargets, scaleOutput);
                }
            } else {
                if (scaleTargets == 0 && scaleOutput == 1) {
                    cudaFuncSetCacheConfig(kRNormUndo2<16, 2, 4, true, false>, cudaFuncCachePreferL1);
                    kRNormUndo2<16, 2, 4, true, false><<<blocks, threads>>>(outGrads.getDevData(), denoms.getDevData(), inputs.getDevData(), acts.getDevData(),
                                                                              target.getDevData(), imgSize, numFilters, numImages, sizeX, powScale,
                                                                              scaleTargets, scaleOutput);
                } else {
                    cudaFuncSetCacheConfig(kRNormUndo2<16, 2, 4, false, false>, cudaFuncCachePreferL1);
                    kRNormUndo2<16, 2, 4, false, false><<<blocks, threads>>>(outGrads.getDevData(), denoms.getDevData(), inputs.getDevData(), acts.getDevData(),
                                                                              target.getDevData(), imgSize, numFilters, numImages, sizeX, powScale,
                                                                              scaleTargets, scaleOutput);
                }
            }
        }
    } else {
        // Small-region path.
        int imgsPerThread = numImages % 64 == 0 ? 2 : 1;
        bool checkCaseBounds = numImages % (32*imgsPerThread) != 0;
        threads = dim3(32, 4);
        blocks = dim3(DIVUP(numImages,32*imgsPerThread) * imgSize, (numFilters / (4 * 2)) * imgSize);
        if (imgsPerThread == 2) {
            if (checkCaseBounds) {
                if (scaleTargets == 0 && scaleOutput == 1) {
                    cudaFuncSetCacheConfig(kRNormUndo<4, 32, 2, 2, false, true>, cudaFuncCachePreferL1);
                    kRNormUndo<4, 32, 2, 2, false, true><<<blocks, threads>>>(outGrads.getDevData(), denoms.getDevData(), inputs.getDevData(), acts.getDevData(),
                                                                              target.getDevData(), imgSize, numFilters, numImages, sizeX, powScale,
                                                                              scaleTargets, scaleOutput);
                } else {
                    cudaFuncSetCacheConfig(kRNormUndo<4, 32, 2, 2, true, true>, cudaFuncCachePreferL1);
                    kRNormUndo<4, 32, 2, 2, true, true><<<blocks, threads>>>(outGrads.getDevData(), denoms.getDevData(), inputs.getDevData(), acts.getDevData(),
                                                                              target.getDevData(), imgSize, numFilters, numImages, sizeX, powScale,
                                                                              scaleTargets, scaleOutput);
                }
            } else {
                if (scaleTargets == 0 && scaleOutput == 1) {
                    cudaFuncSetCacheConfig(kRNormUndo<4, 32, 2, 2, false, false>, cudaFuncCachePreferL1);
                    kRNormUndo<4, 32, 2, 2, false, false><<<blocks, threads>>>(outGrads.getDevData(), denoms.getDevData(), inputs.getDevData(), acts.getDevData(),
                                                                              target.getDevData(), imgSize, numFilters, numImages, sizeX, powScale,
                                                                              scaleTargets, scaleOutput);
                } else {
                    cudaFuncSetCacheConfig(kRNormUndo<4, 32, 2, 2, true, false>, cudaFuncCachePreferL1);
                    kRNormUndo<4, 32, 2, 2, true, false><<<blocks, threads>>>(outGrads.getDevData(), denoms.getDevData(), inputs.getDevData(), acts.getDevData(),
                                                                              target.getDevData(), imgSize, numFilters, numImages, sizeX, powScale,
                                                                              scaleTargets, scaleOutput);
                }
            }
        } else {
            if (checkCaseBounds) {
                if (scaleTargets == 0 && scaleOutput == 1) {
                    cudaFuncSetCacheConfig(kRNormUndo<4, 32, 1, 2, false, true>, cudaFuncCachePreferL1);
                    kRNormUndo<4, 32, 1, 2, false, true><<<blocks, threads>>>(outGrads.getDevData(), denoms.getDevData(), inputs.getDevData(), acts.getDevData(),
                                                                              target.getDevData(), imgSize, numFilters, numImages, sizeX, powScale,
                                                                              scaleTargets, scaleOutput);
                } else {
                    cudaFuncSetCacheConfig(kRNormUndo<4, 32, 1, 2, true, true>, cudaFuncCachePreferL1);
                    kRNormUndo<4, 32, 1, 2, true, true><<<blocks, threads>>>(outGrads.getDevData(), denoms.getDevData(), inputs.getDevData(), acts.getDevData(),
                                                                              target.getDevData(), imgSize, numFilters, numImages, sizeX, powScale,
                                                                              scaleTargets, scaleOutput);
                }
            } else {
                if (scaleTargets == 0 && scaleOutput == 1) {
                    cudaFuncSetCacheConfig(kRNormUndo<4, 32, 1, 2, false, false>, cudaFuncCachePreferL1);
                    kRNormUndo<4, 32, 1, 2, false, false><<<blocks, threads>>>(outGrads.getDevData(), denoms.getDevData(), inputs.getDevData(), acts.getDevData(),
                                                                              target.getDevData(), imgSize, numFilters, numImages, sizeX, powScale,
                                                                              scaleTargets, scaleOutput);
                } else {
                    cudaFuncSetCacheConfig(kRNormUndo<4, 32, 1, 2, true, false>, cudaFuncCachePreferL1);
                    kRNormUndo<4, 32, 1, 2, true, false><<<blocks, threads>>>(outGrads.getDevData(), denoms.getDevData(), inputs.getDevData(), acts.getDevData(),
                                                                              target.getDevData(), imgSize, numFilters, numImages, sizeX, powScale,
                                                                              scaleTargets, scaleOutput);
                }
            }
        }
    }
    getLastCudaError("kRNormUndo: kernel execution failed");
}
/*
* imgs: (numChannels, imgPixels, numImages) with given imgStride
* target: (numChannels, tgtPixels, numImages)
*
* imgSize = scale * tgtSize
*/
/*
 * Bilinear image resize on the GPU.
 *
 * imgs:   (numChannels, imgPixels, numImages) with stride images.getStride()
 * target: (numChannels, tgtPixels, numImages) (out)
 *
 * imgSize = scale * tgtSize. centerScale shifts source coordinates so that
 * the resize is centered (see the imgCenter/tgtCenter computation below).
 */
void convResizeBilinear(NVMatrix& images, NVMatrix& target, int imgSize, int tgtSize, float scale) {
    assert(!images.isTrans());
    assert(!target.isTrans());
    int imgPixels = imgSize * imgSize;
    int tgtPixels = tgtSize * tgtSize;
    int numChannels = images.getNumRows() / imgPixels;
    int numImages = images.getNumCols();
    assert(images.getNumRows() == numChannels * imgPixels);
    target.resize(numChannels * tgtPixels, numImages);
    assert(target.isContiguous());
    // The target is processed in 4x4 pixel chunks; grid y spans channels x chunks.
    int numChunksX = DIVUP(tgtSize, 4);
    int numChunks = numChunksX * numChunksX;
    double imgCenter = imgSize * 0.5;
    double tgtCenter = tgtSize * 0.5;
    double centerScale = imgCenter - tgtCenter * scale;
    // 4/2/1 images per thread depending on divisibility of numImages.
    int imgsPerThread = numImages % 128 == 0 ? 4 : numImages % 64 == 0 ? 2 : 1;
    bool checkCaseBounds = numImages % (32*imgsPerThread) != 0;
    dim3 threads(32, 16);
    dim3 blocks(DIVUP(numImages, imgsPerThread * 32), numChannels * numChunks);
    if (imgsPerThread == 4) {
        if (checkCaseBounds) {
            cudaFuncSetCacheConfig(kResizeBilinear<4, true>, cudaFuncCachePreferL1);
            kResizeBilinear<4, true><<<blocks, threads>>>(images.getDevData(), target.getDevData(), imgSize, tgtSize, numImages, images.getStride(), scale, centerScale);
        } else {
            cudaFuncSetCacheConfig(kResizeBilinear<4, false>, cudaFuncCachePreferL1);
            kResizeBilinear<4, false><<<blocks, threads>>>(images.getDevData(), target.getDevData(), imgSize, tgtSize, numImages, images.getStride(), scale, centerScale);
        }
    } else if (imgsPerThread == 2) {
        if (checkCaseBounds) {
            cudaFuncSetCacheConfig(kResizeBilinear<2, true>, cudaFuncCachePreferL1);
            kResizeBilinear<2, true><<<blocks, threads>>>(images.getDevData(), target.getDevData(), imgSize, tgtSize, numImages, images.getStride(), scale, centerScale);
        } else {
            cudaFuncSetCacheConfig(kResizeBilinear<2, false>, cudaFuncCachePreferL1);
            kResizeBilinear<2, false><<<blocks, threads>>>(images.getDevData(), target.getDevData(), imgSize, tgtSize, numImages, images.getStride(), scale, centerScale);
        }
    } else {
        if (checkCaseBounds) {
            cudaFuncSetCacheConfig(kResizeBilinear<1, true>, cudaFuncCachePreferL1);
            kResizeBilinear<1, true><<<blocks, threads>>>(images.getDevData(), target.getDevData(), imgSize, tgtSize, numImages, images.getStride(), scale, centerScale);
        } else {
            cudaFuncSetCacheConfig(kResizeBilinear<1, false>, cudaFuncCachePreferL1);
            kResizeBilinear<1, false><<<blocks, threads>>>(images.getDevData(), target.getDevData(), imgSize, tgtSize, numImages, images.getStride(), scale, centerScale);
        }
    }
    getLastCudaError("convResizeBilinear: kernel execution failed");
}
/*
* imgs: (3, imgPixels, numImages) with given imgStride
* target: (3, imgPixels, numImages)
*/
/*
 * RGB -> YUV color-space conversion.
 *
 * imgs:   (3, imgPixels, numImages) with stride images.getStride()
 * target: (3, imgPixels, numImages) (out)
 *
 * Host-side dispatcher for the kRGBToYUV kernel family; the conversion
 * coefficients live in the kernel itself.
 */
void convRGBToYUV(NVMatrix& images, NVMatrix& target) {
    assert(!images.isTrans());
    assert(!target.isTrans());
    int imgPixels = images.getNumRows() / 3;
    int numImages = images.getNumCols();
    assert(images.getNumRows() == 3 * imgPixels); // exactly 3 channels expected
    target.resize(3 * imgPixels, numImages);
    assert(target.isContiguous());
    // 4/2/1 images per thread depending on divisibility of numImages.
    int imgsPerThread = numImages % 128 == 0 ? 4 : numImages % 64 == 0 ? 2 : 1;
    bool checkCaseBounds = numImages % (32*imgsPerThread) != 0;
    dim3 threads(32, 4);
    dim3 blocks(DIVUP(numImages, imgsPerThread * 32), DIVUP(imgPixels, 4));
    if (imgsPerThread == 4) {
        if (checkCaseBounds) {
            cudaFuncSetCacheConfig(kRGBToYUV<4, true>, cudaFuncCachePreferL1);
            kRGBToYUV<4, true><<<blocks, threads>>>(images.getDevData(), target.getDevData(), imgPixels, numImages, images.getStride());
        } else {
            cudaFuncSetCacheConfig(kRGBToYUV<4, false>, cudaFuncCachePreferL1);
            kRGBToYUV<4, false><<<blocks, threads>>>(images.getDevData(), target.getDevData(), imgPixels, numImages, images.getStride());
        }
    } else if (imgsPerThread == 2) {
        if (checkCaseBounds) {
            cudaFuncSetCacheConfig(kRGBToYUV<2, true>, cudaFuncCachePreferL1);
            kRGBToYUV<2, true><<<blocks, threads>>>(images.getDevData(), target.getDevData(), imgPixels, numImages, images.getStride());
        } else {
            cudaFuncSetCacheConfig(kRGBToYUV<2, false>, cudaFuncCachePreferL1);
            kRGBToYUV<2, false><<<blocks, threads>>>(images.getDevData(), target.getDevData(), imgPixels, numImages, images.getStride());
        }
    } else {
        if (checkCaseBounds) {
            cudaFuncSetCacheConfig(kRGBToYUV<1, true>, cudaFuncCachePreferL1);
            kRGBToYUV<1, true><<<blocks, threads>>>(images.getDevData(), target.getDevData(), imgPixels, numImages, images.getStride());
        } else {
            cudaFuncSetCacheConfig(kRGBToYUV<1, false>, cudaFuncCachePreferL1);
            kRGBToYUV<1, false><<<blocks, threads>>>(images.getDevData(), target.getDevData(), imgPixels, numImages, images.getStride());
        }
    }
    getLastCudaError("convRGBToYUV: kernel execution failed");
}
/*
 * imgs:   (3, imgPixels, numImages) with given imgStride
 * target: (3, imgPixels, numImages)
 *
 * Host-side dispatcher for the RGB -> L*a*b* colorspace conversion kernel.
 * Picks a compile-time images-per-thread count (4/2/1) from the divisibility
 * of numImages and instantiates kRGBToLAB<imgsPerThread, checkCaseBounds,
 * center>.  NOTE(review): 'center' is only forwarded as a template argument;
 * its effect (presumably mean-centering the output channels) is defined in
 * kRGBToLAB, which is not visible here -- confirm against the kernel.
 */
void convRGBToLAB(NVMatrix& images, NVMatrix& target, bool center) {
    assert(!images.isTrans());
    assert(!target.isTrans());
    int imgPixels = images.getNumRows() / 3;  // rows are 3 stacked channel planes
    int numImages = images.getNumCols();
    assert(images.getNumRows() == 3 * imgPixels);
    target.resize(3 * imgPixels, numImages);
    assert(target.isContiguous());
    // 4 images/thread when 128 | numImages, 2 when 64 | numImages, else 1.
    int imgsPerThread = numImages % 128 == 0 ? 4 : numImages % 64 == 0 ? 2 : 1;
    // true when blocks are not fully populated, so the kernel must bounds-check.
    bool checkCaseBounds = numImages % (32*imgsPerThread) != 0;
    dim3 threads(32, 4);
    dim3 blocks(DIVUP(numImages, imgsPerThread * 32), DIVUP(imgPixels, 4));
    if (imgsPerThread == 4) {
        if (center) {
            if (checkCaseBounds) {
                cudaFuncSetCacheConfig(kRGBToLAB<4, true, true>, cudaFuncCachePreferL1);
                kRGBToLAB<4, true, true><<<blocks, threads>>>(images.getDevData(), target.getDevData(), imgPixels, numImages, images.getStride());
            } else {
                cudaFuncSetCacheConfig(kRGBToLAB<4, false, true>, cudaFuncCachePreferL1);
                kRGBToLAB<4, false, true><<<blocks, threads>>>(images.getDevData(), target.getDevData(), imgPixels, numImages, images.getStride());
            }
        } else {
            if (checkCaseBounds) {
                cudaFuncSetCacheConfig(kRGBToLAB<4, true, false>, cudaFuncCachePreferL1);
                kRGBToLAB<4, true, false><<<blocks, threads>>>(images.getDevData(), target.getDevData(), imgPixels, numImages, images.getStride());
            } else {
                cudaFuncSetCacheConfig(kRGBToLAB<4, false, false>, cudaFuncCachePreferL1);
                kRGBToLAB<4, false, false><<<blocks, threads>>>(images.getDevData(), target.getDevData(), imgPixels, numImages, images.getStride());
            }
        }
    } else if (imgsPerThread == 2) {
        if (center) {
            if (checkCaseBounds) {
                cudaFuncSetCacheConfig(kRGBToLAB<2, true, true>, cudaFuncCachePreferL1);
                kRGBToLAB<2, true, true><<<blocks, threads>>>(images.getDevData(), target.getDevData(), imgPixels, numImages, images.getStride());
            } else {
                cudaFuncSetCacheConfig(kRGBToLAB<2, false, true>, cudaFuncCachePreferL1);
                kRGBToLAB<2, false, true><<<blocks, threads>>>(images.getDevData(), target.getDevData(), imgPixels, numImages, images.getStride());
            }
        } else {
            if (checkCaseBounds) {
                cudaFuncSetCacheConfig(kRGBToLAB<2, true, false>, cudaFuncCachePreferL1);
                kRGBToLAB<2, true, false><<<blocks, threads>>>(images.getDevData(), target.getDevData(), imgPixels, numImages, images.getStride());
            } else {
                cudaFuncSetCacheConfig(kRGBToLAB<2, false, false>, cudaFuncCachePreferL1);
                kRGBToLAB<2, false, false><<<blocks, threads>>>(images.getDevData(), target.getDevData(), imgPixels, numImages, images.getStride());
            }
        }
    } else {
        if (center) {
            if (checkCaseBounds) {
                cudaFuncSetCacheConfig(kRGBToLAB<1, true, true>, cudaFuncCachePreferL1);
                kRGBToLAB<1, true, true><<<blocks, threads>>>(images.getDevData(), target.getDevData(), imgPixels, numImages, images.getStride());
            } else {
                cudaFuncSetCacheConfig(kRGBToLAB<1, false, true>, cudaFuncCachePreferL1);
                kRGBToLAB<1, false, true><<<blocks, threads>>>(images.getDevData(), target.getDevData(), imgPixels, numImages, images.getStride());
            }
        } else {
            if (checkCaseBounds) {
                cudaFuncSetCacheConfig(kRGBToLAB<1, true, false>, cudaFuncCachePreferL1);
                kRGBToLAB<1, true, false><<<blocks, threads>>>(images.getDevData(), target.getDevData(), imgPixels, numImages, images.getStride());
            } else {
                cudaFuncSetCacheConfig(kRGBToLAB<1, false, false>, cudaFuncCachePreferL1);
                kRGBToLAB<1, false, false><<<blocks, threads>>>(images.getDevData(), target.getDevData(), imgPixels, numImages, images.getStride());
            }
        }
    }
    getLastCudaError("convRGBToLAB: kernel execution failed");
}
/*
 * imgs:   (numChannels, imgPixels, numImages) with given imgStride
 * target: (numChannels, tgtPixels, numImages)
 *
 * Extracts a tgtSize x tgtSize window with top-left corner (startY, startX)
 * from every channel of every image.  Dispatches kCrop with a compile-time
 * images-per-thread count chosen from the divisibility of the image count;
 * the boolean template argument enables bounds checking when blocks are not
 * fully populated with cases.
 */
void convCrop(NVMatrix& imgs, NVMatrix& target, int imgSize, int tgtSize, int startY, int startX) {
    const int nCases    = imgs.getNumCols();
    const int srcPixels = imgSize * imgSize;
    const int dstPixels = tgtSize * tgtSize;
    const int nChannels = imgs.getNumRows() / srcPixels;
    // Source rows must consist of whole channel planes, and the crop window
    // must lie entirely inside the source image.
    assert(imgs.getNumRows() == srcPixels * nChannels);
    assert(srcPixels == imgSize * imgSize);
    assert(imgSize - startY >= tgtSize);
    assert(imgSize - startX >= tgtSize);
    assert(startY >= 0);
    assert(startX >= 0);
    target.resize(nChannels * dstPixels, nCases);

    // 4 images/thread when 128 | nCases, 2 when 64 | nCases, else 1.
    const int imgsPerThread = nCases % 128 == 0 ? 4 : nCases % 64 == 0 ? 2 : 1;
    const bool checkCaseBounds = nCases % (32 * imgsPerThread) != 0;
    dim3 blocks(DIVUP(nCases, 32 * imgsPerThread), nChannels * DIVUP(dstPixels, 4));
    dim3 threads(32, 4);

    float* srcData = imgs.getDevData();
    float* dstData = target.getDevData();
    const int srcStride = imgs.getStride();
    if (imgsPerThread == 4) {
        if (!checkCaseBounds) {
            kCrop<4, false><<<blocks, threads>>>(srcData, dstData, nCases, srcStride, imgSize, tgtSize, startY, startX);
        } else {
            kCrop<4, true><<<blocks, threads>>>(srcData, dstData, nCases, srcStride, imgSize, tgtSize, startY, startX);
        }
    } else if (imgsPerThread == 2) {
        if (!checkCaseBounds) {
            kCrop<2, false><<<blocks, threads>>>(srcData, dstData, nCases, srcStride, imgSize, tgtSize, startY, startX);
        } else {
            kCrop<2, true><<<blocks, threads>>>(srcData, dstData, nCases, srcStride, imgSize, tgtSize, startY, startX);
        }
    } else {
        if (!checkCaseBounds) {
            kCrop<1, false><<<blocks, threads>>>(srcData, dstData, nCases, srcStride, imgSize, tgtSize, startY, startX);
        } else {
            kCrop<1, true><<<blocks, threads>>>(srcData, dstData, nCases, srcStride, imgSize, tgtSize, startY, startX);
        }
    }
    getLastCudaError("convCrop: kernel execution failed");
}
/*
 * images: (numFilters, imgPixels, numImages)
 * ticas:  (numFilters, imgPixels, numImages)
 * target: (numFilters, imgPixels, numImages) (out)
 *
 * Computes TICA-style gradient for given feature maps
 * f(x) = exp(-(sum_i{x_i^2}^(1/2)))
 * dlogf(x)/df(x) = -x_i / (sum_i{x_i^2}^(1/2) + eps)
 *
 * eps added for numerical stability
 *
 * Host-side dispatcher only: the math above lives in kTICAGrad_manyfilter.
 * When scaleTarget == 0, target is (re)allocated; otherwise the kernel
 * blends into the existing target -- NOTE(review): blending semantics
 * inferred from the scale parameters; confirm against the kernel.
 */
void convTICAGrad(NVMatrix& images, NVMatrix& ticas, NVMatrix& target, int numFilters, int sizeX, float scaleTarget, float scaleOutput) {
    int numImages = images.getNumCols();
    int imgPixels = images.getNumRows() / numFilters;  // rows = numFilters stacked planes
    assert(images.getNumRows() == numFilters * imgPixels);
    int imgSize = int(sqrt(imgPixels));
    assert(imgSize * imgSize == imgPixels);            // feature maps must be square
    assert(!images.isTrans());
    assert(images.isContiguous());
    assert(numFilters % 16 == 0 || numFilters <= 8);
    assert(ticas.isSameDims(images));
    assert(ticas.isContiguous());
    if (scaleTarget == 0) {
        target.resize(images);
    } else {
        assert(target.isSameDims(images));
    }
    assert(target.isContiguous());
    // TEMPORARY: only the <4, 32, 4, 2, *> instantiations below exist, which
    // restricts the supported filter counts and neighborhood sizes.
    assert(numFilters > 8);
    assert(sizeX < 6);
    dim3 threads(32, 4);
    dim3 blocks(DIVUP(numImages, 32*4) * imgSize, (numFilters / (4 * 2)) * imgSize);
    // Each block covers 128 cases; bounds-check when numImages is not a multiple.
    bool checkCaseBounds = (numImages % 128) != 0;
    if (checkCaseBounds) {
        cudaFuncSetCacheConfig(kTICAGrad_manyfilter<4, 32, 4, 2, true>, cudaFuncCachePreferL1);
        kTICAGrad_manyfilter<4, 32, 4, 2, true><<<blocks, threads>>>(images.getDevData(), ticas.getDevData(), target.getDevData(),
                                                  imgSize, numFilters, numImages, sizeX, scaleTarget, scaleOutput);
    } else {
        cudaFuncSetCacheConfig(kTICAGrad_manyfilter<4, 32, 4, 2, false>, cudaFuncCachePreferL1);
        kTICAGrad_manyfilter<4, 32, 4, 2, false><<<blocks, threads>>>(images.getDevData(), ticas.getDevData(), target.getDevData(),
                                                  imgSize, numFilters, numImages, sizeX, scaleTarget, scaleOutput);
    }
    getLastCudaError("convTICAGrad: kernel execution failed");
}
/*
 * images: (numFilters, imgPixels, numImages)
 * target: (numFilters, imgPixels, numImages) (out)
 *
 * Computes the TICA-style response for the given feature maps:
 *   f(x) = exp(-(sum_i{x_i^2}^(1/2)))
 *   dlogf(x)/df(x) = -x_i / (sum_i{x_i^2}^(1/2) + eps)
 * (eps added for numerical stability).  This routine only validates shapes
 * and dispatches kTICA_manyfilter; the math lives in the kernel.
 */
void convTICA(NVMatrix& images, NVMatrix& target, int numFilters, int sizeX, float scaleTarget, float scaleOutput) {
    const int nCases  = images.getNumCols();
    const int nPixels = images.getNumRows() / numFilters;
    assert(images.getNumRows() == numFilters * nPixels);
    const int imgSize = int(sqrt(nPixels));
    assert(imgSize * imgSize == nPixels);   // feature maps must be square
    assert(!images.isTrans());
    assert(images.isContiguous());
    assert(numFilters % 16 == 0 || numFilters <= 8);
    if (scaleTarget == 0) {
        target.resize(images);
    } else {
        assert(target.isSameDims(images));
    }
    assert(target.isContiguous());
    // TEMPORARY: only the <4, 32, 4, 2, *> instantiations below exist.
    assert(numFilters > 8);
    assert(sizeX < 6);
    dim3 threads(32, 4);
    dim3 blocks(DIVUP(nCases, 32*4) * imgSize, (numFilters / (4 * 2)) * imgSize);
    // Each block covers 128 cases; enable bounds checking otherwise.
    if ((nCases % 128) == 0) {
        cudaFuncSetCacheConfig(kTICA_manyfilter<4, 32, 4, 2, false>, cudaFuncCachePreferL1);
        kTICA_manyfilter<4, 32, 4, 2, false><<<blocks, threads>>>(images.getDevData(), target.getDevData(),
                                                                  imgSize, numFilters, nCases, sizeX, scaleTarget, scaleOutput);
    } else {
        cudaFuncSetCacheConfig(kTICA_manyfilter<4, 32, 4, 2, true>, cudaFuncCachePreferL1);
        kTICA_manyfilter<4, 32, 4, 2, true><<<blocks, threads>>>(images.getDevData(), target.getDevData(),
                                                                 imgSize, numFilters, nCases, sizeX, scaleTarget, scaleOutput);
    }
    getLastCudaError("convTICA: kernel execution failed");
}
/*
 * images:    (numFilters, imgPixels, numImages)
 * meanDiffs: (numFilters, imgPixels, numImages)
 * denoms:    (numFilters, imgPixels, numImages) (out)
 * target:    (numFilters, imgPixels, numImages) (out)
 * Note: at present, I have no code to compute the meanDiffs. So it should be set
 * to be equal to images. In other words, this isn't really doing contrast normalization,
 * just response normalization.
 *
 * Cross-map normalization over a window of sizeF adjacent filters.
 * Dispatches kFCNorm<4, 32, 4, checkCaseBounds, blocked>; addScale/powScale
 * parameterize the normalization denominator, and 'blocked' selects between
 * the two windowing modes implemented in the kernel.
 */
void convContrastNormCrossMap(NVMatrix& images, NVMatrix& meanDiffs, NVMatrix& denoms, NVMatrix& target,
                             int numFilters, int sizeF, float addScale, float powScale, bool blocked) {
    int numImages = images.getNumCols();
    int imgPixels = images.getNumRows() / numFilters;  // rows = numFilters stacked planes
    assert(images.getNumRows() == numFilters * imgPixels);
    int imgSize = int(sqrt(imgPixels));
    assert(imgSize * imgSize == imgPixels);            // feature maps must be square
    assert(meanDiffs.isSameDims(images));
    assert(sizeF > 0 && sizeF <= numFilters);          // window must fit in the filter bank
    assert(!meanDiffs.isTrans());
    assert(!images.isTrans());
    assert(images.isContiguous());
    assert(meanDiffs.isContiguous());
    assert(numFilters % 16 == 0);
    target.resize(images);
    denoms.resize(images);
    assert(target.isContiguous());
    // Each block covers 128 cases; bounds-check when numImages is not a multiple.
    bool checkCaseBounds = numImages % 128 != 0;
    dim3 threads(32, 4);
    dim3 blocks(DIVUP(numImages,32*4) * imgSize, (numFilters / 4) * imgSize);
    if (blocked) {
        if (checkCaseBounds) {
            cudaFuncSetCacheConfig(kFCNorm<4, 32, 4, true, true>, cudaFuncCachePreferL1);
            kFCNorm<4, 32, 4, true, true><<<blocks, threads>>>(images.getDevData(), meanDiffs.getDevData(), denoms.getDevData(), target.getDevData(),
                                                              imgSize, numFilters, numImages, sizeF, addScale, powScale);
        } else {
            cudaFuncSetCacheConfig(kFCNorm<4, 32, 4, false, true>, cudaFuncCachePreferL1);
            kFCNorm<4, 32, 4, false, true><<<blocks, threads>>>(images.getDevData(), meanDiffs.getDevData(), denoms.getDevData(), target.getDevData(),
                                                              imgSize, numFilters, numImages, sizeF, addScale, powScale);
        }
    } else {
        if (checkCaseBounds) {
            cudaFuncSetCacheConfig(kFCNorm<4, 32, 4, true, false>, cudaFuncCachePreferL1);
            kFCNorm<4, 32, 4, true, false><<<blocks, threads>>>(images.getDevData(), meanDiffs.getDevData(), denoms.getDevData(), target.getDevData(),
                                                              imgSize, numFilters, numImages, sizeF, addScale, powScale);
        } else {
            cudaFuncSetCacheConfig(kFCNorm<4, 32, 4, false, false>, cudaFuncCachePreferL1);
            kFCNorm<4, 32, 4, false, false><<<blocks, threads>>>(images.getDevData(), meanDiffs.getDevData(), denoms.getDevData(), target.getDevData(),
                                                              imgSize, numFilters, numImages, sizeF, addScale, powScale);
        }
    }
    getLastCudaError("convContrastNormCrossMap: kernel execution failed");
}
/*
 * outGrads: (numFilters, imgPixels, numImages)
 * denoms:   (numFilters, imgPixels, numImages)
 * inputs:   (numFilters, imgPixels, numImages)
 * acts:     (numFilters, imgPixels, numImages)
 * target:   (numFilters, imgPixels, numImages)
 *
 * THIS WILL OVERWRITE THE ACTS MATRIX.
 *
 * Backward pass of cross-map response normalization.  Two kernel stages:
 * a prelim pass rewrites acts in place, then kFRNormUndo combines it with
 * outGrads/denoms/inputs into target, optionally blending with the existing
 * target (scaleTargets/scaleOutput).  Template args of kFRNormUndo are
 * <imgsPerThread, B_X, filtersPerThread, add, checkCaseBounds, blocked>.
 * NOTE(review): 'inputs' is never shape-checked here, unlike the other
 * operands -- presumably it must match outGrads; confirm with callers.
 */
void convResponseNormCrossMapUndo(NVMatrix& outGrads, NVMatrix& denoms, NVMatrix& inputs, NVMatrix& acts, NVMatrix& target, int numFilters,
                         int sizeF, float addScale, float powScale, bool blocked, float scaleTargets, float scaleOutput) {
    int numImages = outGrads.getNumCols();
    int imgPixels = outGrads.getNumRows() / numFilters;  // rows = numFilters stacked planes
    int imgSize = int(sqrt(imgPixels));
    assert(imgSize * imgSize == imgPixels);              // feature maps must be square
    assert(sizeF > 0 && sizeF <= numFilters);            // window must fit in the filter bank
    assert(outGrads.getNumRows() == numFilters * imgPixels);
    assert(denoms.isSameDims(outGrads));
    assert(acts.isSameDims(denoms));
    assert(!denoms.isTrans());
    assert(!outGrads.isTrans());
    assert(!acts.isTrans());
    assert(!target.isTrans());
    assert(outGrads.isContiguous());
    assert(numFilters % 16 == 0);
    target.resize(outGrads);
    assert(target.isContiguous());
    // First do acts := -2 x scale x acts x outGrads / denoms
    // so that the main routine only has to do an addition in its inner loop.
    int prelimEltsPerThread = 4;
    dim3 threads(128);
    dim3 blocks(MIN(512, DIVUP(outGrads.getNumElements(),(threads.x * prelimEltsPerThread))));
    kRNormUndoPrelims<128, 4><<<blocks, threads>>>(acts.getDevData(), denoms.getDevData(), outGrads.getDevData(), outGrads.getNumElements(), -2*addScale*powScale);
    // Now the main routine
    dim3 threads2 = dim3(32, 4);
    dim3 blocks2 = dim3(DIVUP(numImages,32*4) * imgSize, (numFilters / 4) * imgSize);
    // Each block covers 128 cases; bounds-check when numImages is not a multiple.
    bool checkCaseBounds = (numImages % 128) != 0;
    if (blocked) {
        // scaleTargets == 0 && scaleOutput == 1 means "overwrite target"
        // (add=false); anything else blends into the existing target (add=true).
        if (scaleTargets == 0 && scaleOutput == 1) {
            if (checkCaseBounds) {
                cudaFuncSetCacheConfig(kFRNormUndo<4, 32, 4, false, true, true>, cudaFuncCachePreferL1);
                kFRNormUndo<4, 32, 4, false, true, true><<<blocks2, threads2>>>(outGrads.getDevData(), denoms.getDevData(), inputs.getDevData(), acts.getDevData(),
                                                                          target.getDevData(), imgSize, numFilters, numImages, sizeF, powScale,
                                                                          scaleTargets, scaleOutput);
            } else {
                cudaFuncSetCacheConfig(kFRNormUndo<4, 32, 4, false, false, true>, cudaFuncCachePreferL1);
                kFRNormUndo<4, 32, 4, false, false, true><<<blocks2, threads2>>>(outGrads.getDevData(), denoms.getDevData(), inputs.getDevData(), acts.getDevData(),
                                                                          target.getDevData(), imgSize, numFilters, numImages, sizeF, powScale,
                                                                          scaleTargets, scaleOutput);
            }
        } else {
            if (checkCaseBounds) {
                cudaFuncSetCacheConfig(kFRNormUndo<4, 32, 4, true, true, true>, cudaFuncCachePreferL1);
                kFRNormUndo<4, 32, 4, true, true, true><<<blocks2, threads2>>>(outGrads.getDevData(), denoms.getDevData(), inputs.getDevData(), acts.getDevData(),
                                                                          target.getDevData(), imgSize, numFilters, numImages, sizeF, powScale,
                                                                          scaleTargets, scaleOutput);
            } else {
                cudaFuncSetCacheConfig(kFRNormUndo<4, 32, 4, true, false, true>, cudaFuncCachePreferL1);
                kFRNormUndo<4, 32, 4, true, false, true><<<blocks2, threads2>>>(outGrads.getDevData(), denoms.getDevData(), inputs.getDevData(), acts.getDevData(),
                                                                          target.getDevData(), imgSize, numFilters, numImages, sizeF, powScale,
                                                                          scaleTargets, scaleOutput);
            }
        }
    } else {
        if (scaleTargets == 0 && scaleOutput == 1) {
            if (checkCaseBounds) {
                cudaFuncSetCacheConfig(kFRNormUndo<4, 32, 4, false, true, false>, cudaFuncCachePreferL1);
                kFRNormUndo<4, 32, 4, false, true, false><<<blocks2, threads2>>>(outGrads.getDevData(), denoms.getDevData(), inputs.getDevData(), acts.getDevData(),
                                                                          target.getDevData(), imgSize, numFilters, numImages, sizeF, powScale,
                                                                          scaleTargets, scaleOutput);
            } else {
                cudaFuncSetCacheConfig(kFRNormUndo<4, 32, 4, false, false, false>, cudaFuncCachePreferL1);
                kFRNormUndo<4, 32, 4, false, false, false><<<blocks2, threads2>>>(outGrads.getDevData(), denoms.getDevData(), inputs.getDevData(), acts.getDevData(),
                                                                          target.getDevData(), imgSize, numFilters, numImages, sizeF, powScale,
                                                                          scaleTargets, scaleOutput);
            }
        } else {
            if (checkCaseBounds) {
                cudaFuncSetCacheConfig(kFRNormUndo<4, 32, 4, true, true, false>, cudaFuncCachePreferL1);
                kFRNormUndo<4, 32, 4, true, true, false><<<blocks2, threads2>>>(outGrads.getDevData(), denoms.getDevData(), inputs.getDevData(), acts.getDevData(),
                                                                          target.getDevData(), imgSize, numFilters, numImages, sizeF, powScale,
                                                                          scaleTargets, scaleOutput);
            } else {
                cudaFuncSetCacheConfig(kFRNormUndo<4, 32, 4, true, false, false>, cudaFuncCachePreferL1);
                kFRNormUndo<4, 32, 4, true, false, false><<<blocks2, threads2>>>(outGrads.getDevData(), denoms.getDevData(), inputs.getDevData(), acts.getDevData(),
                                                                          target.getDevData(), imgSize, numFilters, numImages, sizeF, powScale,
                                                                          scaleTargets, scaleOutput);
            }
        }
    }
    getLastCudaError("convResponseNormCrossMapUndo: kernel execution failed");
}
/*
 * Cross-map response normalization: identical to contrast normalization with
 * meanDiffs == images, i.e. no mean subtraction (see the note on
 * convContrastNormCrossMap).
 */
void convResponseNormCrossMap(NVMatrix& images, NVMatrix& denoms, NVMatrix& target, int numFilters, int sizeF, float addScale, float powScale, bool blocked) {
    convContrastNormCrossMap(images, images, denoms, target, numFilters, sizeF, addScale, powScale, blocked);
}
|
015fd2471056fd9c84fd121b053a658e44e963f5.hip
|
// !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
#include "includes.h"
#ifdef CUDNN
#pragma comment(lib, "cudnn.lib")
#endif
extern "C" {
}
extern "C" {
double get_time_point();
void start_timer();
void stop_timer();
double get_time();
void stop_timer_and_show();
void stop_timer_and_show_name(char *name);
void show_total_time();
}
// Writes the sign of every element of x into binary: +1 when x[i] >= 0,
// -1 otherwise.  One element per thread; the 2D grid is flattened so that
// arbitrarily large n can be covered.
__global__ void binarize_kernel(float *x, int n, float *binary)
{
    const int idx = (blockIdx.x + blockIdx.y * gridDim.x) * blockDim.x + threadIdx.x;
    if (idx < n) {
        binary[idx] = (x[idx] >= 0) ? 1 : -1;
    }
}
|
015fd2471056fd9c84fd121b053a658e44e963f5.cu
|
#include "includes.h"
#ifdef CUDNN
#pragma comment(lib, "cudnn.lib")
#endif
extern "C" {
}
extern "C" {
double get_time_point();
void start_timer();
void stop_timer();
double get_time();
void stop_timer_and_show();
void stop_timer_and_show_name(char *name);
void show_total_time();
}
// Element-wise sign function: binary[i] = +1 when x[i] >= 0, else -1.
// Flattened 2D grid, one element per thread, with a tail guard for the
// final partial block.
__global__ void binarize_kernel(float *x, int n, float *binary)
{
    const int tid = threadIdx.x + blockDim.x * (blockIdx.x + gridDim.x * blockIdx.y);
    if (tid >= n) return;
    const float v = x[tid];
    binary[tid] = (v >= 0) ? 1.f : -1.f;
}
|
c28a18641a270b8f9f845c1d35545f88d8a269e2.hip
|
// !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
//
// auto-generated by ops.py
//
__constant__ int xdim0_initialize_kernel;
int xdim0_initialize_kernel_h = -1;
__constant__ int xdim1_initialize_kernel;
int xdim1_initialize_kernel_h = -1;
__constant__ int xdim2_initialize_kernel;
int xdim2_initialize_kernel_h = -1;
__constant__ int xdim3_initialize_kernel;
int xdim3_initialize_kernel_h = -1;
__constant__ int xdim4_initialize_kernel;
int xdim4_initialize_kernel_h = -1;
#undef OPS_ACC0
#undef OPS_ACC1
#undef OPS_ACC2
#undef OPS_ACC3
#undef OPS_ACC4
#define OPS_ACC0(x) (x)
#define OPS_ACC1(x) (x)
#define OPS_ACC2(x) (x)
#define OPS_ACC3(x) (x)
#define OPS_ACC4(x) (x)
// user function: sets the 1-D initial condition at grid point idx[0].
// NOTE(review): xmin, dx, eps, lambda, ur, ul, pr, pl, gam1, rhol are global
// constants declared elsewhere (injected by the OPS code generator); their
// values are not visible here.  The piecewise form (perturbed state for
// x >= -4, uniform left state otherwise) looks like a shock-tube /
// shock-entropy-wave initial condition -- confirm against the application.
__device__
void
initialize_kernel_gpu(double *x, double *rho_new, double *rhou_new,
                      double *rhoE_new, double *rhoin, int *idx) {
  // Physical coordinate; idx[0] - 2 accounts for two halo points before xmin.
  x[OPS_ACC0(0)] = xmin + (idx[0] - 2) * dx;
  if (x[OPS_ACC0(0)] >= -4.0) {
    // Right region: sinusoidally perturbed density, momentum and total energy
    // built from the (pr, ur) state.
    rho_new[OPS_ACC1(0)] = 1.0 + eps * sin(lambda * x[OPS_ACC0(0)]);
    rhou_new[OPS_ACC2(0)] = ur * rho_new[OPS_ACC1(0)];
    rhoE_new[OPS_ACC3(0)] =
        (pr / gam1) +
        0.5 * pow(rhou_new[OPS_ACC2(0)], 2) / rho_new[OPS_ACC1(0)];
  } else {
    // Left region: uniform (rhol, ul, pl) state.
    rho_new[OPS_ACC1(0)] = rhol;
    rhou_new[OPS_ACC2(0)] = ul * rho_new[OPS_ACC1(0)];
    rhoE_new[OPS_ACC3(0)] =
        (pl / gam1) +
        0.5 * pow(rhou_new[OPS_ACC2(0)], 2) / rho_new[OPS_ACC1(0)];
  }
  // Pressure recovered from conserved variables: p = gam1 * (E - rho*u^2/2).
  rhoin[OPS_ACC4(0)] =
      gam1 * (rhoE_new[OPS_ACC3(0)] -
              0.5 * rhou_new[OPS_ACC2(0)] * rhou_new[OPS_ACC2(0)] /
                  rho_new[OPS_ACC1(0)]);
}
#undef OPS_ACC0
#undef OPS_ACC1
#undef OPS_ACC2
#undef OPS_ACC3
#undef OPS_ACC4
// Thin device wrapper generated by OPS: advances each dat pointer to this
// thread's grid point, reconstructs the global index, and calls the user
// function for every in-range point (1-D decomposition, one point per thread).
__global__ void
ops_initialize_kernel(double *__restrict arg0, double *__restrict arg1,
                      double *__restrict arg2, double *__restrict arg3,
                      double *__restrict arg4, int arg_idx0, int size0) {
  const int tid = blockDim.x * blockIdx.x + threadIdx.x;
  int arg_idx[1] = {arg_idx0 + tid};
  if (tid < size0) {
    initialize_kernel_gpu(arg0 + tid, arg1 + tid, arg2 + tid, arg3 + tid,
                          arg4 + tid, arg_idx);
  }
}
// host stub function: computes the local iteration range, updates the
// device-side xdim constants, sets up base pointers for each ops_arg, and
// launches the ops_initialize_kernel wrapper (HIP build).  Compiled either as
// the public ops_par_loop_initialize_kernel entry point, or -- under
// OPS_LAZY -- as the deferred body executed from a queued kernel descriptor.
#ifndef OPS_LAZY
void ops_par_loop_initialize_kernel(char const *name, ops_block block, int dim,
                                    int *range, ops_arg arg0, ops_arg arg1,
                                    ops_arg arg2, ops_arg arg3, ops_arg arg4,
                                    ops_arg arg5) {
#else
void ops_par_loop_initialize_kernel_execute(ops_kernel_descriptor *desc) {
  int dim = desc->dim;
  int *range = desc->range;
  ops_arg arg0 = desc->args[0];
  ops_arg arg1 = desc->args[1];
  ops_arg arg2 = desc->args[2];
  ops_arg arg3 = desc->args[3];
  ops_arg arg4 = desc->args[4];
  ops_arg arg5 = desc->args[5];
#endif
  // Timing
  double t1, t2, c1, c2;
  ops_arg args[6] = {arg0, arg1, arg2, arg3, arg4, arg5};
#if CHECKPOINTING && !OPS_LAZY
  if (!ops_checkpointing_before(args, 6, range, 0))
    return;
#endif
  if (OPS_diags > 1) {
    ops_timing_realloc(0, "initialize_kernel");
    OPS_kernels[0].count++;
    ops_timers_core(&c1, &t1);
  }
  // compute locally allocated range for the sub-block
  int start[1];
  int end[1];
#if OPS_MPI && !OPS_LAZY
  // Under MPI, clip the global range to this rank's owned sub-block,
  // honoring halo points at the physical domain boundaries.
  sub_block_list sb = OPS_sub_block_list[block->index];
  if (!sb->owned)
    return;
  for (int n = 0; n < 1; n++) {
    start[n] = sb->decomp_disp[n];
    end[n] = sb->decomp_disp[n] + sb->decomp_size[n];
    if (start[n] >= range[2 * n]) {
      start[n] = 0;
    } else {
      start[n] = range[2 * n] - start[n];
    }
    if (sb->id_m[n] == MPI_PROC_NULL && range[2 * n] < 0)
      start[n] = range[2 * n];
    if (end[n] >= range[2 * n + 1]) {
      end[n] = range[2 * n + 1] - sb->decomp_disp[n];
    } else {
      end[n] = sb->decomp_size[n];
    }
    if (sb->id_p[n] == MPI_PROC_NULL &&
        (range[2 * n + 1] > sb->decomp_disp[n] + sb->decomp_size[n]))
      end[n] += (range[2 * n + 1] - sb->decomp_disp[n] - sb->decomp_size[n]);
  }
#else
  for (int n = 0; n < 1; n++) {
    start[n] = range[2 * n];
    end[n] = range[2 * n + 1];
  }
#endif
  int x_size = MAX(0, end[0] - start[0]);
  int arg_idx[1];
#ifdef OPS_MPI
#ifdef OPS_LAZY
  ops_block block = desc->block;
  sub_block_list sb = OPS_sub_block_list[block->index];
#endif
  arg_idx[0] = sb->decomp_disp[0] + start[0];
#else
  arg_idx[0] = start[0];
#endif
  // Refresh the __constant__ xdim symbols only when a dat's leading
  // dimension changed since the last launch (the *_h shadows cache them).
  int xdim0 = args[0].dat->size[0];
  int xdim1 = args[1].dat->size[0];
  int xdim2 = args[2].dat->size[0];
  int xdim3 = args[3].dat->size[0];
  int xdim4 = args[4].dat->size[0];
  if (xdim0 != xdim0_initialize_kernel_h ||
      xdim1 != xdim1_initialize_kernel_h ||
      xdim2 != xdim2_initialize_kernel_h ||
      xdim3 != xdim3_initialize_kernel_h ||
      xdim4 != xdim4_initialize_kernel_h) {
    hipMemcpyToSymbol(xdim0_initialize_kernel, &xdim0, sizeof(int));
    xdim0_initialize_kernel_h = xdim0;
    hipMemcpyToSymbol(xdim1_initialize_kernel, &xdim1, sizeof(int));
    xdim1_initialize_kernel_h = xdim1;
    hipMemcpyToSymbol(xdim2_initialize_kernel, &xdim2, sizeof(int));
    xdim2_initialize_kernel_h = xdim2;
    hipMemcpyToSymbol(xdim3_initialize_kernel, &xdim3, sizeof(int));
    xdim3_initialize_kernel_h = xdim3;
    hipMemcpyToSymbol(xdim4_initialize_kernel, &xdim4, sizeof(int));
    xdim4_initialize_kernel_h = xdim4;
  }
  // One thread per grid point along x.
  dim3 grid((x_size - 1) / OPS_block_size_x + 1, 1, 1);
  dim3 tblock(OPS_block_size_x, 1, 1);
  int dat0 = (OPS_soa ? args[0].dat->type_size : args[0].dat->elem_size);
  int dat1 = (OPS_soa ? args[1].dat->type_size : args[1].dat->elem_size);
  int dat2 = (OPS_soa ? args[2].dat->type_size : args[2].dat->elem_size);
  int dat3 = (OPS_soa ? args[3].dat->type_size : args[3].dat->elem_size);
  int dat4 = (OPS_soa ? args[4].dat->type_size : args[4].dat->elem_size);
  char *p_a[6];
  // set up initial pointers: byte offset of the first iterated element
  // within each dat's device buffer.
  int base0 = args[0].dat->base_offset +
              dat0 * 1 * (start[0] * args[0].stencil->stride[0]);
  p_a[0] = (char *)args[0].data_d + base0;
  int base1 = args[1].dat->base_offset +
              dat1 * 1 * (start[0] * args[1].stencil->stride[0]);
  p_a[1] = (char *)args[1].data_d + base1;
  int base2 = args[2].dat->base_offset +
              dat2 * 1 * (start[0] * args[2].stencil->stride[0]);
  p_a[2] = (char *)args[2].data_d + base2;
  int base3 = args[3].dat->base_offset +
              dat3 * 1 * (start[0] * args[3].stencil->stride[0]);
  p_a[3] = (char *)args[3].data_d + base3;
  int base4 = args[4].dat->base_offset +
              dat4 * 1 * (start[0] * args[4].stencil->stride[0]);
  p_a[4] = (char *)args[4].data_d + base4;
#ifndef OPS_LAZY
  ops_H_D_exchanges_device(args, 6);
  ops_halo_exchanges(args, 6, range);
#endif
  if (OPS_diags > 1) {
    ops_timers_core(&c2, &t2);
    OPS_kernels[0].mpi_time += t2 - t1;
  }
  // call kernel wrapper function, passing in pointers to data
  hipLaunchKernelGGL(( ops_initialize_kernel), dim3(grid), dim3(tblock), 0, 0, (double *)p_a[0], (double *)p_a[1],
                                          (double *)p_a[2], (double *)p_a[3],
                                          (double *)p_a[4], arg_idx[0], x_size);
  cutilSafeCall(hipGetLastError());
  if (OPS_diags > 1) {
    // Synchronize only when timing, so the kernel time is attributable.
    cutilSafeCall(hipDeviceSynchronize());
    ops_timers_core(&c1, &t1);
    OPS_kernels[0].time += t1 - t2;
  }
#ifndef OPS_LAZY
  ops_set_dirtybit_device(args, 6);
  ops_set_halo_dirtybit3(&args[0], range);
  ops_set_halo_dirtybit3(&args[1], range);
  ops_set_halo_dirtybit3(&args[2], range);
  ops_set_halo_dirtybit3(&args[3], range);
  ops_set_halo_dirtybit3(&args[4], range);
#endif
  if (OPS_diags > 1) {
    // Update kernel record
    ops_timers_core(&c2, &t2);
    OPS_kernels[0].mpi_time += t2 - t1;
    OPS_kernels[0].transfer += ops_compute_transfer(dim, start, end, &arg0);
    OPS_kernels[0].transfer += ops_compute_transfer(dim, start, end, &arg1);
    OPS_kernels[0].transfer += ops_compute_transfer(dim, start, end, &arg2);
    OPS_kernels[0].transfer += ops_compute_transfer(dim, start, end, &arg3);
    OPS_kernels[0].transfer += ops_compute_transfer(dim, start, end, &arg4);
  }
}
#ifdef OPS_LAZY
// Lazy-execution entry point: instead of running the loop immediately,
// package its arguments into a kernel descriptor (hashed with djb2 for
// caching/identification) and enqueue it for deferred execution by
// ops_par_loop_initialize_kernel_execute.
void ops_par_loop_initialize_kernel(char const *name, ops_block block, int dim,
                                    int *range, ops_arg arg0, ops_arg arg1,
                                    ops_arg arg2, ops_arg arg3, ops_arg arg4,
                                    ops_arg arg5) {
  ops_kernel_descriptor *desc =
      (ops_kernel_descriptor *)malloc(sizeof(ops_kernel_descriptor));
  desc->name = name;
  desc->block = block;
  desc->dim = dim;
  desc->device = 1;  // marks this as a device (GPU) loop
  desc->index = 0;
  // djb2-style hash seeded with the kernel index and folded over the range
  // bounds and dat indices below.
  desc->hash = 5381;
  desc->hash = ((desc->hash << 5) + desc->hash) + 0;
  for (int i = 0; i < 2; i++) {  // 2 = 1 dim x (lo, hi) bounds
    desc->range[i] = range[i];
    desc->orig_range[i] = range[i];
    desc->hash = ((desc->hash << 5) + desc->hash) + range[i];
  }
  desc->nargs = 6;
  desc->args = (ops_arg *)malloc(6 * sizeof(ops_arg));
  desc->args[0] = arg0;
  desc->hash = ((desc->hash << 5) + desc->hash) + arg0.dat->index;
  desc->args[1] = arg1;
  desc->hash = ((desc->hash << 5) + desc->hash) + arg1.dat->index;
  desc->args[2] = arg2;
  desc->hash = ((desc->hash << 5) + desc->hash) + arg2.dat->index;
  desc->args[3] = arg3;
  desc->hash = ((desc->hash << 5) + desc->hash) + arg3.dat->index;
  desc->args[4] = arg4;
  desc->hash = ((desc->hash << 5) + desc->hash) + arg4.dat->index;
  desc->args[5] = arg5;  // arg5 is the idx argument; no dat to hash
  desc->function = ops_par_loop_initialize_kernel_execute;
  if (OPS_diags > 1) {
    ops_timing_realloc(0, "initialize_kernel");
  }
  ops_enqueue_kernel(desc);
}
#endif
|
c28a18641a270b8f9f845c1d35545f88d8a269e2.cu
|
//
// auto-generated by ops.py
//
__constant__ int xdim0_initialize_kernel;
int xdim0_initialize_kernel_h = -1;
__constant__ int xdim1_initialize_kernel;
int xdim1_initialize_kernel_h = -1;
__constant__ int xdim2_initialize_kernel;
int xdim2_initialize_kernel_h = -1;
__constant__ int xdim3_initialize_kernel;
int xdim3_initialize_kernel_h = -1;
__constant__ int xdim4_initialize_kernel;
int xdim4_initialize_kernel_h = -1;
#undef OPS_ACC0
#undef OPS_ACC1
#undef OPS_ACC2
#undef OPS_ACC3
#undef OPS_ACC4
#define OPS_ACC0(x) (x)
#define OPS_ACC1(x) (x)
#define OPS_ACC2(x) (x)
#define OPS_ACC3(x) (x)
#define OPS_ACC4(x) (x)
// user function: sets the 1-D initial condition at grid point idx[0]
// (CUDA build; identical logic to the HIP-generated copy of this file).
// NOTE(review): xmin, dx, eps, lambda, ur, ul, pr, pl, gam1, rhol are global
// constants declared elsewhere (injected by the OPS code generator); their
// values are not visible here.
__device__
void
initialize_kernel_gpu(double *x, double *rho_new, double *rhou_new,
                      double *rhoE_new, double *rhoin, int *idx) {
  // Physical coordinate; idx[0] - 2 accounts for two halo points before xmin.
  x[OPS_ACC0(0)] = xmin + (idx[0] - 2) * dx;
  if (x[OPS_ACC0(0)] >= -4.0) {
    // Right region: sinusoidally perturbed density, momentum and total energy
    // built from the (pr, ur) state.
    rho_new[OPS_ACC1(0)] = 1.0 + eps * sin(lambda * x[OPS_ACC0(0)]);
    rhou_new[OPS_ACC2(0)] = ur * rho_new[OPS_ACC1(0)];
    rhoE_new[OPS_ACC3(0)] =
        (pr / gam1) +
        0.5 * pow(rhou_new[OPS_ACC2(0)], 2) / rho_new[OPS_ACC1(0)];
  } else {
    // Left region: uniform (rhol, ul, pl) state.
    rho_new[OPS_ACC1(0)] = rhol;
    rhou_new[OPS_ACC2(0)] = ul * rho_new[OPS_ACC1(0)];
    rhoE_new[OPS_ACC3(0)] =
        (pl / gam1) +
        0.5 * pow(rhou_new[OPS_ACC2(0)], 2) / rho_new[OPS_ACC1(0)];
  }
  // Pressure recovered from conserved variables: p = gam1 * (E - rho*u^2/2).
  rhoin[OPS_ACC4(0)] =
      gam1 * (rhoE_new[OPS_ACC3(0)] -
              0.5 * rhou_new[OPS_ACC2(0)] * rhou_new[OPS_ACC2(0)] /
                  rho_new[OPS_ACC1(0)]);
}
// Thin device wrapper generated by OPS: advances each dat pointer to this
// thread's grid point, reconstructs the global index, and calls the user
// function for every in-range point (1-D decomposition, one point per thread).
__global__ void
ops_initialize_kernel(double *__restrict arg0, double *__restrict arg1,
                      double *__restrict arg2, double *__restrict arg3,
                      double *__restrict arg4, int arg_idx0, int size0) {
  const int tid = blockDim.x * blockIdx.x + threadIdx.x;
  int arg_idx[1] = {arg_idx0 + tid};
  if (tid < size0) {
    initialize_kernel_gpu(arg0 + tid, arg1 + tid, arg2 + tid, arg3 + tid,
                          arg4 + tid, arg_idx);
  }
}
// host stub function: computes the local iteration range, updates the
// device-side xdim constants, sets up base pointers for each ops_arg, and
// launches the ops_initialize_kernel wrapper (CUDA build).  Compiled either
// as the public ops_par_loop_initialize_kernel entry point, or -- under
// OPS_LAZY -- as the deferred body executed from a queued kernel descriptor.
#ifndef OPS_LAZY
void ops_par_loop_initialize_kernel(char const *name, ops_block block, int dim,
                                    int *range, ops_arg arg0, ops_arg arg1,
                                    ops_arg arg2, ops_arg arg3, ops_arg arg4,
                                    ops_arg arg5) {
#else
void ops_par_loop_initialize_kernel_execute(ops_kernel_descriptor *desc) {
  int dim = desc->dim;
  int *range = desc->range;
  ops_arg arg0 = desc->args[0];
  ops_arg arg1 = desc->args[1];
  ops_arg arg2 = desc->args[2];
  ops_arg arg3 = desc->args[3];
  ops_arg arg4 = desc->args[4];
  ops_arg arg5 = desc->args[5];
#endif
  // Timing
  double t1, t2, c1, c2;
  ops_arg args[6] = {arg0, arg1, arg2, arg3, arg4, arg5};
#if CHECKPOINTING && !OPS_LAZY
  if (!ops_checkpointing_before(args, 6, range, 0))
    return;
#endif
  if (OPS_diags > 1) {
    ops_timing_realloc(0, "initialize_kernel");
    OPS_kernels[0].count++;
    ops_timers_core(&c1, &t1);
  }
  // compute locally allocated range for the sub-block
  int start[1];
  int end[1];
#if OPS_MPI && !OPS_LAZY
  // Under MPI, clip the global range to this rank's owned sub-block,
  // honoring halo points at the physical domain boundaries.
  sub_block_list sb = OPS_sub_block_list[block->index];
  if (!sb->owned)
    return;
  for (int n = 0; n < 1; n++) {
    start[n] = sb->decomp_disp[n];
    end[n] = sb->decomp_disp[n] + sb->decomp_size[n];
    if (start[n] >= range[2 * n]) {
      start[n] = 0;
    } else {
      start[n] = range[2 * n] - start[n];
    }
    if (sb->id_m[n] == MPI_PROC_NULL && range[2 * n] < 0)
      start[n] = range[2 * n];
    if (end[n] >= range[2 * n + 1]) {
      end[n] = range[2 * n + 1] - sb->decomp_disp[n];
    } else {
      end[n] = sb->decomp_size[n];
    }
    if (sb->id_p[n] == MPI_PROC_NULL &&
        (range[2 * n + 1] > sb->decomp_disp[n] + sb->decomp_size[n]))
      end[n] += (range[2 * n + 1] - sb->decomp_disp[n] - sb->decomp_size[n]);
  }
#else
  for (int n = 0; n < 1; n++) {
    start[n] = range[2 * n];
    end[n] = range[2 * n + 1];
  }
#endif
  int x_size = MAX(0, end[0] - start[0]);
  int arg_idx[1];
#ifdef OPS_MPI
#ifdef OPS_LAZY
  ops_block block = desc->block;
  sub_block_list sb = OPS_sub_block_list[block->index];
#endif
  arg_idx[0] = sb->decomp_disp[0] + start[0];
#else
  arg_idx[0] = start[0];
#endif
  // Refresh the __constant__ xdim symbols only when a dat's leading
  // dimension changed since the last launch (the *_h shadows cache them).
  int xdim0 = args[0].dat->size[0];
  int xdim1 = args[1].dat->size[0];
  int xdim2 = args[2].dat->size[0];
  int xdim3 = args[3].dat->size[0];
  int xdim4 = args[4].dat->size[0];
  if (xdim0 != xdim0_initialize_kernel_h ||
      xdim1 != xdim1_initialize_kernel_h ||
      xdim2 != xdim2_initialize_kernel_h ||
      xdim3 != xdim3_initialize_kernel_h ||
      xdim4 != xdim4_initialize_kernel_h) {
    cudaMemcpyToSymbol(xdim0_initialize_kernel, &xdim0, sizeof(int));
    xdim0_initialize_kernel_h = xdim0;
    cudaMemcpyToSymbol(xdim1_initialize_kernel, &xdim1, sizeof(int));
    xdim1_initialize_kernel_h = xdim1;
    cudaMemcpyToSymbol(xdim2_initialize_kernel, &xdim2, sizeof(int));
    xdim2_initialize_kernel_h = xdim2;
    cudaMemcpyToSymbol(xdim3_initialize_kernel, &xdim3, sizeof(int));
    xdim3_initialize_kernel_h = xdim3;
    cudaMemcpyToSymbol(xdim4_initialize_kernel, &xdim4, sizeof(int));
    xdim4_initialize_kernel_h = xdim4;
  }
  // One thread per grid point along x.
  dim3 grid((x_size - 1) / OPS_block_size_x + 1, 1, 1);
  dim3 tblock(OPS_block_size_x, 1, 1);
  int dat0 = (OPS_soa ? args[0].dat->type_size : args[0].dat->elem_size);
  int dat1 = (OPS_soa ? args[1].dat->type_size : args[1].dat->elem_size);
  int dat2 = (OPS_soa ? args[2].dat->type_size : args[2].dat->elem_size);
  int dat3 = (OPS_soa ? args[3].dat->type_size : args[3].dat->elem_size);
  int dat4 = (OPS_soa ? args[4].dat->type_size : args[4].dat->elem_size);
  char *p_a[6];
  // set up initial pointers: byte offset of the first iterated element
  // within each dat's device buffer.
  int base0 = args[0].dat->base_offset +
              dat0 * 1 * (start[0] * args[0].stencil->stride[0]);
  p_a[0] = (char *)args[0].data_d + base0;
  int base1 = args[1].dat->base_offset +
              dat1 * 1 * (start[0] * args[1].stencil->stride[0]);
  p_a[1] = (char *)args[1].data_d + base1;
  int base2 = args[2].dat->base_offset +
              dat2 * 1 * (start[0] * args[2].stencil->stride[0]);
  p_a[2] = (char *)args[2].data_d + base2;
  int base3 = args[3].dat->base_offset +
              dat3 * 1 * (start[0] * args[3].stencil->stride[0]);
  p_a[3] = (char *)args[3].data_d + base3;
  int base4 = args[4].dat->base_offset +
              dat4 * 1 * (start[0] * args[4].stencil->stride[0]);
  p_a[4] = (char *)args[4].data_d + base4;
#ifndef OPS_LAZY
  ops_H_D_exchanges_device(args, 6);
  ops_halo_exchanges(args, 6, range);
#endif
  if (OPS_diags > 1) {
    ops_timers_core(&c2, &t2);
    OPS_kernels[0].mpi_time += t2 - t1;
  }
  // call kernel wrapper function, passing in pointers to data
  ops_initialize_kernel<<<grid, tblock>>>((double *)p_a[0], (double *)p_a[1],
                                          (double *)p_a[2], (double *)p_a[3],
                                          (double *)p_a[4], arg_idx[0], x_size);
  cutilSafeCall(cudaGetLastError());
  if (OPS_diags > 1) {
    // Synchronize only when timing, so the kernel time is attributable.
    cutilSafeCall(cudaDeviceSynchronize());
    ops_timers_core(&c1, &t1);
    OPS_kernels[0].time += t1 - t2;
  }
#ifndef OPS_LAZY
  ops_set_dirtybit_device(args, 6);
  ops_set_halo_dirtybit3(&args[0], range);
  ops_set_halo_dirtybit3(&args[1], range);
  ops_set_halo_dirtybit3(&args[2], range);
  ops_set_halo_dirtybit3(&args[3], range);
  ops_set_halo_dirtybit3(&args[4], range);
#endif
  if (OPS_diags > 1) {
    // Update kernel record
    ops_timers_core(&c2, &t2);
    OPS_kernels[0].mpi_time += t2 - t1;
    OPS_kernels[0].transfer += ops_compute_transfer(dim, start, end, &arg0);
    OPS_kernels[0].transfer += ops_compute_transfer(dim, start, end, &arg1);
    OPS_kernels[0].transfer += ops_compute_transfer(dim, start, end, &arg2);
    OPS_kernels[0].transfer += ops_compute_transfer(dim, start, end, &arg3);
    OPS_kernels[0].transfer += ops_compute_transfer(dim, start, end, &arg4);
  }
}
#ifdef OPS_LAZY
void ops_par_loop_initialize_kernel(char const *name, ops_block block, int dim,
                                    int *range, ops_arg arg0, ops_arg arg1,
                                    ops_arg arg2, ops_arg arg3, ops_arg arg4,
                                    ops_arg arg5) {
  // Lazy-execution path: package this loop invocation into a descriptor and
  // hand it to the OPS queue instead of running it immediately.
  ops_kernel_descriptor *desc =
      (ops_kernel_descriptor *)malloc(sizeof(ops_kernel_descriptor));
  desc->name = name;
  desc->block = block;
  desc->dim = dim;
  desc->device = 1;
  desc->index = 0;
  // djb2-style hash over kernel index, iteration range and dat indices,
  // used by OPS to identify repeated invocations of the same loop.
  desc->hash = 5381;
  desc->hash = ((desc->hash << 5) + desc->hash) + 0;
  for (int d = 0; d < 2; d++) {
    desc->range[d] = range[d];
    desc->orig_range[d] = range[d];
    desc->hash = ((desc->hash << 5) + desc->hash) + range[d];
  }
  desc->nargs = 6;
  desc->args = (ops_arg *)malloc(6 * sizeof(ops_arg));
  ops_arg all_args[6] = {arg0, arg1, arg2, arg3, arg4, arg5};
  for (int a = 0; a < 6; a++) {
    desc->args[a] = all_args[a];
    // arg5 (the idx argument) carries no dat, so it is excluded from the hash
    if (a < 5)
      desc->hash = ((desc->hash << 5) + desc->hash) + all_args[a].dat->index;
  }
  desc->function = ops_par_loop_initialize_kernel_execute;
  if (OPS_diags > 1) {
    ops_timing_realloc(0, "initialize_kernel");
  }
  ops_enqueue_kernel(desc);
}
#endif
|
d8930e0654b98cb63449fcd0bf7900e81461e9f4.hip
|
// !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
#include <cstdio>
#include <ctime>
#include <vector>
#include <algorithm>
#include <stdlib.h>
// utilities
#include <helper_cuda.h>
#include <time.h>
/////////////////////////////change the data size to larger than 16 gb to test for different memories. L1 is enabled. "ALL_CCFLAGS += -Xptxas -dlcm=ca"
//typedef unsigned char byte;
void init_cpu_data(long long int* A, long long int size, int stride, long long int mod){
	// Build the pointer-chase chain on the host: A[i] holds the index of the
	// next hop, `stride` elements ahead; the final `stride` slots wrap the
	// chain back to index 0. (`mod` is unused here; kept so the signature
	// matches the call sites.)
	long long int chain_end = size - stride;
	long long int i = 0;
	for (; i < chain_end; ++i){
		A[i] = i + stride;
	}
	for (; i < size; ++i){
		A[i] = 0;
	}
}
__device__ void P_chasing0(int mark, long long int *A, int iterations, int *B, int *C, long long int *D, int starting_index, float clock_rate, int data_stride){
	// Untimed warm-up chase with 32-bit indexing: follow the chain in A for
	// `iterations` hops. mark/C/D/clock_rate/data_stride are unused; the
	// signature mirrors the timed variants.
	int cursor = starting_index;
	for (int hop = 0; hop < iterations; ++hop){
		cursor = A[cursor];  // 64-bit element narrowed into the int cursor
	}
	// Publish the final position so the chase cannot be optimized away.
	B[0] = cursor;
}
//////////min page size 4kb = 4096b = 32 * 128.
__device__ void P_chasing1(int mark, long long int *A, long long int iterations, long long int *B, long long int *C, long long int *D, int starting_index, float clock_rate, int data_stride){
	// Untimed pointer chase with 64-bit indexing: follow the chain in A for
	// `iterations` hops (used e.g. to saturate caches before a timed run).
	// mark/C/D/clock_rate/data_stride are unused; the signature mirrors the
	// timed variants.
	long long int cursor = starting_index;
	long long int remaining = iterations;
	while (remaining > 0){
		cursor = A[cursor];
		--remaining;
	}
	// Publish the final position so the chase cannot be optimized away.
	B[0] = cursor;
}
//////////min page size 4kb = 4096b = 32 * 128.
// Timed pointer chase: follows the chain in A for `iterations` hops while
// timestamping each hop with clock64, logging the visited index and the hop
// latency (in cycles) into shared memory, then copying both logs out to
// global C (indices) and D (cycle counts).
// Precondition: iterations <= 2048, the capacity of s_index/s_tvalue.
// NOTE(review): the asm() blocks are NVIDIA PTX, so despite the hipify
// banner this function can only build for the NVIDIA HIP backend — confirm.
__device__ void P_chasing2(int mark, long long int *A, long long int iterations, long long int *B, long long int *C, long long int *D, long long int starting_index, float clock_rate, int data_stride){//////what is the effect of warmup outside vs inside?
//////shared memory: 0xc000 max (49152 Bytes = 48KB)
__shared__ long long int s_tvalue[1024 * 2];/////must be enough to contain the number of iterations.
__shared__ long long int s_index[1024 * 2];/////k40 6 * 1024 total, p100 8 * 1024 total, v100 up to 12 * 1024 total.
//__shared__ long long int s_index[1];
long long int j = starting_index;/////make them in the same page, and miss near in cache lines
//int j = B[0];
long long int start_time = 0;//////clock
long long int end_time = 0;//////clock
long long int time_interval = 0;//////clock
//long long int total_time = end_time - start_time;//////clock
// Disabled C version of the timed loop, kept for reference; the PTX version
// below is used instead so the timing brackets exactly the dependent load.
if(false){
for (int it = 0; it < iterations; it++){//////////it here is limited by the size of the shared memory
start_time = clock64();//////clock
j = A[j];
s_index[it] = j;
end_time=clock64();//////clock
s_tvalue[it] = end_time - start_time;
}
}
if(true){
// Prologue: convert s_index to a shared-address-space pointer (t5) and keep
// its 32-bit form in t6 for address arithmetic inside the loop.
asm(".reg .u64 t1;\n\t"
".reg .u64 t2;\n\t"
".reg .u32 t3;\n\t"
".reg .u32 t4;\n\t"
".reg .u64 t5;\n\t"
".reg .u32 t6;\n\t"
"cvta.to.shared.u64 t5, %0;\n\t"
"cvt.u32.u64 t6, t5;\n\t"
:: "l"(s_index));////////////////////////////////////cvta.to.global.u64 %rd4, %rd25; needed??
for (int it = 0; it < iterations; it++){//////////it here is limited by the size of the shared memory
// Per hop: t2 = &A[j] (j*8 + A), t4 = &s_index[it] in shared memory;
// read clock64, perform the dependent global load j = A[j], store it to
// shared, read clock64 again — the delta is that load's latency in cycles.
asm("shl.b64 t1, %3, 3;\n\t"
"add.s64 t2, t1, %4;\n\t"
"shl.b32 t3, %6, 3;\n\t"
"add.s32 t4, t3, t6;\n\t"
"mov.u64 %0, %clock64;\n\t"
"ld.global.u64 %2, [t2];\n\t"
"st.shared.u64 [t4], %2;\n\t"
"mov.u64 %1, %clock64;"
: "=l"(start_time), "=l"(end_time), "=l"(j) : "l"(j), "l"(A), "l"(s_index), "r"(it));
time_interval = end_time - start_time;
//if(it >= 4 * 1024){
s_tvalue[it] = time_interval;
//}
}
}
//printf("inside%d:%fms\n", mark, (total_time / (float)clock_rate) / ((float)iterations));//////clock, average latency
// Publish the final chain position and drain the shared-memory logs.
B[0] = j;
for (int it = 0; it < iterations; it++){
C[it] = s_index[it];
D[it] = s_tvalue[it];
}
}
__global__ void tlb_latency_test(long long int *A, long long int iterations, long long int *B, long long int *C, long long int *D, float clock_rate, long long int mod, int data_stride){
	// Clamp the traced hop count into [16, 2048]: P_chasing2's shared-memory
	// logs hold at most 2048 entries.
	long long int traced = iterations;
	if (traced > 2048) traced = 2048;
	if (traced < 16) traced = 16;
	///////////kepler L2 has 48 * 1024 = 49152 cache lines. But we only have 1024 * 4 slots in shared memory.
	//P_chasing1(0, A, iterations + 0, B, C, D, 0, clock_rate, data_stride);////////saturate the L2
	P_chasing2(0, A, traced, B, C, D, 0, clock_rate, data_stride);////////partially print the data
	__syncthreads();
}
// Memory-placement strategies exercised by one measurement sweep.
enum SweepMode { SWEEP_MANAGED, SWEEP_PREFERRED_LOC, SWEEP_ACCESSED_BY, SWEEP_PINNED, SWEEP_MEMCPY };

// Runs tlb_latency_test over data sizes mod2 = 4GB..16GB (doubling) for one
// placement strategy, appending per-access (index, latency) samples to pFile.
//   mode         - how the chased buffer is allocated/placed (see SweepMode)
//   label        - section header written verbatim (leading space preserved
//                  to keep the historic output format byte-identical)
//   GPU_data_out - 2-element device sink the kernel writes its result to
//   clock_rate   - device clock in kHz, used to convert cycles to ms
//   data_stride  - pointer-chase stride in elements
//   dev_id       - device used for the hipMemAdviseSetAccessedBy hint
//   counter      - bumped once per data size (bookkeeping retained from the
//                  original code; its value is never read)
static void run_sweep(SweepMode mode, const char *label, FILE *pFile,
                      long long int *GPU_data_out, float clock_rate,
                      int data_stride, int dev_id, int *counter){
	fprintf(pFile, "*\n*\n*\n%s\n", label);
	fflush(pFile);
	for (long long int mod2 = 1073741824; mod2 <= 4294967296; mod2 = mod2 * 2){////268435456 = 1gb, 536870912 = 2gb, 1073741824 = 4gb, 2147483648 = 8gb, 4294967296 = 16gb, 8589934592 = 32gb.
		(*counter)++;
		long long int mod = mod2;
		long long int data_size = mod;
		if (data_size < 4194304){//////////keep the buffer large enough that L2 prefetch cannot hide misses
			data_size = 4194304;
		}
		long long int iterations = mod / data_stride;
		/////////////////////////////////////////////////////////// input buffer
		long long int *CPU_data_in = NULL;
		long long int *GPU_data_in = NULL;///////only used by SWEEP_MEMCPY
		switch (mode){
		case SWEEP_MANAGED:
			checkCudaErrors(hipMallocManaged(&CPU_data_in, sizeof(long long int) * data_size));/////////////using unified memory
			break;
		case SWEEP_PREFERRED_LOC:
			checkCudaErrors(hipMallocManaged(&CPU_data_in, sizeof(long long int) * data_size));
			checkCudaErrors(hipMemAdvise(CPU_data_in, sizeof(long long int) * data_size, hipMemAdviseSetPreferredLocation, hipCpuDeviceId));////////using hint
			break;
		case SWEEP_ACCESSED_BY:
			checkCudaErrors(hipMallocManaged(&CPU_data_in, sizeof(long long int) * data_size));
			checkCudaErrors(hipMemAdvise(CPU_data_in, sizeof(long long int) * data_size, hipMemAdviseSetAccessedBy, dev_id));//////////using hint
			break;
		case SWEEP_PINNED:
			checkCudaErrors(hipHostMalloc((void**)&CPU_data_in, sizeof(long long int) * data_size, hipHostMallocDefault));//////////using pinned memory
			break;
		case SWEEP_MEMCPY:
			CPU_data_in = (long long int*)malloc(sizeof(long long int) * data_size);
			break;
		}
		init_cpu_data(CPU_data_in, data_size, data_stride, mod);
		// the kernel logs at most 2048 samples (shared-memory capacity)
		long long int reduced_iter = iterations;
		if (reduced_iter > 2048){
			reduced_iter = 2048;
		}else if (reduced_iter < 16){
			reduced_iter = 16;
		}
		long long int *CPU_data_out_index = (long long int*)malloc(sizeof(long long int) * reduced_iter);
		long long int *CPU_data_out_time = (long long int*)malloc(sizeof(long long int) * reduced_iter);
		if (mode == SWEEP_MEMCPY){
			// explicit-copy variant: stage the chain into plain device memory
			checkCudaErrors(hipMalloc(&GPU_data_in, sizeof(long long int) * data_size));
			checkCudaErrors(hipMemcpy(GPU_data_in, CPU_data_in, sizeof(long long int) * data_size, hipMemcpyHostToDevice));
		}
		/////////////////////////////////////////////////////////// output buffers
		long long int *GPU_data_out_index;
		checkCudaErrors(hipMalloc(&GPU_data_out_index, sizeof(long long int) * reduced_iter));
		long long int *GPU_data_out_time;
		checkCudaErrors(hipMalloc(&GPU_data_out_time, sizeof(long long int) * reduced_iter));
		long long int *kernel_input = (mode == SWEEP_MEMCPY) ? GPU_data_in : CPU_data_in;
		hipLaunchKernelGGL(( tlb_latency_test), dim3(1), dim3(1), 0, 0, kernel_input, iterations, GPU_data_out, GPU_data_out_index, GPU_data_out_time, clock_rate, mod, data_stride);///////////////kernel is here
		checkCudaErrors(hipDeviceSynchronize());
		checkCudaErrors(hipMemcpy(CPU_data_out_index, GPU_data_out_index, sizeof(long long int) * reduced_iter, hipMemcpyDeviceToHost));
		checkCudaErrors(hipMemcpy(CPU_data_out_time, GPU_data_out_time, sizeof(long long int) * reduced_iter, hipMemcpyDeviceToHost));
		fprintf(pFile, "###################data_stride%d#########################\n", data_stride);
		fprintf(pFile, "###############Mod%lld##############%lld\n", mod, iterations);
		for (long long int it = 0; it < reduced_iter; it++){
			fprintf(pFile, "%lld %fms %lldcycles\n", CPU_data_out_index[it], CPU_data_out_time[it] / (float)clock_rate, CPU_data_out_time[it]);
		}
		fflush(pFile);
		checkCudaErrors(hipFree(GPU_data_out_index));
		checkCudaErrors(hipFree(GPU_data_out_time));
		// release the input buffer with the deallocator matching its allocator
		switch (mode){
		case SWEEP_PINNED:
			checkCudaErrors(hipHostFree(CPU_data_in));//////pinned memory
			break;
		case SWEEP_MEMCPY:
			checkCudaErrors(hipFree(GPU_data_in));
			free(CPU_data_in);
			break;
		default:
			checkCudaErrors(hipFree(CPU_data_in));//////managed memory
			break;
		}
		free(CPU_data_out_index);
		free(CPU_data_out_time);
	}
}

// Entry point: selects device 0, verifies unified-memory support, then runs
// the latency sweep once per memory-placement strategy, writing results to
// output.txt. (The five formerly copy-pasted per-strategy loops are factored
// into run_sweep above; the output format is unchanged.)
int main(int argc, char **argv)
{
	printf("\n");
	// set device
	hipDeviceProp_t device_prop;
	//int dev_id = findCudaDevice(argc, (const char **) argv);
	int dev_id = 0;
	checkCudaErrors(hipGetDeviceProperties(&device_prop, dev_id));
	int peak_clk = 1;//kHz
	checkCudaErrors(hipDeviceGetAttribute(&peak_clk, hipDeviceAttributeClockRate, dev_id));
	float clock_rate = (float) peak_clk;
	if (!device_prop.managedMemory) {
		// This sample requires being run on a device that supports Unified Memory
		fprintf(stderr, "Unified Memory not supported on this device\n");
		exit(EXIT_WAIVED);
	}
	if (device_prop.computeMode == hipComputeModeProhibited)
	{
		// This sample requires being run with a default or process exclusive mode
		fprintf(stderr, "This sample requires a device in either default or process exclusive mode\n");
		exit(EXIT_WAIVED);
	}
	if (device_prop.concurrentManagedAccess == 1){
		printf("This device supports concurrent Managed Access.\n");
	}else{
		printf("This device does not support concurrent Managed Access.\n");
	}
	int value1 = 1;
	checkCudaErrors(hipDeviceGetAttribute(&value1, hipDeviceAttributeConcurrentManagedAccess, dev_id));
	printf("hipDeviceAttributeConcurrentManagedAccess = %d\n", value1);
	///////////////////////////////////////////////////////////////////GPU data out
	long long int *GPU_data_out;
	checkCudaErrors(hipMalloc(&GPU_data_out, sizeof(long long int) * 2));
	FILE *pFile = fopen("output.txt", "w");
	if (pFile == NULL){
		fprintf(stderr, "failed to open output.txt for writing\n");
		exit(EXIT_FAILURE);
	}
	int counter = 0;
	/////////change the data stride as to observe if the latency increase is caused by iteration(cache) or stride(tlb)
	for (int data_stride = 16 * 256 * 1024; data_stride <= 16 * 256 * 1024; data_stride = data_stride * 2){/////////32mb stride
		//data_stride = data_stride + 32;///offset a cache line, trying to cause L2 miss but tlb hit.
		run_sweep(SWEEP_MANAGED, " plain managed", pFile, GPU_data_out, clock_rate, data_stride, dev_id, &counter);
		run_sweep(SWEEP_PREFERRED_LOC, " preferredlocation", pFile, GPU_data_out, clock_rate, data_stride, dev_id, &counter);
		run_sweep(SWEEP_ACCESSED_BY, " accessedby", pFile, GPU_data_out, clock_rate, data_stride, dev_id, &counter);
		run_sweep(SWEEP_PINNED, " pinned", pFile, GPU_data_out, clock_rate, data_stride, dev_id, &counter);
		run_sweep(SWEEP_MEMCPY, " memcopy", pFile, GPU_data_out, clock_rate, data_stride, dev_id, &counter);
	}
	checkCudaErrors(hipFree(GPU_data_out));
	fclose(pFile);
	exit(EXIT_SUCCESS);
}
|
d8930e0654b98cb63449fcd0bf7900e81461e9f4.cu
|
#include <cstdio>
#include <ctime>
#include <vector>
#include <algorithm>
#include <stdlib.h>
// utilities
#include <helper_cuda.h>
#include <time.h>
/////////////////////////////change the data size to larger than 16 gb to test for different memories. L1 is enabled. "ALL_CCFLAGS += -Xptxas -dlcm=ca"
//typedef unsigned char byte;
void init_cpu_data(long long int* A, long long int size, int stride, long long int mod){
	// Build the pointer-chase chain on the host: A[i] holds the index of the
	// next hop, `stride` elements ahead; the final `stride` slots wrap the
	// chain back to index 0. (`mod` is unused here; kept so the signature
	// matches the call sites.)
	long long int chain_end = size - stride;
	long long int i = 0;
	for (; i < chain_end; ++i){
		A[i] = i + stride;
	}
	for (; i < size; ++i){
		A[i] = 0;
	}
}
__device__ void P_chasing0(int mark, long long int *A, int iterations, int *B, int *C, long long int *D, int starting_index, float clock_rate, int data_stride){
	// Untimed warm-up chase with 32-bit indexing: follow the chain in A for
	// `iterations` hops. mark/C/D/clock_rate/data_stride are unused; the
	// signature mirrors the timed variants.
	int cursor = starting_index;
	for (int hop = 0; hop < iterations; ++hop){
		cursor = A[cursor];  // 64-bit element narrowed into the int cursor
	}
	// Publish the final position so the chase cannot be optimized away.
	B[0] = cursor;
}
//////////min page size 4kb = 4096b = 32 * 128.
__device__ void P_chasing1(int mark, long long int *A, long long int iterations, long long int *B, long long int *C, long long int *D, int starting_index, float clock_rate, int data_stride){
	// Untimed pointer chase with 64-bit indexing: follow the chain in A for
	// `iterations` hops (used e.g. to saturate caches before a timed run).
	// mark/C/D/clock_rate/data_stride are unused; the signature mirrors the
	// timed variants.
	long long int cursor = starting_index;
	long long int remaining = iterations;
	while (remaining > 0){
		cursor = A[cursor];
		--remaining;
	}
	// Publish the final position so the chase cannot be optimized away.
	B[0] = cursor;
}
//////////min page size 4kb = 4096b = 32 * 128.
// Timed pointer chase: follows the chain in A for `iterations` hops while
// timestamping each hop with clock64, logging the visited index and the hop
// latency (in cycles) into shared memory, then copying both logs out to
// global C (indices) and D (cycle counts).
// Precondition: iterations <= 2048, the capacity of s_index/s_tvalue.
__device__ void P_chasing2(int mark, long long int *A, long long int iterations, long long int *B, long long int *C, long long int *D, long long int starting_index, float clock_rate, int data_stride){//////what is the effect of warmup outside vs inside?
//////shared memory: 0xc000 max (49152 Bytes = 48KB)
__shared__ long long int s_tvalue[1024 * 2];/////must be enough to contain the number of iterations.
__shared__ long long int s_index[1024 * 2];/////k40 6 * 1024 total, p100 8 * 1024 total, v100 up to 12 * 1024 total.
//__shared__ long long int s_index[1];
long long int j = starting_index;/////make them in the same page, and miss near in cache lines
//int j = B[0];
long long int start_time = 0;//////clock
long long int end_time = 0;//////clock
long long int time_interval = 0;//////clock
//long long int total_time = end_time - start_time;//////clock
// Disabled C version of the timed loop, kept for reference; the PTX version
// below is used instead so the timing brackets exactly the dependent load.
if(false){
for (int it = 0; it < iterations; it++){//////////it here is limited by the size of the shared memory
start_time = clock64();//////clock
j = A[j];
s_index[it] = j;
end_time=clock64();//////clock
s_tvalue[it] = end_time - start_time;
}
}
if(true){
// Prologue: convert s_index to a shared-address-space pointer (t5) and keep
// its 32-bit form in t6 for address arithmetic inside the loop.
asm(".reg .u64 t1;\n\t"
".reg .u64 t2;\n\t"
".reg .u32 t3;\n\t"
".reg .u32 t4;\n\t"
".reg .u64 t5;\n\t"
".reg .u32 t6;\n\t"
"cvta.to.shared.u64 t5, %0;\n\t"
"cvt.u32.u64 t6, t5;\n\t"
:: "l"(s_index));////////////////////////////////////cvta.to.global.u64 %rd4, %rd25; needed??
for (int it = 0; it < iterations; it++){//////////it here is limited by the size of the shared memory
// Per hop: t2 = &A[j] (j*8 + A), t4 = &s_index[it] in shared memory;
// read clock64, perform the dependent global load j = A[j], store it to
// shared, read clock64 again — the delta is that load's latency in cycles.
asm("shl.b64 t1, %3, 3;\n\t"
"add.s64 t2, t1, %4;\n\t"
"shl.b32 t3, %6, 3;\n\t"
"add.s32 t4, t3, t6;\n\t"
"mov.u64 %0, %clock64;\n\t"
"ld.global.u64 %2, [t2];\n\t"
"st.shared.u64 [t4], %2;\n\t"
"mov.u64 %1, %clock64;"
: "=l"(start_time), "=l"(end_time), "=l"(j) : "l"(j), "l"(A), "l"(s_index), "r"(it));
time_interval = end_time - start_time;
//if(it >= 4 * 1024){
s_tvalue[it] = time_interval;
//}
}
}
//printf("inside%d:%fms\n", mark, (total_time / (float)clock_rate) / ((float)iterations));//////clock, average latency
// Publish the final chain position and drain the shared-memory logs.
B[0] = j;
for (int it = 0; it < iterations; it++){
C[it] = s_index[it];
D[it] = s_tvalue[it];
}
}
__global__ void tlb_latency_test(long long int *A, long long int iterations, long long int *B, long long int *C, long long int *D, float clock_rate, long long int mod, int data_stride){
	// Clamp the traced hop count into [16, 2048]: P_chasing2's shared-memory
	// logs hold at most 2048 entries.
	long long int traced = iterations;
	if (traced > 2048) traced = 2048;
	if (traced < 16) traced = 16;
	///////////kepler L2 has 48 * 1024 = 49152 cache lines. But we only have 1024 * 4 slots in shared memory.
	//P_chasing1(0, A, iterations + 0, B, C, D, 0, clock_rate, data_stride);////////saturate the L2
	P_chasing2(0, A, traced, B, C, D, 0, clock_rate, data_stride);////////partially print the data
	__syncthreads();
}
// Allocation strategies exercised by the latency sweep; each mode differs
// only in how the pointer-chasing buffer is allocated/placed.
enum AllocMode {
    MODE_MANAGED,             // plain cudaMallocManaged
    MODE_PREFERRED_LOCATION,  // managed + preferred-location-on-CPU hint
    MODE_ACCESSED_BY,         // managed + accessed-by-GPU hint
    MODE_PINNED,              // page-locked host memory (accessed in place)
    MODE_MEMCPY               // plain device memory filled by explicit memcpy
};

// Runs one sweep of the pointer-chasing latency kernel over buffer sizes
// mod = 4GB..16GB (doubling), allocating the chased buffer according to
// `mode`, and appends per-access latencies to pFile. Allocation order,
// kernel arguments, and every output-format string are kept identical to
// the original inlined loops so result files stay comparable.
static void run_latency_sweep(FILE *pFile, long long int *GPU_data_out,
                              float clock_rate, int data_stride, int dev_id,
                              int *counter, AllocMode mode)
{
    ////1073741824 = 4gb, 2147483648 = 8gb, 4294967296 = 16gb.
    for(long long int mod2 = 1073741824; mod2 <= 4294967296; mod2 = mod2 * 2){
        (*counter)++;
        long long int mod = mod2;
        long long int data_size = mod;
        if(data_size < 4194304){////floor the buffer (4M elements = 32MB) to defeat L2 prefetch
            data_size = 4194304;
        }
        long long int iterations = mod / data_stride;
        ////Allocate the chased buffer according to the requested strategy.
        long long int *CPU_data_in = NULL;
        long long int *GPU_data_in = NULL;
        switch(mode){
            case MODE_MANAGED:
                checkCudaErrors(cudaMallocManaged(&CPU_data_in, sizeof(long long int) * data_size));
                break;
            case MODE_PREFERRED_LOCATION:
                checkCudaErrors(cudaMallocManaged(&CPU_data_in, sizeof(long long int) * data_size));
                checkCudaErrors(cudaMemAdvise(CPU_data_in, sizeof(long long int) * data_size, cudaMemAdviseSetPreferredLocation, cudaCpuDeviceId));
                break;
            case MODE_ACCESSED_BY:
                checkCudaErrors(cudaMallocManaged(&CPU_data_in, sizeof(long long int) * data_size));
                checkCudaErrors(cudaMemAdvise(CPU_data_in, sizeof(long long int) * data_size, cudaMemAdviseSetAccessedBy, dev_id));
                break;
            case MODE_PINNED:
                checkCudaErrors(cudaHostAlloc((void**)&CPU_data_in, sizeof(long long int) * data_size, cudaHostAllocDefault));
                break;
            case MODE_MEMCPY:
                CPU_data_in = (long long int*)malloc(sizeof(long long int) * data_size);
                break;
        }
        init_cpu_data(CPU_data_in, data_size, data_stride, mod);
        ////Record at most 2048 (and at least 16) samples per configuration.
        long long int reduced_iter = iterations;
        if(reduced_iter > 2048){
            reduced_iter = 2048;
        }else if(reduced_iter < 16){
            reduced_iter = 16;
        }
        long long int *CPU_data_out_index = (long long int*)malloc(sizeof(long long int) * reduced_iter);
        long long int *CPU_data_out_time = (long long int*)malloc(sizeof(long long int) * reduced_iter);
        if(mode == MODE_MEMCPY){
            checkCudaErrors(cudaMalloc(&GPU_data_in, sizeof(long long int) * data_size));
            checkCudaErrors(cudaMemcpy(GPU_data_in, CPU_data_in, sizeof(long long int) * data_size, cudaMemcpyHostToDevice));
        }
        long long int *GPU_data_out_index;
        checkCudaErrors(cudaMalloc(&GPU_data_out_index, sizeof(long long int) * reduced_iter));
        long long int *GPU_data_out_time;
        checkCudaErrors(cudaMalloc(&GPU_data_out_time, sizeof(long long int) * reduced_iter));
        ////Single-thread pointer chase. Every mode except MODE_MEMCPY lets the
        ////kernel read the (managed/pinned) buffer in place; MODE_MEMCPY
        ////chases the device-resident copy instead.
        long long int *kernel_data = (mode == MODE_MEMCPY) ? GPU_data_in : CPU_data_in;
        tlb_latency_test<<<1, 1>>>(kernel_data, iterations, GPU_data_out, GPU_data_out_index, GPU_data_out_time, clock_rate, mod, data_stride);
        checkCudaErrors(cudaGetLastError());////catch launch-configuration errors
        checkCudaErrors(cudaDeviceSynchronize());////catch asynchronous execution errors
        checkCudaErrors(cudaMemcpy(CPU_data_out_index, GPU_data_out_index, sizeof(long long int) * reduced_iter, cudaMemcpyDeviceToHost));
        checkCudaErrors(cudaMemcpy(CPU_data_out_time, GPU_data_out_time, sizeof(long long int) * reduced_iter, cudaMemcpyDeviceToHost));
        fprintf(pFile, "###################data_stride%d#########################\n", data_stride);
        fprintf (pFile, "###############Mod%lld##############%lld\n", mod, iterations);
        for (long long int it = 0; it < reduced_iter; it++){
            fprintf (pFile, "%lld %fms %lldcycles\n", CPU_data_out_index[it], CPU_data_out_time[it] / (float)clock_rate, CPU_data_out_time[it]);
        }
        fflush(pFile);
        checkCudaErrors(cudaFree(GPU_data_out_index));
        checkCudaErrors(cudaFree(GPU_data_out_time));
        ////Release the chased buffer with the deallocator matching its allocator.
        switch(mode){
            case MODE_PINNED:
                checkCudaErrors(cudaFreeHost(CPU_data_in));
                break;
            case MODE_MEMCPY:
                checkCudaErrors(cudaFree(GPU_data_in));
                free(CPU_data_in);
                break;
            default:
                checkCudaErrors(cudaFree(CPU_data_in));
                break;
        }
        free(CPU_data_out_index);
        free(CPU_data_out_time);
    }
}

// Measures single-thread GPU access latency (TLB/page-migration behaviour)
// for several host/device memory placement strategies at a 32MB chase
// stride, writing results to output.txt.
int main(int argc, char **argv)
{
    printf("\n");
    // set device and verify the capabilities this benchmark relies on
    cudaDeviceProp device_prop;
    int dev_id = 0;
    checkCudaErrors(cudaGetDeviceProperties(&device_prop, dev_id));
    int peak_clk = 1;//kHz
    checkCudaErrors(cudaDeviceGetAttribute(&peak_clk, cudaDevAttrClockRate, dev_id));
    float clock_rate = (float) peak_clk;
    if (!device_prop.managedMemory) {
        // This sample requires being run on a device that supports Unified Memory
        fprintf(stderr, "Unified Memory not supported on this device\n");
        exit(EXIT_WAIVED);
    }
    if (device_prop.computeMode == cudaComputeModeProhibited)
    {
        // This sample requires being run with a default or process exclusive mode
        fprintf(stderr, "This sample requires a device in either default or process exclusive mode\n");
        exit(EXIT_WAIVED);
    }
    if (device_prop.concurrentManagedAccess == 1){
        printf("This device supports concurrent Managed Access.\n");
    }else{
        printf("This device does not support concurrent Managed Access.\n");
    }
    int value1 = 1;
    checkCudaErrors(cudaDeviceGetAttribute(&value1, cudaDevAttrConcurrentManagedAccess, dev_id));
    printf("cudaDevAttrConcurrentManagedAccess = %d\n", value1);
    ///////////////////////////////////////////////////////////////////GPU data out
    long long int *GPU_data_out;
    checkCudaErrors(cudaMalloc(&GPU_data_out, sizeof(long long int) * 2));
    FILE * pFile;
    pFile = fopen ("output.txt","w");
    if (pFile == NULL) {
        fprintf(stderr, "failed to open output.txt for writing\n");
        exit(EXIT_FAILURE);
    }
    int counter = 0;
    /////32MB pointer-chase stride; the loop shape is kept so additional
    /////strides can be re-enabled by widening the bound.
    for(int data_stride = 16 * 256 * 1024; data_stride <= 16 * 256 * 1024; data_stride = data_stride * 2){
        fprintf(pFile,"*\n*\n*\n plain managed\n");
        fflush(pFile);
        run_latency_sweep(pFile, GPU_data_out, clock_rate, data_stride, dev_id, &counter, MODE_MANAGED);
        fprintf(pFile,"*\n*\n*\n preferredlocation\n");
        fflush(pFile);
        run_latency_sweep(pFile, GPU_data_out, clock_rate, data_stride, dev_id, &counter, MODE_PREFERRED_LOCATION);
        fprintf(pFile,"*\n*\n*\n accessedby\n");
        fflush(pFile);
        run_latency_sweep(pFile, GPU_data_out, clock_rate, data_stride, dev_id, &counter, MODE_ACCESSED_BY);
        fprintf(pFile,"*\n*\n*\n pinned\n");
        fflush(pFile);
        run_latency_sweep(pFile, GPU_data_out, clock_rate, data_stride, dev_id, &counter, MODE_PINNED);
        fprintf(pFile,"*\n*\n*\n memcopy\n");
        fflush(pFile);
        run_latency_sweep(pFile, GPU_data_out, clock_rate, data_stride, dev_id, &counter, MODE_MEMCPY);
    }
    checkCudaErrors(cudaFree(GPU_data_out));
    fclose (pFile);
    exit(EXIT_SUCCESS);
}
|
a6fff63b004e8be5174dfc38b6fb24b54903a9da.hip
|
// !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
/*
All modification made by Cambricon Corporation: 2018-2019 Cambricon Corporation
All rights reserved.
All other contributions:
Copyright (c) 2014--2019, the respective contributors
All rights reserved.
For the list of contributors go to https://github.com/BVLC/caffe/blob/master/CONTRIBUTORS.md
Redistribution and use in source and binary forms, with or without
modification, are permitted provided that the following conditions are met:
* Redistributions of source code must retain the above copyright notice,
this list of conditions and the following disclaimer.
* Redistributions in binary form must reproduce the above copyright
notice, this list of conditions and the following disclaimer in the
documentation and/or other materials provided with the distribution.
* Neither the name of Intel Corporation nor the names of its contributors
may be used to endorse or promote products derived from this software
without specific prior written permission.
THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE LIABLE
FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
*/
#include <algorithm>
#include <cfloat>
#include <vector>
#include "caffe/layers/roi_pooling_layer.hpp"
using std::max;
using std::min;
namespace caffe {
// ROI max-pooling forward kernel: one thread per pooled output element
// (n, c, ph, pw), iterated via CUDA_KERNEL_LOOP. bottom_rois stores 5 values
// per ROI — (batch_index, x1, y1, x2, y2) — in input-image coordinates, which
// spatial_scale maps onto the feature map. Each thread takes the max over its
// bin's feature-map window, writing the value to top_data and the winning
// flat index (h * width + w) to argmax_data; -1 marks an empty bin so the
// backward pass propagates nothing for it.
template <typename Dtype>
__global__ void ROIPoolForward(const int nthreads, const Dtype* bottom_data,
    const Dtype spatial_scale, const int channels, const int height,
    const int width, const int pooled_height, const int pooled_width,
    const Dtype* bottom_rois, Dtype* top_data, int* argmax_data) {
  CUDA_KERNEL_LOOP(index, nthreads) {
    // (n, c, ph, pw) is an element in the pooled output
    int pw = index % pooled_width;
    int ph = (index / pooled_width) % pooled_height;
    int c = (index / pooled_width / pooled_height) % channels;
    int n = index / pooled_width / pooled_height / channels;
    // Jump to this output's ROI record (5 values per ROI).
    bottom_rois += n * 5;
    int roi_batch_ind = bottom_rois[0];
    // ROI corners rounded to the nearest feature-map cell.
    int roi_start_w = round(bottom_rois[1] * spatial_scale);
    int roi_start_h = round(bottom_rois[2] * spatial_scale);
    int roi_end_w = round(bottom_rois[3] * spatial_scale);
    int roi_end_h = round(bottom_rois[4] * spatial_scale);
    // Force malformed ROIs to be 1x1
    int roi_width = max(roi_end_w - roi_start_w + 1, 1);
    int roi_height = max(roi_end_h - roi_start_h + 1, 1);
    // Fractional size of one pooling bin in feature-map cells.
    Dtype bin_size_h = static_cast<Dtype>(roi_height)
                       / static_cast<Dtype>(pooled_height);
    Dtype bin_size_w = static_cast<Dtype>(roi_width)
                      / static_cast<Dtype>(pooled_width);
    // Window covered by bin (ph, pw), relative to the ROI origin; floor/ceil
    // so adjacent bins may overlap by one cell rather than leave gaps.
    int hstart = static_cast<int>(floor(static_cast<Dtype>(ph)
        * bin_size_h));
    int wstart = static_cast<int>(floor(static_cast<Dtype>(pw)
        * bin_size_w));
    int hend = static_cast<int>(ceil(static_cast<Dtype>(ph + 1)
        * bin_size_h));
    int wend = static_cast<int>(ceil(static_cast<Dtype>(pw + 1)
        * bin_size_w));
    // Add roi offsets and clip to input boundaries
    hstart = min(max(hstart + roi_start_h, 0), height);
    hend = min(max(hend + roi_start_h, 0), height);
    wstart = min(max(wstart + roi_start_w, 0), width);
    wend = min(max(wend + roi_start_w, 0), width);
    bool is_empty = (hend <= hstart) || (wend <= wstart);
    // Define an empty pooling region to be zero
    Dtype maxval = is_empty ? 0 : -FLT_MAX;
    // If nothing is pooled, argmax = -1 causes nothing to be backprop'd
    int maxidx = -1;
    // Jump to this ROI's (batch, channel) plane of the input feature map.
    bottom_data += (roi_batch_ind * channels + c) * height * width;
    for (int h = hstart; h < hend; ++h) {
      for (int w = wstart; w < wend; ++w) {
        int bottom_index = h * width + w;
        if (bottom_data[bottom_index] > maxval) {
          maxval = bottom_data[bottom_index];
          maxidx = bottom_index;
        }
      }
    }
    top_data[index] = maxval;
    argmax_data[index] = maxidx;
  }
}
// Launches ROIPoolForward with one thread per pooled output element,
// pooling the bottom[0] feature map over the ROIs in bottom[1]; the argmax
// index of every bin is cached in max_idx_ for the backward pass.
template <typename Dtype>
void ROIPoolingLayer<Dtype>::Forward_gpu(const vector<Blob<Dtype>*>& bottom,
    const vector<Blob<Dtype>*>& top) {
  const int total = top[0]->count();
  const Dtype* features = bottom[0]->gpu_data();
  const Dtype* rois = bottom[1]->gpu_data();
  Dtype* pooled = top[0]->mutable_gpu_data();
  int* argmax = max_idx_.mutable_gpu_data();
  // NOLINT_NEXT_LINE(whitespace/operators)
  hipLaunchKernelGGL(( ROIPoolForward<Dtype>), dim3(CAFFE_GET_BLOCKS(total)), dim3(CAFFE_CUDA_NUM_THREADS), 0, 0,
      total, features, spatial_scale_, channels_, height_, width_,
      pooled_height_, pooled_width_, rois, pooled, argmax);
  CUDA_POST_KERNEL_CHECK;
}
// ROI max-pooling backward kernel: one thread per bottom (input) element
// (n, c, h, w). Each thread scans every ROI whose batch index matches n and
// accumulates the top gradient of each pooled bin whose recorded argmax is
// exactly this element. Gathering on the bottom side (instead of scattering
// from the top) means each thread writes only its own bottom_diff[index],
// so no atomics are needed.
template <typename Dtype>
__global__ void ROIPoolBackward(const int nthreads, const Dtype* top_diff,
    const int* argmax_data, const int num_rois, const Dtype spatial_scale,
    const int channels, const int height, const int width,
    const int pooled_height, const int pooled_width, Dtype* bottom_diff,
    const Dtype* bottom_rois) {
  CUDA_KERNEL_LOOP(index, nthreads) {
    // (n, c, h, w) coords in bottom data
    int w = index % width;
    int h = (index / width) % height;
    int c = (index / width / height) % channels;
    int n = index / width / height / channels;
    Dtype gradient = 0;
    // Accumulate gradient over all ROIs that pooled this element
    for (int roi_n = 0; roi_n < num_rois; ++roi_n) {
      // 5 values per ROI: (batch_index, x1, y1, x2, y2).
      const Dtype* offset_bottom_rois = bottom_rois + roi_n * 5;
      int roi_batch_ind = offset_bottom_rois[0];
      // Skip if ROI's batch index doesn't match n
      if (n != roi_batch_ind) {
        continue;
      }
      // ROI corners scaled onto the feature map, same rounding as forward.
      int roi_start_w = round(offset_bottom_rois[1] * spatial_scale);
      int roi_start_h = round(offset_bottom_rois[2] * spatial_scale);
      int roi_end_w = round(offset_bottom_rois[3] * spatial_scale);
      int roi_end_h = round(offset_bottom_rois[4] * spatial_scale);
      // Skip if ROI doesn't include (h, w)
      const bool in_roi = (w >= roi_start_w && w <= roi_end_w &&
                           h >= roi_start_h && h <= roi_end_h);
      if (!in_roi) {
        continue;
      }
      // Offset of this ROI/channel's plane within the pooled output.
      int offset = (roi_n * channels + c) * pooled_height * pooled_width;
      const Dtype* offset_top_diff = top_diff + offset;
      const int* offset_argmax_data = argmax_data + offset;
      // Compute feasible set of pooled units that could have pooled
      // this bottom unit
      // Force malformed ROIs to be 1x1
      int roi_width = max(roi_end_w - roi_start_w + 1, 1);
      int roi_height = max(roi_end_h - roi_start_h + 1, 1);
      Dtype bin_size_h = static_cast<Dtype>(roi_height)
                         / static_cast<Dtype>(pooled_height);
      Dtype bin_size_w = static_cast<Dtype>(roi_width)
                        / static_cast<Dtype>(pooled_width);
      // Range of bins whose forward window could contain (h, w), clipped to
      // the pooled grid.
      int phstart = floor(static_cast<Dtype>(h - roi_start_h) / bin_size_h);
      int phend = ceil(static_cast<Dtype>(h - roi_start_h + 1) / bin_size_h);
      int pwstart = floor(static_cast<Dtype>(w - roi_start_w) / bin_size_w);
      int pwend = ceil(static_cast<Dtype>(w - roi_start_w + 1) / bin_size_w);
      phstart = min(max(phstart, 0), pooled_height);
      phend = min(max(phend, 0), pooled_height);
      pwstart = min(max(pwstart, 0), pooled_width);
      pwend = min(max(pwend, 0), pooled_width);
      // Only bins whose argmax points at exactly this element contribute.
      for (int ph = phstart; ph < phend; ++ph) {
        for (int pw = pwstart; pw < pwend; ++pw) {
          if (offset_argmax_data[ph * pooled_width + pw] == (h * width + w)) {
            gradient += offset_top_diff[ph * pooled_width + pw];
          }
        }
      }
    }
    bottom_diff[index] = gradient;
  }
}
// Backpropagates top gradients through ROI max pooling: each bottom element
// accumulates the gradients of every pooled bin whose stored argmax (from
// max_idx_, filled during forward) selected it.
template <typename Dtype>
void ROIPoolingLayer<Dtype>::Backward_gpu(const vector<Blob<Dtype>*>& top,
    const vector<bool>& propagate_down, const vector<Blob<Dtype>*>& bottom) {
  if (!propagate_down[0]) {
    return;  // no downstream consumer wants a gradient
  }
  Dtype* grad_in = bottom[0]->mutable_gpu_diff();
  const int total = bottom[0]->count();
  // Clear the whole bottom gradient before the kernel fills it in.
  caffe_gpu_set(total, Dtype(0.), grad_in);
  const Dtype* grad_out = top[0]->gpu_diff();
  const Dtype* rois = bottom[1]->gpu_data();
  const int* argmax = max_idx_.gpu_data();
  // NOLINT_NEXT_LINE(whitespace/operators)
  hipLaunchKernelGGL(( ROIPoolBackward<Dtype>), dim3(CAFFE_GET_BLOCKS(total)), dim3(CAFFE_CUDA_NUM_THREADS), 0, 0,
      total, grad_out, argmax, top[0]->num(), spatial_scale_, channels_,
      height_, width_, pooled_height_, pooled_width_, grad_in, rois);
  CUDA_POST_KERNEL_CHECK;
}
INSTANTIATE_LAYER_GPU_FUNCS(ROIPoolingLayer);
} // namespace caffe
|
a6fff63b004e8be5174dfc38b6fb24b54903a9da.cu
|
/*
All modification made by Cambricon Corporation: © 2018-2019 Cambricon Corporation
All rights reserved.
All other contributions:
Copyright (c) 2014--2019, the respective contributors
All rights reserved.
For the list of contributors go to https://github.com/BVLC/caffe/blob/master/CONTRIBUTORS.md
Redistribution and use in source and binary forms, with or without
modification, are permitted provided that the following conditions are met:
* Redistributions of source code must retain the above copyright notice,
this list of conditions and the following disclaimer.
* Redistributions in binary form must reproduce the above copyright
notice, this list of conditions and the following disclaimer in the
documentation and/or other materials provided with the distribution.
* Neither the name of Intel Corporation nor the names of its contributors
may be used to endorse or promote products derived from this software
without specific prior written permission.
THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE LIABLE
FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
*/
#include <algorithm>
#include <cfloat>
#include <vector>
#include "caffe/layers/roi_pooling_layer.hpp"
using std::max;
using std::min;
namespace caffe {
// ROI max-pooling forward kernel: one thread per pooled output element
// (n, c, ph, pw), iterated via CUDA_KERNEL_LOOP. bottom_rois stores 5 values
// per ROI — (batch_index, x1, y1, x2, y2) — in input-image coordinates, which
// spatial_scale maps onto the feature map. Each thread takes the max over its
// bin's feature-map window, writing the value to top_data and the winning
// flat index (h * width + w) to argmax_data; -1 marks an empty bin so the
// backward pass propagates nothing for it.
template <typename Dtype>
__global__ void ROIPoolForward(const int nthreads, const Dtype* bottom_data,
    const Dtype spatial_scale, const int channels, const int height,
    const int width, const int pooled_height, const int pooled_width,
    const Dtype* bottom_rois, Dtype* top_data, int* argmax_data) {
  CUDA_KERNEL_LOOP(index, nthreads) {
    // (n, c, ph, pw) is an element in the pooled output
    int pw = index % pooled_width;
    int ph = (index / pooled_width) % pooled_height;
    int c = (index / pooled_width / pooled_height) % channels;
    int n = index / pooled_width / pooled_height / channels;
    // Jump to this output's ROI record (5 values per ROI).
    bottom_rois += n * 5;
    int roi_batch_ind = bottom_rois[0];
    // ROI corners rounded to the nearest feature-map cell.
    int roi_start_w = round(bottom_rois[1] * spatial_scale);
    int roi_start_h = round(bottom_rois[2] * spatial_scale);
    int roi_end_w = round(bottom_rois[3] * spatial_scale);
    int roi_end_h = round(bottom_rois[4] * spatial_scale);
    // Force malformed ROIs to be 1x1
    int roi_width = max(roi_end_w - roi_start_w + 1, 1);
    int roi_height = max(roi_end_h - roi_start_h + 1, 1);
    // Fractional size of one pooling bin in feature-map cells.
    Dtype bin_size_h = static_cast<Dtype>(roi_height)
                       / static_cast<Dtype>(pooled_height);
    Dtype bin_size_w = static_cast<Dtype>(roi_width)
                      / static_cast<Dtype>(pooled_width);
    // Window covered by bin (ph, pw), relative to the ROI origin; floor/ceil
    // so adjacent bins may overlap by one cell rather than leave gaps.
    int hstart = static_cast<int>(floor(static_cast<Dtype>(ph)
        * bin_size_h));
    int wstart = static_cast<int>(floor(static_cast<Dtype>(pw)
        * bin_size_w));
    int hend = static_cast<int>(ceil(static_cast<Dtype>(ph + 1)
        * bin_size_h));
    int wend = static_cast<int>(ceil(static_cast<Dtype>(pw + 1)
        * bin_size_w));
    // Add roi offsets and clip to input boundaries
    hstart = min(max(hstart + roi_start_h, 0), height);
    hend = min(max(hend + roi_start_h, 0), height);
    wstart = min(max(wstart + roi_start_w, 0), width);
    wend = min(max(wend + roi_start_w, 0), width);
    bool is_empty = (hend <= hstart) || (wend <= wstart);
    // Define an empty pooling region to be zero
    Dtype maxval = is_empty ? 0 : -FLT_MAX;
    // If nothing is pooled, argmax = -1 causes nothing to be backprop'd
    int maxidx = -1;
    // Jump to this ROI's (batch, channel) plane of the input feature map.
    bottom_data += (roi_batch_ind * channels + c) * height * width;
    for (int h = hstart; h < hend; ++h) {
      for (int w = wstart; w < wend; ++w) {
        int bottom_index = h * width + w;
        if (bottom_data[bottom_index] > maxval) {
          maxval = bottom_data[bottom_index];
          maxidx = bottom_index;
        }
      }
    }
    top_data[index] = maxval;
    argmax_data[index] = maxidx;
  }
}
// Launches ROIPoolForward with one thread per pooled output element,
// pooling the bottom[0] feature map over the ROIs in bottom[1]; the argmax
// index of every bin is cached in max_idx_ for the backward pass.
template <typename Dtype>
void ROIPoolingLayer<Dtype>::Forward_gpu(const vector<Blob<Dtype>*>& bottom,
    const vector<Blob<Dtype>*>& top) {
  const int total = top[0]->count();
  const Dtype* features = bottom[0]->gpu_data();
  const Dtype* rois = bottom[1]->gpu_data();
  Dtype* pooled = top[0]->mutable_gpu_data();
  int* argmax = max_idx_.mutable_gpu_data();
  // NOLINT_NEXT_LINE(whitespace/operators)
  ROIPoolForward<Dtype><<<CAFFE_GET_BLOCKS(total), CAFFE_CUDA_NUM_THREADS>>>(
      total, features, spatial_scale_, channels_, height_, width_,
      pooled_height_, pooled_width_, rois, pooled, argmax);
  CUDA_POST_KERNEL_CHECK;
}
// ROI max-pooling backward kernel: one thread per bottom (input) element
// (n, c, h, w). Each thread scans every ROI whose batch index matches n and
// accumulates the top gradient of each pooled bin whose recorded argmax is
// exactly this element. Gathering on the bottom side (instead of scattering
// from the top) means each thread writes only its own bottom_diff[index],
// so no atomics are needed.
template <typename Dtype>
__global__ void ROIPoolBackward(const int nthreads, const Dtype* top_diff,
    const int* argmax_data, const int num_rois, const Dtype spatial_scale,
    const int channels, const int height, const int width,
    const int pooled_height, const int pooled_width, Dtype* bottom_diff,
    const Dtype* bottom_rois) {
  CUDA_KERNEL_LOOP(index, nthreads) {
    // (n, c, h, w) coords in bottom data
    int w = index % width;
    int h = (index / width) % height;
    int c = (index / width / height) % channels;
    int n = index / width / height / channels;
    Dtype gradient = 0;
    // Accumulate gradient over all ROIs that pooled this element
    for (int roi_n = 0; roi_n < num_rois; ++roi_n) {
      // 5 values per ROI: (batch_index, x1, y1, x2, y2).
      const Dtype* offset_bottom_rois = bottom_rois + roi_n * 5;
      int roi_batch_ind = offset_bottom_rois[0];
      // Skip if ROI's batch index doesn't match n
      if (n != roi_batch_ind) {
        continue;
      }
      // ROI corners scaled onto the feature map, same rounding as forward.
      int roi_start_w = round(offset_bottom_rois[1] * spatial_scale);
      int roi_start_h = round(offset_bottom_rois[2] * spatial_scale);
      int roi_end_w = round(offset_bottom_rois[3] * spatial_scale);
      int roi_end_h = round(offset_bottom_rois[4] * spatial_scale);
      // Skip if ROI doesn't include (h, w)
      const bool in_roi = (w >= roi_start_w && w <= roi_end_w &&
                           h >= roi_start_h && h <= roi_end_h);
      if (!in_roi) {
        continue;
      }
      // Offset of this ROI/channel's plane within the pooled output.
      int offset = (roi_n * channels + c) * pooled_height * pooled_width;
      const Dtype* offset_top_diff = top_diff + offset;
      const int* offset_argmax_data = argmax_data + offset;
      // Compute feasible set of pooled units that could have pooled
      // this bottom unit
      // Force malformed ROIs to be 1x1
      int roi_width = max(roi_end_w - roi_start_w + 1, 1);
      int roi_height = max(roi_end_h - roi_start_h + 1, 1);
      Dtype bin_size_h = static_cast<Dtype>(roi_height)
                         / static_cast<Dtype>(pooled_height);
      Dtype bin_size_w = static_cast<Dtype>(roi_width)
                        / static_cast<Dtype>(pooled_width);
      // Range of bins whose forward window could contain (h, w), clipped to
      // the pooled grid.
      int phstart = floor(static_cast<Dtype>(h - roi_start_h) / bin_size_h);
      int phend = ceil(static_cast<Dtype>(h - roi_start_h + 1) / bin_size_h);
      int pwstart = floor(static_cast<Dtype>(w - roi_start_w) / bin_size_w);
      int pwend = ceil(static_cast<Dtype>(w - roi_start_w + 1) / bin_size_w);
      phstart = min(max(phstart, 0), pooled_height);
      phend = min(max(phend, 0), pooled_height);
      pwstart = min(max(pwstart, 0), pooled_width);
      pwend = min(max(pwend, 0), pooled_width);
      // Only bins whose argmax points at exactly this element contribute.
      for (int ph = phstart; ph < phend; ++ph) {
        for (int pw = pwstart; pw < pwend; ++pw) {
          if (offset_argmax_data[ph * pooled_width + pw] == (h * width + w)) {
            gradient += offset_top_diff[ph * pooled_width + pw];
          }
        }
      }
    }
    bottom_diff[index] = gradient;
  }
}
// GPU backward pass for ROI pooling: zeroes the bottom gradient buffer, then
// launches ROIPoolBackward with one thread per bottom element.
// bottom[0] = feature map, bottom[1] = ROIs, top[0] = pooled output;
// max_idx_ holds the argmax indices recorded by the forward pass.
template <typename Dtype>
void ROIPoolingLayer<Dtype>::Backward_gpu(const vector<Blob<Dtype>*>& top,
    const vector<bool>& propagate_down, const vector<Blob<Dtype>*>& bottom) {
  // Nothing to do if the feature map does not need gradients.
  if (!propagate_down[0]) {
    return;
  }
  const Dtype* bottom_rois = bottom[1]->gpu_data();
  const Dtype* top_diff = top[0]->gpu_diff();
  Dtype* bottom_diff = bottom[0]->mutable_gpu_diff();
  const int count = bottom[0]->count();
  // The kernel overwrites every element of bottom_diff, but clear it first so
  // elements covered by no ROI are a well-defined zero.
  caffe_gpu_set(count, Dtype(0.), bottom_diff);
  const int* argmax_data = max_idx_.gpu_data();
  // NOLINT_NEXT_LINE(whitespace/operators)
  ROIPoolBackward<Dtype><<<CAFFE_GET_BLOCKS(count), CAFFE_CUDA_NUM_THREADS>>>(
      count, top_diff, argmax_data, top[0]->num(), spatial_scale_, channels_,
      height_, width_, pooled_height_, pooled_width_, bottom_diff, bottom_rois);
  CUDA_POST_KERNEL_CHECK;
}
INSTANTIATE_LAYER_GPU_FUNCS(ROIPoolingLayer);
} // namespace caffe
|
01bde793d350103d73a6c365952c280858091eec.hip
|
// !!! This is a file automatically generated by hipify!!!
// Matrix Multiplication in CUDA
#include <stdio.h>
//#include <string.h>
//#include <assert.h>
//#include <stdlib.h>
#include <hip/hip_runtime.h>
#define TM 1
// includes, project
////////////////////////////////////////////////////////////////////////////////
// declarations, forward
#define WIDTH 32
extern "C"
void computeGold(float*, const float*, const float*, unsigned int, unsigned int, unsigned int);
// FILL HERE: define constant variable
// MatrixMul kernel
/**
* CUDA Kernel Device code
*
* Computes the matrix multiplication of A and B into C. The 3 matrices have the same
* number of elements WIDTH*WIDTH.
*/
// FILL HERE: translate C-version matrixMul to CUDA-version kernel code
/**
 * Naive matrix-multiply kernel: C = A * B for WIDTH x WIDTH row-major
 * matrices, one output element per thread (tid -> (row, col)).
 * Also records per-thread elapsed clock64() cycles into runtime[tid].
 *
 * Expects a single-block launch with at least WIDTH*WIDTH threads; surplus
 * threads exit without touching memory.
 */
__global__ void MatrixMul(float* A, float* B, float* C, unsigned long long* runtime)
{
    unsigned long long start_time = clock64();
    int tid = threadIdx.x;
    // Guard: the host hard-codes 1024 threads, so make any thread beyond the
    // WIDTH*WIDTH output elements idle instead of writing out of bounds.
    if (tid < WIDTH * WIDTH)
    {
        int row = tid / WIDTH;
        int col = tid % WIDTH;
        // BUGFIX: the original accumulated with 'C[...] += ...', which reads
        // device memory the host never initialized (hipMalloc does not zero).
        // Accumulate in a register and overwrite C instead; this is also one
        // global store per element rather than WIDTH read-modify-writes.
        float sum = 0.0f;
        for (int k = 0; k < WIDTH; k++)
        {
            sum += A[row * WIDTH + k] * B[k * WIDTH + col];
        }
        C[row * WIDTH + col] = sum;
        unsigned long long stop_time = clock64();
        runtime[tid] = stop_time - start_time;
    }
}
/**
* Host main routine
*/
/**
 * Host main routine: multiplies two WIDTH x WIDTH matrices on the GPU with
 * the MatrixMul kernel, verifies the result against a CPU reference
 * (computeGold), and reports the longest per-thread kernel runtime in cycles.
 */
int main(void)
{
    // Error code to check return values for CUDA calls
    hipError_t err = hipSuccess;
    // Print the matrix size to be used, and compute its size
    int size = WIDTH*WIDTH*sizeof(float);
    printf("[MatrixMul of %d x %d elements]\n", WIDTH, WIDTH);
    // Allocate the host input matrix h_A
    float *h_A = (float *)malloc(size);
    // Allocate the host input matrix h_B
    float *h_B = (float *)malloc(size);
    // Allocate the host output matrix h_C
    float *h_C = (float *)malloc(size);
    // Allocate the host matrix for compute check
    float *reference = (float *)malloc(size);
    // Verify that allocations succeeded
    if (h_A == NULL || h_B == NULL || h_C == NULL || reference == NULL)
    {
        fprintf(stderr, "Failed to allocate host matrices!\n");
        exit(EXIT_FAILURE);
    }
    // Initialize the host input matrices
    for (int i = 0; i < WIDTH; ++i)
    {
        for (int j = 0; j < WIDTH; ++j)
        {
            h_A[i*WIDTH + j] = 0.01f;
            h_B[i*WIDTH + j] = 1.0f;
        }
    }
    memset(h_C, 0, size);
    memset(reference, 0, size);
    // compute the matrix multiplication on the CPU for comparison
    computeGold(reference, h_A, h_B, WIDTH, WIDTH, WIDTH);
    // Allocate device input matrices
    float* d_A = NULL;
    err = hipMalloc((void**)&d_A, size);
    if (err != hipSuccess)
    {
        fprintf(stderr, "Failed to allocate device matrix A (error code %s)!\n", hipGetErrorString(err));
        exit(EXIT_FAILURE);
    }
    float* d_B = NULL;
    err = hipMalloc((void**)&d_B, size);
    if (err != hipSuccess)
    {
        fprintf(stderr, "Failed to allocate device matrix B (error code %s)!\n", hipGetErrorString(err));
        exit(EXIT_FAILURE);
    }
    // Allocate the device output matrix
    float* d_C = NULL;
    err = hipMalloc((void**)&d_C, size);
    if (err != hipSuccess)
    {
        fprintf(stderr, "Failed to allocate device matrix C (error code %s)!\n", hipGetErrorString(err));
        exit(EXIT_FAILURE);
    }
    // BUGFIX: the kernel accumulates into C with '+=', but hipMalloc does not
    // initialize device memory -- zero d_C before the launch.
    err = hipMemset(d_C, 0, size);
    if (err != hipSuccess)
    {
        fprintf(stderr, "Failed to zero device matrix C (error code %s)!\n", hipGetErrorString(err));
        exit(EXIT_FAILURE);
    }
    // Copy the host input matrices A and B to device memory
    printf("Copy input data from the host memory to the CUDA device\n");
    err = hipMemcpy(d_A, h_A, size, hipMemcpyHostToDevice);
    if (err != hipSuccess)
    {
        fprintf(stderr, "Failed to copy matrix A from host to device (error code %s)!\n", hipGetErrorString(err));
        exit(EXIT_FAILURE);
    }
    err = hipMemcpy(d_B, h_B, size, hipMemcpyHostToDevice);
    if (err != hipSuccess)
    {
        fprintf(stderr, "Failed to copy matrix B from host to device (error code %s)!\n", hipGetErrorString(err));
        exit(EXIT_FAILURE);
    }
    // Clock measurement buffers: one clock64() delta per thread
#ifdef TM
    unsigned long long* d_runtime;
    int r_size = WIDTH*WIDTH*sizeof(unsigned long long);
    unsigned long long* runtime = (unsigned long long*)malloc(r_size);
    if (runtime == NULL)
    {
        fprintf(stderr, "Failed to allocate host runtime buffer!\n");
        exit(EXIT_FAILURE);
    }
    memset(runtime, 0, r_size);
    err = hipMalloc((void**)&d_runtime, r_size);
    if (err != hipSuccess)
    {
        fprintf(stderr, "Failed to allocate device runtime buffer (error code %s)!\n", hipGetErrorString(err));
        exit(EXIT_FAILURE);
    }
#endif
    // Kernel invocation: one thread per output element, single block.
    int blocksPerGrid = 1;
    // Tie the launch size to the matrix size instead of hard-coding 1024 so
    // changing WIDTH cannot silently leave elements uncomputed.
    int threadsPerBlock = WIDTH*WIDTH;
    printf("CUDA kernel launch with %d blocks of %d threads\n", blocksPerGrid, threadsPerBlock);
    hipLaunchKernelGGL(( MatrixMul), dim3(blocksPerGrid),dim3(threadsPerBlock), 0, 0, d_A, d_B, d_C, d_runtime);
    err = hipGetLastError();
    if (err != hipSuccess)
    {
        fprintf(stderr, "Failed to launch matrixMul kernel (error code %s)!\n", hipGetErrorString(err));
        exit(EXIT_FAILURE);
    }
    hipDeviceSynchronize();
    // Copy the device result matrix in device memory to the host result matrix
    // in host memory (hipMemcpy is blocking, so the result is ready on return).
    printf("Copy output data from the CUDA device to the host memory\n");
    err = hipMemcpy(h_C, d_C, size, hipMemcpyDeviceToHost);
    if (err != hipSuccess)
    {
        fprintf(stderr, "Failed to copy matrix C from device to host (error code %s)!\n", hipGetErrorString(err));
        exit(EXIT_FAILURE);
    }
    // Verify the result against the CPU reference with a small tolerance.
    bool res = 1;
    for (int i = 0; i < WIDTH*WIDTH; i++)
    {
        float diff = fabs(reference[i] - h_C[i]);
        if(diff > 0.001f)
        {
            res = 0;
            break;
        }
    }
    printf("Test %s\n", (res == 1) ? "PASSED" : "FAILED");
    // Take the longest per-thread cycle count as the kernel execution time.
#ifdef TM
    err = hipMemcpy(runtime, d_runtime, r_size, hipMemcpyDeviceToHost);
    if (err != hipSuccess)
    {
        fprintf(stderr, "Failed to copy runtime buffer from device to host (error code %s)!\n", hipGetErrorString(err));
        exit(EXIT_FAILURE);
    }
    unsigned long long elapsed_time = 0;
    for(int i = 0; i < WIDTH*WIDTH; i++)
        if(elapsed_time < runtime[i])
            elapsed_time = runtime[i];
    printf("Kernel Execution Time: %llu cycles\n", elapsed_time);
#endif
    // Free device global memory
    hipFree(d_A);
    hipFree(d_B);
    hipFree(d_C);
#ifdef TM
    hipFree(d_runtime);
#endif
    // Free host memory
    free(h_A);
    free(h_B);
    free(h_C);
    free(reference);
#ifdef TM
    free(runtime);
#endif
    return 0;
}
// Reference (host) matrix multiplication: C = A * B.
// A is hA x wA, B is wA x wB, C is hA x wB, all row-major.
// Accumulates in double so the reference is less rounding-sensitive than the
// single-precision GPU result it is compared against.
void
computeGold(float* C, const float* A, const float* B, unsigned int hA, unsigned int wA, unsigned int wB)
{
    for (unsigned int row = 0; row < hA; ++row) {
        const float* aRow = A + row * wA;
        for (unsigned int col = 0; col < wB; ++col) {
            double acc = 0.0;
            for (unsigned int k = 0; k < wA; ++k)
                acc += (double)aRow[k] * (double)B[k * wB + col];
            C[row * wB + col] = (float)acc;
        }
    }
}
|
01bde793d350103d73a6c365952c280858091eec.cu
|
// Matrix Multiplication in CUDA
#include <stdio.h>
//#include <string.h>
//#include <assert.h>
//#include <stdlib.h>
#include <cuda_runtime.h>
#define TM 1
// includes, project
////////////////////////////////////////////////////////////////////////////////
// declarations, forward
#define WIDTH 32
extern "C"
void computeGold(float*, const float*, const float*, unsigned int, unsigned int, unsigned int);
// FILL HERE: define constant variable
// MatrixMul kernel
/**
* CUDA Kernel Device code
*
* Computes the matrix multiplication of A and B into C. The 3 matrices have the same
* number of elements WIDTH*WIDTH.
*/
// FILL HERE: translate C-version matrixMul to CUDA-version kernel code
/**
 * Naive matrix-multiply kernel: C = A * B for WIDTH x WIDTH row-major
 * matrices, one output element per thread (tid -> (row, col)).
 * Also records per-thread elapsed clock64() cycles into runtime[tid].
 *
 * Expects a single-block launch with at least WIDTH*WIDTH threads; surplus
 * threads exit without touching memory.
 */
__global__ void MatrixMul(float* A, float* B, float* C, unsigned long long* runtime)
{
    unsigned long long start_time = clock64();
    int tid = threadIdx.x;
    // Guard: the host hard-codes 1024 threads, so make any thread beyond the
    // WIDTH*WIDTH output elements idle instead of writing out of bounds.
    if (tid < WIDTH * WIDTH)
    {
        int row = tid / WIDTH;
        int col = tid % WIDTH;
        // BUGFIX: the original accumulated with 'C[...] += ...', which reads
        // device memory the host never initialized (cudaMalloc does not zero).
        // Accumulate in a register and overwrite C instead; this is also one
        // global store per element rather than WIDTH read-modify-writes.
        float sum = 0.0f;
        for (int k = 0; k < WIDTH; k++)
        {
            sum += A[row * WIDTH + k] * B[k * WIDTH + col];
        }
        C[row * WIDTH + col] = sum;
        unsigned long long stop_time = clock64();
        runtime[tid] = stop_time - start_time;
    }
}
/**
* Host main routine
*/
/**
 * Host main routine: multiplies two WIDTH x WIDTH matrices on the GPU with
 * the MatrixMul kernel, verifies the result against a CPU reference
 * (computeGold), and reports the longest per-thread kernel runtime in cycles.
 */
int main(void)
{
    // Error code to check return values for CUDA calls
    cudaError_t err = cudaSuccess;
    // Print the matrix size to be used, and compute its size
    int size = WIDTH*WIDTH*sizeof(float);
    printf("[MatrixMul of %d x %d elements]\n", WIDTH, WIDTH);
    // Allocate the host input matrix h_A
    float *h_A = (float *)malloc(size);
    // Allocate the host input matrix h_B
    float *h_B = (float *)malloc(size);
    // Allocate the host output matrix h_C
    float *h_C = (float *)malloc(size);
    // Allocate the host matrix for compute check
    float *reference = (float *)malloc(size);
    // Verify that allocations succeeded
    if (h_A == NULL || h_B == NULL || h_C == NULL || reference == NULL)
    {
        fprintf(stderr, "Failed to allocate host matrices!\n");
        exit(EXIT_FAILURE);
    }
    // Initialize the host input matrices
    for (int i = 0; i < WIDTH; ++i)
    {
        for (int j = 0; j < WIDTH; ++j)
        {
            h_A[i*WIDTH + j] = 0.01f;
            h_B[i*WIDTH + j] = 1.0f;
        }
    }
    memset(h_C, 0, size);
    memset(reference, 0, size);
    // compute the matrix multiplication on the CPU for comparison
    computeGold(reference, h_A, h_B, WIDTH, WIDTH, WIDTH);
    // Allocate device input matrices
    float* d_A = NULL;
    err = cudaMalloc((void**)&d_A, size);
    if (err != cudaSuccess)
    {
        fprintf(stderr, "Failed to allocate device matrix A (error code %s)!\n", cudaGetErrorString(err));
        exit(EXIT_FAILURE);
    }
    float* d_B = NULL;
    err = cudaMalloc((void**)&d_B, size);
    if (err != cudaSuccess)
    {
        fprintf(stderr, "Failed to allocate device matrix B (error code %s)!\n", cudaGetErrorString(err));
        exit(EXIT_FAILURE);
    }
    // Allocate the device output matrix
    float* d_C = NULL;
    err = cudaMalloc((void**)&d_C, size);
    if (err != cudaSuccess)
    {
        fprintf(stderr, "Failed to allocate device matrix C (error code %s)!\n", cudaGetErrorString(err));
        exit(EXIT_FAILURE);
    }
    // BUGFIX: the kernel accumulates into C with '+=', but cudaMalloc does not
    // initialize device memory -- zero d_C before the launch.
    err = cudaMemset(d_C, 0, size);
    if (err != cudaSuccess)
    {
        fprintf(stderr, "Failed to zero device matrix C (error code %s)!\n", cudaGetErrorString(err));
        exit(EXIT_FAILURE);
    }
    // Copy the host input matrices A and B to device memory
    printf("Copy input data from the host memory to the CUDA device\n");
    err = cudaMemcpy(d_A, h_A, size, cudaMemcpyHostToDevice);
    if (err != cudaSuccess)
    {
        fprintf(stderr, "Failed to copy matrix A from host to device (error code %s)!\n", cudaGetErrorString(err));
        exit(EXIT_FAILURE);
    }
    err = cudaMemcpy(d_B, h_B, size, cudaMemcpyHostToDevice);
    if (err != cudaSuccess)
    {
        fprintf(stderr, "Failed to copy matrix B from host to device (error code %s)!\n", cudaGetErrorString(err));
        exit(EXIT_FAILURE);
    }
    // Clock measurement buffers: one clock64() delta per thread
#ifdef TM
    unsigned long long* d_runtime;
    int r_size = WIDTH*WIDTH*sizeof(unsigned long long);
    unsigned long long* runtime = (unsigned long long*)malloc(r_size);
    if (runtime == NULL)
    {
        fprintf(stderr, "Failed to allocate host runtime buffer!\n");
        exit(EXIT_FAILURE);
    }
    memset(runtime, 0, r_size);
    err = cudaMalloc((void**)&d_runtime, r_size);
    if (err != cudaSuccess)
    {
        fprintf(stderr, "Failed to allocate device runtime buffer (error code %s)!\n", cudaGetErrorString(err));
        exit(EXIT_FAILURE);
    }
#endif
    // Kernel invocation: one thread per output element, single block.
    int blocksPerGrid = 1;
    // Tie the launch size to the matrix size instead of hard-coding 1024 so
    // changing WIDTH cannot silently leave elements uncomputed.
    int threadsPerBlock = WIDTH*WIDTH;
    printf("CUDA kernel launch with %d blocks of %d threads\n", blocksPerGrid, threadsPerBlock);
    MatrixMul<<<blocksPerGrid,threadsPerBlock>>>(d_A, d_B, d_C, d_runtime);
    err = cudaGetLastError();
    if (err != cudaSuccess)
    {
        fprintf(stderr, "Failed to launch matrixMul kernel (error code %s)!\n", cudaGetErrorString(err));
        exit(EXIT_FAILURE);
    }
    // cudaThreadSynchronize() is deprecated (removed in CUDA 10+); use
    // cudaDeviceSynchronize() instead.
    cudaDeviceSynchronize();
    // Copy the device result matrix in device memory to the host result matrix
    // in host memory (cudaMemcpy is blocking, so the result is ready on return).
    printf("Copy output data from the CUDA device to the host memory\n");
    err = cudaMemcpy(h_C, d_C, size, cudaMemcpyDeviceToHost);
    if (err != cudaSuccess)
    {
        fprintf(stderr, "Failed to copy matrix C from device to host (error code %s)!\n", cudaGetErrorString(err));
        exit(EXIT_FAILURE);
    }
    // Verify the result against the CPU reference with a small tolerance.
    bool res = 1;
    for (int i = 0; i < WIDTH*WIDTH; i++)
    {
        float diff = fabs(reference[i] - h_C[i]);
        if(diff > 0.001f)
        {
            res = 0;
            break;
        }
    }
    printf("Test %s\n", (res == 1) ? "PASSED" : "FAILED");
    // Take the longest per-thread cycle count as the kernel execution time.
#ifdef TM
    err = cudaMemcpy(runtime, d_runtime, r_size, cudaMemcpyDeviceToHost);
    if (err != cudaSuccess)
    {
        fprintf(stderr, "Failed to copy runtime buffer from device to host (error code %s)!\n", cudaGetErrorString(err));
        exit(EXIT_FAILURE);
    }
    unsigned long long elapsed_time = 0;
    for(int i = 0; i < WIDTH*WIDTH; i++)
        if(elapsed_time < runtime[i])
            elapsed_time = runtime[i];
    printf("Kernel Execution Time: %llu cycles\n", elapsed_time);
#endif
    // Free device global memory
    cudaFree(d_A);
    cudaFree(d_B);
    cudaFree(d_C);
#ifdef TM
    cudaFree(d_runtime);
#endif
    // Free host memory
    free(h_A);
    free(h_B);
    free(h_C);
    free(reference);
#ifdef TM
    free(runtime);
#endif
    return 0;
}
// CPU reference for the GPU kernel: computes C = A * B with row-major
// matrices (A: hA x wA, B: wA x wB, C: hA x wB).
// Each dot product is accumulated in double precision before being narrowed
// back to float, matching the original reference implementation.
void
computeGold(float* C, const float* A, const float* B, unsigned int hA, unsigned int wA, unsigned int wB)
{
    for (unsigned int r = 0; r < hA; ++r) {
        for (unsigned int c = 0; c < wB; ++c) {
            double dot = 0.0;
            const float* a = A + r * wA;
            const float* b = B + c;
            for (unsigned int k = 0; k < wA; ++k, b += wB)
                dot += (double)a[k] * (double)(*b);
            C[r * wB + c] = (float)dot;
        }
    }
}
|
eee9e01a2e2065baefc63bce276c5ada652967d2.hip
|
// !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
// Copyright (c) Facebook, Inc. and its affiliates.
#include <ATen/ATen.h>
#include <ATen/hip/HIPContext.h>
#include <ATen/hip/impl/HIPGuardImplMasqueradingAsCUDA.h>
#include <ATen/hip/HIPApplyUtils.cuh>
// TODO make it in a common file
#define CUDA_1D_KERNEL_LOOP(i, n) \
for (int i = blockIdx.x * blockDim.x + threadIdx.x; i < n; \
i += blockDim.x * gridDim.x)
// Bilinearly interpolates bottom_data (a height x width feature map) at the
// continuous coordinate (y, x). Points more than one pixel outside the map
// yield 0; otherwise coordinates are clamped into [0, dim - 1] and the value
// is a weighted average of the four surrounding pixels.
template <typename T>
__device__ T bilinear_interpolate(
    const T* bottom_data,
    const int height,
    const int width,
    T y,
    T x,
    const int index /* index for debug only*/) {
  // deal with cases that inverse elements are out of feature map boundary
  if (y < -1.0 || y > height || x < -1.0 || x > width) {
    // empty
    return 0;
  }
  // Clamp slightly-negative coordinates to the first pixel.
  if (y <= 0)
    y = 0;
  if (x <= 0)
    x = 0;
  int y_low = (int)y;
  int x_low = (int)x;
  int y_high;
  int x_high;
  // At the top/right edge, collapse both corners onto the last pixel so the
  // interpolation degenerates to a plain lookup.
  if (y_low >= height - 1) {
    y_high = y_low = height - 1;
    y = (T)y_low;
  } else {
    y_high = y_low + 1;
  }
  if (x_low >= width - 1) {
    x_high = x_low = width - 1;
    x = (T)x_low;
  } else {
    x_high = x_low + 1;
  }
  // Fractional offsets within the cell and their complements.
  T ly = y - y_low;
  T lx = x - x_low;
  T hy = 1. - ly, hx = 1. - lx;
  // do bilinear interpolation
  T v1 = bottom_data[y_low * width + x_low];
  T v2 = bottom_data[y_low * width + x_high];
  T v3 = bottom_data[y_high * width + x_low];
  T v4 = bottom_data[y_high * width + x_high];
  T w1 = hy * hx, w2 = hy * lx, w3 = ly * hx, w4 = ly * lx;
  T val = (w1 * v1 + w2 * v2 + w3 * v3 + w4 * v4);
  return val;
}
// RoIAlignForward: one thread per output element (n, c, ph, pw). For each
// pooled bin it averages bilinearly interpolated samples on a regular grid
// inside the bin. ROIs are 5-tuples (batch_index, x1, y1, x2, y2) in input
// coordinates; `aligned` shifts coordinates by half a pixel for exact
// alignment (Detectron2's ROIAlignV2 behavior).
template <typename T>
__global__ void RoIAlignForward(
    const int nthreads,
    const T* bottom_data,
    const T spatial_scale,
    const int channels,
    const int height,
    const int width,
    const int pooled_height,
    const int pooled_width,
    const int sampling_ratio,
    const T* bottom_rois,
    T* top_data,
    bool aligned) {
  CUDA_1D_KERNEL_LOOP(index, nthreads) {
    // (n, c, ph, pw) is an element in the pooled output
    int pw = index % pooled_width;
    int ph = (index / pooled_width) % pooled_height;
    int c = (index / pooled_width / pooled_height) % channels;
    int n = index / pooled_width / pooled_height / channels;
    const T* offset_bottom_rois = bottom_rois + n * 5;
    int roi_batch_ind = offset_bottom_rois[0];
    // Do not use rounding; this implementation detail is critical
    T offset = aligned ? (T)0.5 : (T)0.0;
    T roi_start_w = offset_bottom_rois[1] * spatial_scale - offset;
    T roi_start_h = offset_bottom_rois[2] * spatial_scale - offset;
    T roi_end_w = offset_bottom_rois[3] * spatial_scale - offset;
    T roi_end_h = offset_bottom_rois[4] * spatial_scale - offset;
    T roi_width = roi_end_w - roi_start_w;
    T roi_height = roi_end_h - roi_start_h;
    if (!aligned) { // for backward-compatibility only
      roi_width = max(roi_width, (T)1.);
      roi_height = max(roi_height, (T)1.);
    }
    T bin_size_h = static_cast<T>(roi_height) / static_cast<T>(pooled_height);
    T bin_size_w = static_cast<T>(roi_width) / static_cast<T>(pooled_width);
    // Feature-map plane for this ROI's image and this channel.
    const T* offset_bottom_data =
        bottom_data + (roi_batch_ind * channels + c) * height * width;
    // We use roi_bin_grid to sample the grid and mimic integral
    int roi_bin_grid_h = (sampling_ratio > 0)
        ? sampling_ratio
        : ceil(roi_height / pooled_height); // e.g., = 2
    int roi_bin_grid_w =
        (sampling_ratio > 0) ? sampling_ratio : ceil(roi_width / pooled_width);
    // We do average (integral) pooling inside a bin
    // When the grid is empty, output zeros == 0/1, instead of NaN.
    const T count = max(roi_bin_grid_h * roi_bin_grid_w, 1); // e.g. = 4
    T output_val = 0.;
    for (int iy = 0; iy < roi_bin_grid_h; iy++) // e.g., iy = 0, 1
    {
      // Sample points sit at the centers of a roi_bin_grid_h x roi_bin_grid_w
      // subgrid of the bin.
      const T y = roi_start_h + ph * bin_size_h +
          static_cast<T>(iy + .5f) * bin_size_h /
              static_cast<T>(roi_bin_grid_h); // e.g., 0.5, 1.5
      for (int ix = 0; ix < roi_bin_grid_w; ix++) {
        const T x = roi_start_w + pw * bin_size_w +
            static_cast<T>(ix + .5f) * bin_size_w /
                static_cast<T>(roi_bin_grid_w);
        T val = bilinear_interpolate(
            offset_bottom_data, height, width, y, x, index);
        output_val += val;
      }
    }
    output_val /= count;
    top_data[index] = output_val;
  }
}
// Computes the four bilinear interpolation weights (w1..w4) and the integer
// corner coordinates for the continuous point (y, x) -- i.e. the gradient of
// bilinear_interpolate with respect to the four surrounding pixel values.
// Out-of-range points set every weight to 0 and every corner to -1 so the
// caller can skip them.
template <typename T>
__device__ void bilinear_interpolate_gradient(
    const int height,
    const int width,
    T y,
    T x,
    T& w1,
    T& w2,
    T& w3,
    T& w4,
    int& x_low,
    int& x_high,
    int& y_low,
    int& y_high,
    const int index /* index for debug only*/) {
  // deal with cases that inverse elements are out of feature map boundary
  if (y < -1.0 || y > height || x < -1.0 || x > width) {
    // empty
    w1 = w2 = w3 = w4 = 0.;
    x_low = x_high = y_low = y_high = -1;
    return;
  }
  // Clamp slightly-negative coordinates to the first pixel.
  if (y <= 0)
    y = 0;
  if (x <= 0)
    x = 0;
  y_low = (int)y;
  x_low = (int)x;
  // At the top/right edge, collapse both corners onto the last pixel
  // (mirrors the forward bilinear_interpolate).
  if (y_low >= height - 1) {
    y_high = y_low = height - 1;
    y = (T)y_low;
  } else {
    y_high = y_low + 1;
  }
  if (x_low >= width - 1) {
    x_high = x_low = width - 1;
    x = (T)x_low;
  } else {
    x_high = x_low + 1;
  }
  // Fractional offsets within the cell and their complements.
  T ly = y - y_low;
  T lx = x - x_low;
  T hy = 1. - ly, hx = 1. - lx;
  // reference in forward
  // T v1 = bottom_data[y_low * width + x_low];
  // T v2 = bottom_data[y_low * width + x_high];
  // T v3 = bottom_data[y_high * width + x_low];
  // T v4 = bottom_data[y_high * width + x_high];
  // T val = (w1 * v1 + w2 * v2 + w3 * v3 + w4 * v4);
  w1 = hy * hx, w2 = hy * lx, w3 = ly * hx, w4 = ly * lx;
  return;
}
// RoIAlignBackwardFeature: one thread per pooled-output element. Each thread
// redistributes its output gradient over the bin's sampling grid, splitting
// every sample's share across the four bilinear corners. atomicAdd is
// required because bins from multiple ROIs (and overlapping bins) may touch
// the same bottom element.
template <typename T>
__global__ void RoIAlignBackwardFeature(
    const int nthreads,
    const T* top_diff,
    const int num_rois,
    const T spatial_scale,
    const int channels,
    const int height,
    const int width,
    const int pooled_height,
    const int pooled_width,
    const int sampling_ratio,
    T* bottom_diff,
    const T* bottom_rois,
    bool aligned) {
  CUDA_1D_KERNEL_LOOP(index, nthreads) {
    // (n, c, ph, pw) is an element in the pooled output
    int pw = index % pooled_width;
    int ph = (index / pooled_width) % pooled_height;
    int c = (index / pooled_width / pooled_height) % channels;
    int n = index / pooled_width / pooled_height / channels;
    const T* offset_bottom_rois = bottom_rois + n * 5;
    int roi_batch_ind = offset_bottom_rois[0];
    // Do not use rounding; this implementation detail is critical
    T offset = aligned ? (T)0.5 : (T)0.0;
    T roi_start_w = offset_bottom_rois[1] * spatial_scale - offset;
    T roi_start_h = offset_bottom_rois[2] * spatial_scale - offset;
    T roi_end_w = offset_bottom_rois[3] * spatial_scale - offset;
    T roi_end_h = offset_bottom_rois[4] * spatial_scale - offset;
    T roi_width = roi_end_w - roi_start_w;
    T roi_height = roi_end_h - roi_start_h;
    if (!aligned) { // for backward-compatibility only
      roi_width = max(roi_width, (T)1.);
      roi_height = max(roi_height, (T)1.);
    }
    T bin_size_h = static_cast<T>(roi_height) / static_cast<T>(pooled_height);
    T bin_size_w = static_cast<T>(roi_width) / static_cast<T>(pooled_width);
    // Gradient plane for this ROI's image and this channel.
    T* offset_bottom_diff =
        bottom_diff + (roi_batch_ind * channels + c) * height * width;
    int top_offset = (n * channels + c) * pooled_height * pooled_width;
    const T* offset_top_diff = top_diff + top_offset;
    const T top_diff_this_bin = offset_top_diff[ph * pooled_width + pw];
    // We use roi_bin_grid to sample the grid and mimic integral
    int roi_bin_grid_h = (sampling_ratio > 0)
        ? sampling_ratio
        : ceil(roi_height / pooled_height); // e.g., = 2
    int roi_bin_grid_w =
        (sampling_ratio > 0) ? sampling_ratio : ceil(roi_width / pooled_width);
    // We do average (integral) pooling inside a bin
    const T count = roi_bin_grid_h * roi_bin_grid_w; // e.g. = 4
    for (int iy = 0; iy < roi_bin_grid_h; iy++) // e.g., iy = 0, 1
    {
      // Same sample placement as the forward pass: subgrid cell centers.
      const T y = roi_start_h + ph * bin_size_h +
          static_cast<T>(iy + .5f) * bin_size_h /
              static_cast<T>(roi_bin_grid_h); // e.g., 0.5, 1.5
      for (int ix = 0; ix < roi_bin_grid_w; ix++) {
        const T x = roi_start_w + pw * bin_size_w +
            static_cast<T>(ix + .5f) * bin_size_w /
                static_cast<T>(roi_bin_grid_w);
        T w1, w2, w3, w4;
        int x_low, x_high, y_low, y_high;
        bilinear_interpolate_gradient(
            height,
            width,
            y,
            x,
            w1,
            w2,
            w3,
            w4,
            x_low,
            x_high,
            y_low,
            y_high,
            index);
        // Each sample contributes 1/count of the bin's gradient, split across
        // the four corners by its bilinear weights.
        T g1 = top_diff_this_bin * w1 / count;
        T g2 = top_diff_this_bin * w2 / count;
        T g3 = top_diff_this_bin * w3 / count;
        T g4 = top_diff_this_bin * w4 / count;
        // Corners are all -1 for out-of-range samples; skip those.
        if (x_low >= 0 && x_high >= 0 && y_low >= 0 && y_high >= 0) {
          atomicAdd(
              offset_bottom_diff + y_low * width + x_low, static_cast<T>(g1));
          atomicAdd(
              offset_bottom_diff + y_low * width + x_high, static_cast<T>(g2));
          atomicAdd(
              offset_bottom_diff + y_high * width + x_low, static_cast<T>(g3));
          atomicAdd(
              offset_bottom_diff + y_high * width + x_high, static_cast<T>(g4));
        } // if
      } // ix
    } // iy
  } // CUDA_1D_KERNEL_LOOP
} // RoIAlignBackward
namespace detectron2 {
// Launches RoIAlignForward over every ROI and returns the pooled tensor of
// shape (num_rois, channels, pooled_height, pooled_width). Input must be an
// NCHW CUDA tensor; rois is (num_rois, 5) with rows
// (batch_index, x1, y1, x2, y2). Work is queued on the current stream.
at::Tensor ROIAlign_forward_cuda(
    const at::Tensor& input,
    const at::Tensor& rois,
    const float spatial_scale,
    const int pooled_height,
    const int pooled_width,
    const int sampling_ratio,
    bool aligned) {
  AT_ASSERTM(input.device().is_cuda(), "input must be a CUDA tensor");
  AT_ASSERTM(rois.device().is_cuda(), "rois must be a CUDA tensor");
  at::TensorArg input_t{input, "input", 1}, rois_t{rois, "rois", 2};
  at::CheckedFrom c = "ROIAlign_forward_cuda";
  at::checkAllSameGPU(c, {input_t, rois_t});
  at::checkAllSameType(c, {input_t, rois_t});
  at::hip::HIPGuardMasqueradingAsCUDA device_guard(input.device());
  auto num_rois = rois.size(0);
  auto channels = input.size(1);
  auto height = input.size(2);
  auto width = input.size(3);
  auto output = at::empty(
      {num_rois, channels, pooled_height, pooled_width}, input.options());
  auto output_size = num_rois * pooled_height * pooled_width * channels;
  hipStream_t stream = at::hip::getCurrentHIPStreamMasqueradingAsCUDA();
  // Up to 4096 blocks of 512 threads; the kernel's grid-stride loop covers
  // any remaining elements.
  dim3 grid(::min(
      at::cuda::ATenCeilDiv(
          static_cast<int64_t>(output_size), static_cast<int64_t>(512)),
      static_cast<int64_t>(4096)));
  dim3 block(512);
  // Handle possibly empty output without launching a zero-size grid.
  if (output.numel() == 0) {
    AT_CUDA_CHECK(hipGetLastError());
    return output;
  }
  auto input_ = input.contiguous(), rois_ = rois.contiguous();
  AT_DISPATCH_FLOATING_TYPES(input.scalar_type(), "ROIAlign_forward", [&] {
    hipLaunchKernelGGL(( RoIAlignForward<scalar_t>), dim3(grid), dim3(block), 0, stream,
        output_size,
        input_.data_ptr<scalar_t>(),
        spatial_scale,
        channels,
        height,
        width,
        pooled_height,
        pooled_width,
        sampling_ratio,
        rois_.data_ptr<scalar_t>(),
        output.data_ptr<scalar_t>(),
        aligned);
  });
  // FIX: dropped the unconditional hipDeviceSynchronize() that followed the
  // launch. It serialized the whole device on every call; launch errors are
  // already surfaced by hipGetLastError(), and the backward path (and
  // upstream detectron2) return asynchronously in stream order.
  AT_CUDA_CHECK(hipGetLastError());
  return output;
}
// TODO remove the dependency on input and use instead its sizes -> save memory
// Backward of ROIAlign_forward_cuda: scatters `grad` (gradients of the pooled
// output) back into a zero-initialized (batch_size, channels, height, width)
// tensor via RoIAlignBackwardFeature. Parameters mirror the forward call;
// work is queued on the current stream.
at::Tensor ROIAlign_backward_cuda(
    const at::Tensor& grad,
    const at::Tensor& rois,
    const float spatial_scale,
    const int pooled_height,
    const int pooled_width,
    const int batch_size,
    const int channels,
    const int height,
    const int width,
    const int sampling_ratio,
    bool aligned) {
  AT_ASSERTM(grad.device().is_cuda(), "grad must be a CUDA tensor");
  AT_ASSERTM(rois.device().is_cuda(), "rois must be a CUDA tensor");
  at::TensorArg grad_t{grad, "grad", 1}, rois_t{rois, "rois", 2};
  at::CheckedFrom c = "ROIAlign_backward_cuda";
  at::checkAllSameGPU(c, {grad_t, rois_t});
  at::checkAllSameType(c, {grad_t, rois_t});
  at::hip::HIPGuardMasqueradingAsCUDA device_guard(grad.device());
  auto num_rois = rois.size(0);
  // Zero-filled: the kernel accumulates into it with atomicAdd.
  auto grad_input =
      at::zeros({batch_size, channels, height, width}, grad.options());
  hipStream_t stream = at::hip::getCurrentHIPStreamMasqueradingAsCUDA();
  // Up to 4096 blocks of 512 threads; the kernel's grid-stride loop covers
  // any remaining elements.
  dim3 grid(::min(
      at::cuda::ATenCeilDiv(
          static_cast<int64_t>(grad.numel()), static_cast<int64_t>(512)),
      static_cast<int64_t>(4096)));
  dim3 block(512);
  // handle possibly empty gradients
  if (grad.numel() == 0) {
    AT_CUDA_CHECK(hipGetLastError());
    return grad_input;
  }
  auto grad_ = grad.contiguous(), rois_ = rois.contiguous();
  AT_DISPATCH_FLOATING_TYPES(grad.scalar_type(), "ROIAlign_backward", [&] {
    hipLaunchKernelGGL(( RoIAlignBackwardFeature<scalar_t>), dim3(grid), dim3(block), 0, stream,
        grad.numel(),
        grad_.data_ptr<scalar_t>(),
        num_rois,
        spatial_scale,
        channels,
        height,
        width,
        pooled_height,
        pooled_width,
        sampling_ratio,
        grad_input.data_ptr<scalar_t>(),
        rois_.data_ptr<scalar_t>(),
        aligned);
  });
  AT_CUDA_CHECK(hipGetLastError());
  return grad_input;
}
} // namespace detectron2
|
eee9e01a2e2065baefc63bce276c5ada652967d2.cu
|
// Copyright (c) Facebook, Inc. and its affiliates.
#include <ATen/ATen.h>
#include <ATen/cuda/CUDAContext.h>
#include <c10/cuda/CUDAGuard.h>
#include <ATen/cuda/CUDAApplyUtils.cuh>
// TODO make it in a common file
#define CUDA_1D_KERNEL_LOOP(i, n) \
for (int i = blockIdx.x * blockDim.x + threadIdx.x; i < n; \
i += blockDim.x * gridDim.x)
// Bilinearly interpolates bottom_data (a height x width feature map) at the
// continuous coordinate (y, x). Points more than one pixel outside the map
// yield 0; otherwise coordinates are clamped into [0, dim - 1] and the value
// is a weighted average of the four surrounding pixels.
template <typename T>
__device__ T bilinear_interpolate(
    const T* bottom_data,
    const int height,
    const int width,
    T y,
    T x,
    const int index /* index for debug only*/) {
  // deal with cases that inverse elements are out of feature map boundary
  if (y < -1.0 || y > height || x < -1.0 || x > width) {
    // empty
    return 0;
  }
  // Clamp slightly-negative coordinates to the first pixel.
  if (y <= 0)
    y = 0;
  if (x <= 0)
    x = 0;
  int y_low = (int)y;
  int x_low = (int)x;
  int y_high;
  int x_high;
  // At the top/right edge, collapse both corners onto the last pixel so the
  // interpolation degenerates to a plain lookup.
  if (y_low >= height - 1) {
    y_high = y_low = height - 1;
    y = (T)y_low;
  } else {
    y_high = y_low + 1;
  }
  if (x_low >= width - 1) {
    x_high = x_low = width - 1;
    x = (T)x_low;
  } else {
    x_high = x_low + 1;
  }
  // Fractional offsets within the cell and their complements.
  T ly = y - y_low;
  T lx = x - x_low;
  T hy = 1. - ly, hx = 1. - lx;
  // do bilinear interpolation
  T v1 = bottom_data[y_low * width + x_low];
  T v2 = bottom_data[y_low * width + x_high];
  T v3 = bottom_data[y_high * width + x_low];
  T v4 = bottom_data[y_high * width + x_high];
  T w1 = hy * hx, w2 = hy * lx, w3 = ly * hx, w4 = ly * lx;
  T val = (w1 * v1 + w2 * v2 + w3 * v3 + w4 * v4);
  return val;
}
// RoIAlignForward: one thread per output element (n, c, ph, pw). For each
// pooled bin it averages bilinearly interpolated samples on a regular grid
// inside the bin. ROIs are 5-tuples (batch_index, x1, y1, x2, y2) in input
// coordinates; `aligned` shifts coordinates by half a pixel for exact
// alignment (Detectron2's ROIAlignV2 behavior).
template <typename T>
__global__ void RoIAlignForward(
    const int nthreads,
    const T* bottom_data,
    const T spatial_scale,
    const int channels,
    const int height,
    const int width,
    const int pooled_height,
    const int pooled_width,
    const int sampling_ratio,
    const T* bottom_rois,
    T* top_data,
    bool aligned) {
  CUDA_1D_KERNEL_LOOP(index, nthreads) {
    // (n, c, ph, pw) is an element in the pooled output
    int pw = index % pooled_width;
    int ph = (index / pooled_width) % pooled_height;
    int c = (index / pooled_width / pooled_height) % channels;
    int n = index / pooled_width / pooled_height / channels;
    const T* offset_bottom_rois = bottom_rois + n * 5;
    int roi_batch_ind = offset_bottom_rois[0];
    // Do not use rounding; this implementation detail is critical
    T offset = aligned ? (T)0.5 : (T)0.0;
    T roi_start_w = offset_bottom_rois[1] * spatial_scale - offset;
    T roi_start_h = offset_bottom_rois[2] * spatial_scale - offset;
    T roi_end_w = offset_bottom_rois[3] * spatial_scale - offset;
    T roi_end_h = offset_bottom_rois[4] * spatial_scale - offset;
    T roi_width = roi_end_w - roi_start_w;
    T roi_height = roi_end_h - roi_start_h;
    if (!aligned) { // for backward-compatibility only
      roi_width = max(roi_width, (T)1.);
      roi_height = max(roi_height, (T)1.);
    }
    T bin_size_h = static_cast<T>(roi_height) / static_cast<T>(pooled_height);
    T bin_size_w = static_cast<T>(roi_width) / static_cast<T>(pooled_width);
    // Feature-map plane for this ROI's image and this channel.
    const T* offset_bottom_data =
        bottom_data + (roi_batch_ind * channels + c) * height * width;
    // We use roi_bin_grid to sample the grid and mimic integral
    int roi_bin_grid_h = (sampling_ratio > 0)
        ? sampling_ratio
        : ceil(roi_height / pooled_height); // e.g., = 2
    int roi_bin_grid_w =
        (sampling_ratio > 0) ? sampling_ratio : ceil(roi_width / pooled_width);
    // We do average (integral) pooling inside a bin
    // When the grid is empty, output zeros == 0/1, instead of NaN.
    const T count = max(roi_bin_grid_h * roi_bin_grid_w, 1); // e.g. = 4
    T output_val = 0.;
    for (int iy = 0; iy < roi_bin_grid_h; iy++) // e.g., iy = 0, 1
    {
      // Sample points sit at the centers of a roi_bin_grid_h x roi_bin_grid_w
      // subgrid of the bin.
      const T y = roi_start_h + ph * bin_size_h +
          static_cast<T>(iy + .5f) * bin_size_h /
              static_cast<T>(roi_bin_grid_h); // e.g., 0.5, 1.5
      for (int ix = 0; ix < roi_bin_grid_w; ix++) {
        const T x = roi_start_w + pw * bin_size_w +
            static_cast<T>(ix + .5f) * bin_size_w /
                static_cast<T>(roi_bin_grid_w);
        T val = bilinear_interpolate(
            offset_bottom_data, height, width, y, x, index);
        output_val += val;
      }
    }
    output_val /= count;
    top_data[index] = output_val;
  }
}
// Computes the four bilinear interpolation weights (w1..w4) and the integer
// corner coordinates for the continuous point (y, x) -- i.e. the gradient of
// bilinear_interpolate with respect to the four surrounding pixel values.
// Out-of-range points set every weight to 0 and every corner to -1 so the
// caller can skip them.
template <typename T>
__device__ void bilinear_interpolate_gradient(
    const int height,
    const int width,
    T y,
    T x,
    T& w1,
    T& w2,
    T& w3,
    T& w4,
    int& x_low,
    int& x_high,
    int& y_low,
    int& y_high,
    const int index /* index for debug only*/) {
  // deal with cases that inverse elements are out of feature map boundary
  if (y < -1.0 || y > height || x < -1.0 || x > width) {
    // empty
    w1 = w2 = w3 = w4 = 0.;
    x_low = x_high = y_low = y_high = -1;
    return;
  }
  // Clamp slightly-negative coordinates to the first pixel.
  if (y <= 0)
    y = 0;
  if (x <= 0)
    x = 0;
  y_low = (int)y;
  x_low = (int)x;
  // At the top/right edge, collapse both corners onto the last pixel
  // (mirrors the forward bilinear_interpolate).
  if (y_low >= height - 1) {
    y_high = y_low = height - 1;
    y = (T)y_low;
  } else {
    y_high = y_low + 1;
  }
  if (x_low >= width - 1) {
    x_high = x_low = width - 1;
    x = (T)x_low;
  } else {
    x_high = x_low + 1;
  }
  // Fractional offsets within the cell and their complements.
  T ly = y - y_low;
  T lx = x - x_low;
  T hy = 1. - ly, hx = 1. - lx;
  // reference in forward
  // T v1 = bottom_data[y_low * width + x_low];
  // T v2 = bottom_data[y_low * width + x_high];
  // T v3 = bottom_data[y_high * width + x_low];
  // T v4 = bottom_data[y_high * width + x_high];
  // T val = (w1 * v1 + w2 * v2 + w3 * v3 + w4 * v4);
  w1 = hy * hx, w2 = hy * lx, w3 = ly * hx, w4 = ly * lx;
  return;
}
// Backward pass of ROIAlign with respect to the input feature map.
// One CUDA thread per element of the pooled output (grid-stride via
// CUDA_1D_KERNEL_LOOP); each thread distributes its incoming gradient
// over the sampled bilinear corners of `bottom_diff` using atomicAdd,
// because bins from different ROIs (and neighbouring sample points)
// can touch the same input pixels.
// `bottom_rois` is laid out as rows of 5: (batch_index, x1, y1, x2, y2).
template <typename T>
__global__ void RoIAlignBackwardFeature(
    const int nthreads,
    const T* top_diff,
    const int num_rois,
    const T spatial_scale,
    const int channels,
    const int height,
    const int width,
    const int pooled_height,
    const int pooled_width,
    const int sampling_ratio,
    T* bottom_diff,
    const T* bottom_rois,
    bool aligned) {
  CUDA_1D_KERNEL_LOOP(index, nthreads) {
    // (n, c, ph, pw) is an element in the pooled output
    int pw = index % pooled_width;
    int ph = (index / pooled_width) % pooled_height;
    int c = (index / pooled_width / pooled_height) % channels;
    int n = index / pooled_width / pooled_height / channels;
    const T* offset_bottom_rois = bottom_rois + n * 5;
    int roi_batch_ind = offset_bottom_rois[0];
    // Do not use rounding; this implementation detail is critical
    T offset = aligned ? (T)0.5 : (T)0.0;
    T roi_start_w = offset_bottom_rois[1] * spatial_scale - offset;
    T roi_start_h = offset_bottom_rois[2] * spatial_scale - offset;
    T roi_end_w = offset_bottom_rois[3] * spatial_scale - offset;
    T roi_end_h = offset_bottom_rois[4] * spatial_scale - offset;
    T roi_width = roi_end_w - roi_start_w;
    T roi_height = roi_end_h - roi_start_h;
    if (!aligned) { // for backward-compatibility only
      roi_width = max(roi_width, (T)1.);
      roi_height = max(roi_height, (T)1.);
    }
    T bin_size_h = static_cast<T>(roi_height) / static_cast<T>(pooled_height);
    T bin_size_w = static_cast<T>(roi_width) / static_cast<T>(pooled_width);
    T* offset_bottom_diff =
        bottom_diff + (roi_batch_ind * channels + c) * height * width;
    int top_offset = (n * channels + c) * pooled_height * pooled_width;
    const T* offset_top_diff = top_diff + top_offset;
    const T top_diff_this_bin = offset_top_diff[ph * pooled_width + pw];
    // We use roi_bin_grid to sample the grid and mimic integral
    int roi_bin_grid_h = (sampling_ratio > 0)
        ? sampling_ratio
        : ceil(roi_height / pooled_height); // e.g., = 2
    int roi_bin_grid_w =
        (sampling_ratio > 0) ? sampling_ratio : ceil(roi_width / pooled_width);
    // We do average (integral) pooling inside a bin
    const T count = roi_bin_grid_h * roi_bin_grid_w; // e.g. = 4
    for (int iy = 0; iy < roi_bin_grid_h; iy++) // e.g., iy = 0, 1
    {
      // Sample points are placed at the centers of a roi_bin_grid_h x
      // roi_bin_grid_w sub-grid inside the bin (hence the +0.5).
      const T y = roi_start_h + ph * bin_size_h +
          static_cast<T>(iy + .5f) * bin_size_h /
              static_cast<T>(roi_bin_grid_h); // e.g., 0.5, 1.5
      for (int ix = 0; ix < roi_bin_grid_w; ix++) {
        const T x = roi_start_w + pw * bin_size_w +
            static_cast<T>(ix + .5f) * bin_size_w /
                static_cast<T>(roi_bin_grid_w);
        T w1, w2, w3, w4;
        int x_low, x_high, y_low, y_high;
        bilinear_interpolate_gradient(
            height,
            width,
            y,
            x,
            w1,
            w2,
            w3,
            w4,
            x_low,
            x_high,
            y_low,
            y_high,
            index);
        // Each sample contributes 1/count of the bin gradient, split over
        // the four bilinear corners.
        T g1 = top_diff_this_bin * w1 / count;
        T g2 = top_diff_this_bin * w2 / count;
        T g3 = top_diff_this_bin * w3 / count;
        T g4 = top_diff_this_bin * w4 / count;
        // Negative corner indices mean the sample was out of bounds.
        if (x_low >= 0 && x_high >= 0 && y_low >= 0 && y_high >= 0) {
          // Atomic: multiple threads/ROIs may scatter into the same pixel.
          atomicAdd(
              offset_bottom_diff + y_low * width + x_low, static_cast<T>(g1));
          atomicAdd(
              offset_bottom_diff + y_low * width + x_high, static_cast<T>(g2));
          atomicAdd(
              offset_bottom_diff + y_high * width + x_low, static_cast<T>(g3));
          atomicAdd(
              offset_bottom_diff + y_high * width + x_high, static_cast<T>(g4));
        } // if
      } // ix
    } // iy
  } // CUDA_1D_KERNEL_LOOP
} // RoIAlignBackward
namespace detectron2 {

// Host wrapper for the ROIAlign forward pass.
// input: (N, C, H, W) CUDA tensor; rois: (num_rois, 5) CUDA tensor of
// (batch_index, x1, y1, x2, y2) rows. Returns a freshly allocated
// (num_rois, C, pooled_height, pooled_width) tensor.
// The kernel is launched on the current ATen CUDA stream.
at::Tensor ROIAlign_forward_cuda(
    const at::Tensor& input,
    const at::Tensor& rois,
    const float spatial_scale,
    const int pooled_height,
    const int pooled_width,
    const int sampling_ratio,
    bool aligned) {
  AT_ASSERTM(input.device().is_cuda(), "input must be a CUDA tensor");
  AT_ASSERTM(rois.device().is_cuda(), "rois must be a CUDA tensor");
  at::TensorArg input_t{input, "input", 1}, rois_t{rois, "rois", 2};
  at::CheckedFrom c = "ROIAlign_forward_cuda";
  at::checkAllSameGPU(c, {input_t, rois_t});
  at::checkAllSameType(c, {input_t, rois_t});
  at::cuda::CUDAGuard device_guard(input.device());
  auto num_rois = rois.size(0);
  auto channels = input.size(1);
  auto height = input.size(2);
  auto width = input.size(3);
  auto output = at::empty(
      {num_rois, channels, pooled_height, pooled_width}, input.options());
  auto output_size = num_rois * pooled_height * pooled_width * channels;
  cudaStream_t stream = at::cuda::getCurrentCUDAStream();
  // One thread per output element: up to 4096 blocks of 512 threads; the
  // kernel's CUDA_1D_KERNEL_LOOP covers any remainder.
  dim3 grid(std::min(
      at::cuda::ATenCeilDiv(
          static_cast<int64_t>(output_size), static_cast<int64_t>(512)),
      static_cast<int64_t>(4096)));
  dim3 block(512);
  if (output.numel() == 0) {
    AT_CUDA_CHECK(cudaGetLastError());
    return output;
  }
  auto input_ = input.contiguous(), rois_ = rois.contiguous();
  AT_DISPATCH_FLOATING_TYPES(input.scalar_type(), "ROIAlign_forward", [&] {
    RoIAlignForward<scalar_t><<<grid, block, 0, stream>>>(
        output_size,
        input_.data_ptr<scalar_t>(),
        spatial_scale,
        channels,
        height,
        width,
        pooled_height,
        pooled_width,
        sampling_ratio,
        rois_.data_ptr<scalar_t>(),
        output.data_ptr<scalar_t>(),
        aligned);
  });
  // NOTE: the blocking cudaDeviceSynchronize() that used to follow the
  // launch was removed: it stalled the host on every call and is not needed
  // for correctness. Launch-configuration errors are still caught by
  // cudaGetLastError() below (matching ROIAlign_backward_cuda); execution
  // errors surface at the next synchronizing call.
  AT_CUDA_CHECK(cudaGetLastError());
  return output;
}

// TODO remove the dependency on input and use instead its sizes -> save memory
// Host wrapper for the ROIAlign backward pass: scatters `grad`
// (num_rois, C, pooled_height, pooled_width) into a zero-initialized
// (batch_size, C, height, width) gradient tensor via the atomic kernel.
at::Tensor ROIAlign_backward_cuda(
    const at::Tensor& grad,
    const at::Tensor& rois,
    const float spatial_scale,
    const int pooled_height,
    const int pooled_width,
    const int batch_size,
    const int channels,
    const int height,
    const int width,
    const int sampling_ratio,
    bool aligned) {
  AT_ASSERTM(grad.device().is_cuda(), "grad must be a CUDA tensor");
  AT_ASSERTM(rois.device().is_cuda(), "rois must be a CUDA tensor");
  at::TensorArg grad_t{grad, "grad", 1}, rois_t{rois, "rois", 2};
  at::CheckedFrom c = "ROIAlign_backward_cuda";
  at::checkAllSameGPU(c, {grad_t, rois_t});
  at::checkAllSameType(c, {grad_t, rois_t});
  at::cuda::CUDAGuard device_guard(grad.device());
  auto num_rois = rois.size(0);
  // zeros(): the kernel accumulates with atomicAdd, so a clean slate is
  // required.
  auto grad_input =
      at::zeros({batch_size, channels, height, width}, grad.options());
  cudaStream_t stream = at::cuda::getCurrentCUDAStream();
  dim3 grid(std::min(
      at::cuda::ATenCeilDiv(
          static_cast<int64_t>(grad.numel()), static_cast<int64_t>(512)),
      static_cast<int64_t>(4096)));
  dim3 block(512);
  // handle possibly empty gradients
  if (grad.numel() == 0) {
    AT_CUDA_CHECK(cudaGetLastError());
    return grad_input;
  }
  auto grad_ = grad.contiguous(), rois_ = rois.contiguous();
  AT_DISPATCH_FLOATING_TYPES(grad.scalar_type(), "ROIAlign_backward", [&] {
    RoIAlignBackwardFeature<scalar_t><<<grid, block, 0, stream>>>(
        grad.numel(),
        grad_.data_ptr<scalar_t>(),
        num_rois,
        spatial_scale,
        channels,
        height,
        width,
        pooled_height,
        pooled_width,
        sampling_ratio,
        grad_input.data_ptr<scalar_t>(),
        rois_.data_ptr<scalar_t>(),
        aligned);
  });
  AT_CUDA_CHECK(cudaGetLastError());
  return grad_input;
}

} // namespace detectron2
|
31be4359e894802fc9658e9f6a371f6c07ca87c6.hip
|
// !!! This is a file automatically generated by hipify!!!
/** @file vl_tmove.cu
** @brief MEX internals of vl_tmove.m.
** @author Andrea Vedaldi
**/
/*
Copyright (C) 2016 Andrea Vedaldi.
All rights reserved.
This file is part of the VLFeat library and is made available under
the terms of the BSD license (see the COPYING file).
*/
#include "bits/mexutils.h"
#include "bits/datamex.hpp"
#include "bits/data.hpp"
#if ENABLE_GPU
#include "bits/datacu.hpp"
#endif
#include "bits/impl/tinythread.h"
#include "bits/impl/blashelper.hpp"
#include <assert.h>
#include <errno.h>
#include <stdio.h>
#include <unistd.h>
#include <time.h>
#include <fcntl.h>
#include <poll.h>
#include <sys/mman.h>
#include <sys/stat.h>
#include <sys/types.h>
#include <sys/shm.h>
#include <sys/un.h>
#include <sys/socket.h>
#include <memory>
#include <vector>
#include <algorithm>
#include <sstream>
/**
\file vl_tmove.cu
The `vl_tmove` utility implements an efficient mechanism to exchange
tensor data between different MATLAB processes. Presently, it is
limited to processes running on the same host, but future extensions
can integrate networked environments. Even limited to a single
host, this functionality is important because MATLAB multiple VLDT_GPU
support uses different processes for different GPUs.
The key idea is to implement a reduction tree, in which each MATLAB
process is connected to a parent and a number of children. When a tensor
needs to be accumulated, a node receives copies from the children,
sums them with its local copy, and sends the result to the parent.
Eventually, the data flow reaches the root of the tree and the accumulated
tensor is sent back towards the leaves. This communication mechanism
is designed to reduce the amount of data transfers from O(n^2)
for the trivial n-to-n communication of tensor copies to O(n).
A second strategy used to significantly improve the speed is to allow
the transfer of tensor data to proceed in the background, while MATLAB is busy
running the rest of the network. This is achieved by isolating
all communications in a supervisory thread.
# Notable facts
* Communications between thread uses UNIX-domain sockets (extensible
to INet sockets in the future). These are used to send lightweight
coordination messages.
* Data passing on local machines uses a shared memory map between
processes. The shared memory contains a copy of each tensor for each
process. VLDT_GPU tensors may either be allocated internally
by `vl_tmove` (in which case MATLAB may forget them)
or may remember pointers to MATLAB's memory (inplace).
The latter is slightly unsafe, but much faster as it saves several copies.
In any case, `vl_tmove` allocates a VLDT_GPU buffer as large as
the largest tensor as scratch space (and for direct VLDT_GPU communication).
* The supervisory and main threads collaborate through lock-less
synchronization for speed. This is possible because at any point in time
each tensor is managed by only one thread depending on its state.
Thus a tensor moves from one thread to the other simply by swapping
its state. There is, however, a condition variable to allow the
main thread to wait for the supervisory thread when needed.
* The supervisory thread waits by calling `poll()` on a number of sockets.
However, sometimes the main thread needs to signal the supervisor too.
This is realized by having a dummy `pipe()` between the two
threads.
**/
/* ---------------------------------------------------------------- */
/* Globals */
/* ---------------------------------------------------------------- */
// Indexes of the MEX input arguments.
enum {
  IN_COMMAND, IN_END
} ;
// Indexes of the MEX output arguments.
enum {
  OUT_RESULT, OUT_END
} ;
/* option codes */
enum {
  opt_inplace = 0,
  opt_verbose,
  opt_prefix,
} ;
/* options */
VLMXOption options [] = {
  {"prefix", 1, opt_prefix },
  {"InPlace", 0, opt_inplace },
  {"Verbose", 0, opt_verbose },
  {0, 0, 0 }
} ;
// Global verbosity level; raised by the 'Verbose' option.
int verbosity = 0 ;
// Global MEX context shared by all tensor wrappers.
vl::MexContext context ;
// Forward declarations.
class SharedTensorDescriptor ;
class SharedTensorSpace ;
class ProcessPool ;
/* ---------------------------------------------------------------- */
/* Utility */
/* ---------------------------------------------------------------- */
// Parse a MATLAB string argument into a vl::DataType. Recognizes
// "double" and "single" (case-insensitive); anything else yields
// VLMXE_IllegalArgument and leaves DATATYPE untouched.
static VLMXErrorCode vlmxParseDataType(vl::DataType & dataType, mxArray const * arg)
{
  if (vlmxCompareToStringI(arg, "double") == 0) {
    dataType = vl::VLDT_Double ;
  } else if (vlmxCompareToStringI(arg, "single") == 0) {
    dataType = vl::VLDT_Float ;
  } else {
    return VLMXE_IllegalArgument ;
  }
  return VLMXE_Success ;
}
// Parse a MATLAB string argument into a vl::DeviceType. Recognizes
// "cpu" and "gpu" (case-insensitive). The string "VLDT_GPU" — an artifact
// of an automated identifier rewrite that leaked into this user-facing
// string — is still accepted for backward compatibility, but the natural
// counterpart of "cpu" is "gpu".
static VLMXErrorCode vlmxParseDeviceType(vl::DeviceType & deviceType, mxArray const * arg)
{
  if (vlmxCompareToStringI(arg, "cpu") == 0) {
    deviceType = vl::VLDT_CPU ;
    return VLMXE_Success ;
  } else if (vlmxCompareToStringI(arg, "gpu") == 0 ||
             vlmxCompareToStringI(arg, "VLDT_GPU") == 0) {
    deviceType = vl::VLDT_GPU ;
    return VLMXE_Success ;
  } else {
    return VLMXE_IllegalArgument ;
  }
}
// Copy a MATLAB char-array argument into NAME (silently truncated to 255
// characters). Returns VLMXE_IllegalArgument if ARG is not a string.
static VLMXErrorCode vlmxParseString(std::string & name, mxArray const * arg)
{
  if (!vlmxIsString(arg, -1)) {
    return VLMXE_IllegalArgument ;
  }
  char buffer [256] ;
  mxGetString(arg, buffer, sizeof(buffer)) ;
  name = buffer ;
  return VLMXE_Success ;
}
// Parse a plain MATLAB numeric vector into a vl::TensorShape.
// Returns VLMXE_IllegalArgument if ARG is not a plain vector, or if it has
// more elements than the local dimension buffer can hold.
static VLMXErrorCode vlmxParseTensorShape(vl::TensorShape & shape, mxArray const * arg)
{
  size_t dimensions [32] ;
  if (!vlmxIsVector(arg, -1) || !vlmxIsPlain(arg)) {
    return VLMXE_IllegalArgument ;
  }
  int nd = mxGetNumberOfElements(arg) ;
  // Guard the fixed-size buffer: the previous code wrote past `dimensions`
  // for vectors with more than 32 elements (stack buffer overflow).
  if (nd < 0 || nd > (int)(sizeof(dimensions) / sizeof(dimensions[0]))) {
    return VLMXE_IllegalArgument ;
  }
  for (int k = 0 ; k < nd ; ++k) { dimensions[k] = (size_t)mxGetPr(arg)[k] ; }
  shape.setDimensions(dimensions, nd) ;
  return VLMXE_Success ;
}
/* ---------------------------------------------------------------- */
/* Logger */
/* ---------------------------------------------------------------- */
namespace vl {
  // One-shot message builder: accumulate text into getStream() and the
  // destructor prints the whole line. Used via the LOG/LOGERROR macros so
  // a full message is emitted atomically at the end of the statement.
  class Logger
  {
  public:
    Logger() ;
    ~Logger() ;
    std::ostringstream & getStream() ;
  protected:
    std::ostringstream stringStream ;
  private:
    // Disable copying (declared, never defined).
    Logger(const Logger&) ;
    Logger& operator= (const Logger&) ;
  } ;
}

vl::Logger::Logger()
{ }

// Flushes the accumulated message to stdout with a trailing newline.
vl::Logger::~Logger()
{
  printf("%s\n", stringStream.str().c_str()) ;
  //fflush(stdout) ;
}

std::ostringstream &
vl::Logger::getStream()
{
  return stringStream ;
}
// Logging macros built on vl::Logger: the temporary Logger prints when the
// full statement ends. Both expand an `<<` chain and expect an integer
// variable `lab` to be in scope at the expansion site. LOG(level) emits
// only when the global `verbosity` is at least `level`.
#define LOGERROR \
vl::Logger().getStream() \
<<"[error]"<<__func__<<"::lab "<<lab<<"::"
#define LOG(level) \
if (verbosity < level) { } \
else vl::Logger().getStream() \
<<"[info] "<<__func__<<"::lab "<<lab<<"::"
/* ---------------------------------------------------------------- */
/* SharedTensorDescriptor */
/* ---------------------------------------------------------------- */
#pragma mark -
// Describe one of the shared tensors: shape, data type,
// and device type.
class SharedTensorDescriptor
{
public:
  SharedTensorDescriptor() ;
  ~SharedTensorDescriptor() ;
  void init(vl::DeviceType deviceType,
            vl::DataType dataType,
            vl::TensorShape const & shape) ;
  void finalize() ;
  // Total storage required for the tensor, in bytes.
  size_t getSizeInBytes() const ;
  SharedTensorDescriptor & operator=(SharedTensorDescriptor const & tensor) ;
  // Data.
  vl::DeviceType deviceType ;
  vl::DataType dataType ;
  vl::TensorShape shape ;
} ;

SharedTensorDescriptor::SharedTensorDescriptor()
{ }

SharedTensorDescriptor::~SharedTensorDescriptor()
{
  finalize() ;
}

SharedTensorDescriptor &
SharedTensorDescriptor::operator=(SharedTensorDescriptor const & tensor)
{
  deviceType = tensor.deviceType ;
  dataType = tensor.dataType ;
  shape = tensor.shape ;
  return *this ;
}

// Initialize the descriptor; only CPU/GPU devices and float/double data
// types are supported (enforced by assertions).
void SharedTensorDescriptor::init(vl::DeviceType newDeviceType,
                                  vl::DataType newDataType,
                                  vl::TensorShape const & newShape)
{
  assert(newDeviceType == vl::VLDT_CPU || newDeviceType == vl::VLDT_GPU) ;
  assert(newDataType == vl::VLDT_Float || newDataType == vl::VLDT_Double) ;
  deviceType = newDeviceType ;
  dataType = newDataType ;
  shape = newShape ;
}

// No owned resources; kept for symmetry with init().
void SharedTensorDescriptor::finalize()
{ }

size_t SharedTensorDescriptor::getSizeInBytes() const
{
  return shape.getNumElements() * getDataTypeSizeInBytes(dataType) ;
}
/* ---------------------------------------------------------------- */
/* SharedTensorSpace */
/* ---------------------------------------------------------------- */
#pragma mark -
// SharedTensorSpace holds a list of tensors that can be accumulated
// between different processes.
//
// It encapsulates in particular: the shared memory map,
// the VLDT_GPU dispatch buffer, and, possibly, for non-inplace operations
// and VLDT_GPU arrays, a copy of the VLDT_GPU data.
//
// This class is not thread safe, so the MATLAB and flow supervisor thread
// must properly synchronize in accessing it.
class SharedTensorSpace
{
public:
  SharedTensorSpace() ;
  ~SharedTensorSpace() ;
  // Parse the MATLAB cell-array descriptor and register the tensors.
  vl::ErrorCode mexInit(mxArray const *mexDescriptor) ;
  void finalize() ;
  // Create/map the shared memory segment for this lab.
  vl::ErrorCode attach(std::string const & prefix, int lab, int numLabs) ;
  // Record where peer LAB's tensor copies live in the shared map.
  vl::ErrorCode attachPeer(int lab) ;
  void mexPrint() const ;
  void dump() const ;

private:
  bool initialized ;
  int lab ;
  int numLabs ;
  // Per-tensor accumulation state machine.
  enum SharedTensorState {
    ready,
    accumulateChildren,
    waitParent,
    waitChildren,
  } state ;

  // This class represents an instance of a shared tensor. It contains
  // its state/transaction pair and information on its memory location.
  struct SharedTensorInstance
  {
    std::string name ;
    SharedTensorDescriptor descriptor ;
    SharedTensorState state ;
    size_t transaction ;
    size_t finalTransaction ;
    int numChildrenToAccumulate ;
    size_t memoryMapOffset ;
    void * cpuMemory ;
    void * gpuMemory ;
    bool gpuMemoryIsOwned ;
#if ENABLE_GPU
    hipEvent_t gpuEvent ;
    bool gpuEventIsInitialized ;
#endif
    // Enables lookup by name via std::find.
    bool operator==(std::string const & theName) { return name == theName ; }
    SharedTensorInstance()
    : state(ready), transaction(0), finalTransaction((size_t)-1),
    cpuMemory(NULL), gpuMemory(NULL), gpuMemoryIsOwned(false)
#if ENABLE_GPU
    , gpuEvent(0), gpuEventIsInitialized(false)
#endif
    { }
  } ;
  typedef std::vector<SharedTensorInstance> tensors_t ;
  tensors_t tensors ;

  // View of a tensor copy owned by a peer process, as seen through the
  // shared memory map.
  struct SharedTensorPeerInstance
  {
    int lab ;
    SharedTensorState state ;
    size_t transaction ;
    size_t finalTransaction ;
    void *mappedCpuMemory ;
    void *mappedGpuMemory ;
    bool accumulated ;
    // Enables lookup by lab index via std::find.
    bool operator==(int theLab) { return lab == theLab ; }
    SharedTensorPeerInstance()
    : lab(-1), state(ready), transaction(0),
    mappedCpuMemory(NULL), mappedGpuMemory(NULL), accumulated(false),
    finalTransaction((size_t)-1) { }
  } ;
  // peerTensors[t] lists, for tensor t, one instance per attached peer.
  typedef std::vector<std::vector<SharedTensorPeerInstance> > peerTensors_t ;
  peerTensors_t peerTensors ;
  SharedTensorPeerInstance & getPeerTensor(int tensorIndex, int lab) ;

  // Shared CPU memory
  void * memoryMap ;
  size_t memoryMapSize ;
  size_t memoryMapLabStride ;  // bytes reserved per lab inside the map
  std::string memoryMapName ;
  int memoryMapFD ;
  bool memoryMapIsCudaRegistered ;

  // Additional VLDT_GPU memory
  void * gpuDispatchMemory ;
  int gpuDevice ;

#if ENABLE_GPU
  // Todo: one for each mapped peer dispatch memory
  hipIpcMemHandle_t gpuMemoryHandle ;
  hipStream_t gpuHelperStream ;
  hipEvent_t gpuHelperEvent ;
  bool gpuHelperStreamInitialized ;
  bool gpuHelperEventInitialized ;
#endif
  friend class ProcessPool ;
} ;
// Default-construct an empty, detached space. Members are listed in
// declaration order (C++ initializes them in that order regardless of how
// the list is written; the previous ordering triggered -Wreorder). Also
// initializes lab, numLabs and memoryMapLabStride, which were previously
// left indeterminate until mexInit()/attach() ran.
SharedTensorSpace::SharedTensorSpace()
  : initialized(false),
    lab(-1),
    numLabs(0),
    memoryMap(NULL),
    memoryMapSize(0),
    memoryMapLabStride(0),
    memoryMapFD(-1),
    memoryMapIsCudaRegistered(false),
    gpuDispatchMemory(NULL),
    gpuDevice(-1)
#if ENABLE_GPU
  , gpuHelperStream(0),
    gpuHelperStreamInitialized(false),
    gpuHelperEventInitialized(false)
#endif
{ }
// Releases the memory map, GPU buffers and events via finalize().
SharedTensorSpace::~SharedTensorSpace()
{
  finalize() ;
}
// This function initializes the SharedTensorSpace using
// a MATLAB cell array as descriptor for the space content.
// It can throw a MEX error, so it must be called from
// the MATLAB thread.
//
// DESCRIPTOR is an Nx3 or Nx4 cell array with per-row columns:
// {dataType, shape, name[, deviceType]}. Each tensor is assigned a
// 16-byte-aligned offset into the (future) per-lab memory region; the
// actual mapping happens later in attach().
vl::ErrorCode SharedTensorSpace::mexInit(mxArray const *descriptor)
{
  assert(descriptor) ;
  if (initialized) {
    mexErrMsgTxt("Already initialized. Use 'reset' to clear.") ;
  }
  lab = -1 ;
  numLabs = 0 ;
  memoryMapName = "" ;
  memoryMapSize = 0 ;
  memoryMapLabStride = 0 ;

  // Parse tensor list
  if (!mxIsCell(descriptor)) {
    mexErrMsgTxt("DESCRIPTOR is not a cell array.") ;
  }
  if (mxGetNumberOfDimensions(descriptor) != 2) {
    mexErrMsgTxt("DESCRIPTOR does not have two dimensions.") ;
  }
  if (mxGetN(descriptor) != 3 &&
      mxGetN(descriptor) != 4) {
    mexErrMsgTxt("DESCRIPTOR does not have three or four columns.") ;
  }

  size_t numTensors = mxGetM(descriptor) ;
  size_t offset = 0 ;
  size_t const alignFactor = 16 ;  // per-tensor alignment within the map
  bool useGPU = false ;

  for (int i = 0 ; i < numTensors ; ++i) {
    VLMXErrorCode error ;
    vl::DeviceType deviceType = vl::VLDT_CPU ;
    vl::DataType dataType ;
    vl::TensorShape shape ;
    std::string name ;
    // Cells are column-major: column c for row i is at c*numTensors + i.
    error = vlmxParseDataType(dataType, mxGetCell(descriptor, 0*numTensors + i)) ;
    if (error != VLMXE_Success) {
      vlmxError(error, "DESCRIPTOR{%d,1} is not a valid data type.", i+1) ;
    }
    error = vlmxParseTensorShape(shape, mxGetCell(descriptor, 1*numTensors + i)) ;
    if (error != VLMXE_Success) {
      vlmxError(error, "DESCRIPTOR{%d,2} is not a valid tensor shape.", i+1) ;
    }
    error = vlmxParseString(name, mxGetCell(descriptor, 2*numTensors + i)) ;
    if (error != VLMXE_Success) {
      vlmxError(error, "DESCRIPTOR{%d,3} is not a valid tensor name.", i+1) ;
    }
    if (mxGetN(descriptor) == 4) {
      error = vlmxParseDeviceType(deviceType, mxGetCell(descriptor, 3*numTensors + i)) ;
      if (error != VLMXE_Success) {
        vlmxError(error, "DESCRIPTOR{%d,4} is not a valid device type name.", i+1) ;
      }
    }
    if (deviceType == vl::VLDT_GPU) {
#if not defined(ENABLE_GPU)
      vlmxError(VLMXE_IllegalArgument, "VLDT_GPU support not compiled.") ;
#endif
      useGPU = true ;
    }

    // Add the new tensor to the table.
    {
      SharedTensorInstance tensor ;
      tensor.name = name ;
      tensor.descriptor.init(deviceType, dataType, shape) ;
      tensor.memoryMapOffset = offset ;
      tensors.push_back(tensor) ;
      // Advance by the tensor size rounded up to the alignment boundary.
      offset +=
      vl::divideAndRoundUp(tensor.descriptor.getSizeInBytes(), alignFactor) * alignFactor ;
      if (verbosity >= 2) {
        mexPrintf("[info] %s: registered tensor %s\n", __func__, name.c_str()) ;
      }
    }
  }

  // Size of the memory allocated for one lab (with a copy of all tensors).
  memoryMapName = "/mcn" ;
  size_t const pageSize = getpagesize() ;
  memoryMapLabStride = vl::divideAndRoundUp(offset, pageSize) * pageSize ;
  memoryMapSize = 0 ;

#if ENABLE_GPU
  if (useGPU) {
    hipGetDevice(&gpuDevice) ; // to inform thread
    LOG(2) << "current CUDA device: " << gpuDevice ;
  }
#endif

  initialized = true ;
  return vl::VLE_Success ;
}
// Look up the instance of tensor TENSORINDEX owned by peer process LAB.
// The peer must have been attached already; this is enforced with an
// assertion.
SharedTensorSpace::SharedTensorPeerInstance &
SharedTensorSpace::getPeerTensor(int tensorIndex, int lab)
{
  std::vector<SharedTensorPeerInstance> & peers = peerTensors[tensorIndex] ;
  std::vector<SharedTensorPeerInstance>::iterator it =
    std::find(peers.begin(), peers.end(), lab) ;
  assert(it != peers.end()) ;
  return *it ;
}
/// Attach the shared space. This allocates the shared memory map
/// for inter-process data transfers containing all tensors,
/// and the VLDT_GPU dispatch memory.
///
/// Layout: the map holds numLabs regions of memoryMapLabStride bytes;
/// lab k's copy of tensor t lives at k*stride + t.memoryMapOffset.
/// The root lab (0) creates and sizes the segment; others just map it.
vl::ErrorCode SharedTensorSpace::attach(std::string const & prefix, int lab, int numLabs)
{
  int error ;
  this->lab = lab ;
  this->numLabs = numLabs ;

  // Create the memory map name from the prefix.
  memoryMapName = std::string("/") + prefix ;

  // The root lab deletes a pre-existing memory object, if any.
  if (lab == 0) {
    error = shm_unlink(memoryMapName.c_str()) ;
    if (error == -1) {
      switch (errno) {
        case ENOENT:
          // Fine, there wasn't such a memory map anyways.
          break ;

        default:
          LOGERROR
          << "could not delete the stale memory map '"
          << memoryMapName.c_str()
          << "' because '" << strerror(errno) << '\'' ;
          return vl::VLE_Unknown ;
      }
    }
  }

  // Open/create the shared memory file descriptor.
  memoryMapSize = memoryMapLabStride * numLabs ;
  memoryMapFD = shm_open(memoryMapName.c_str(),
                         (lab == 0 ? O_CREAT:0)| O_RDWR, S_IRUSR | S_IWUSR) ;
  if (memoryMapFD == -1) {
    LOGERROR << "shm_open() failed because " << strerror(errno) ;
    // NOTE(review): memoryMapFD is -1 here, so this close() is a no-op
    // that merely sets errno; harmless but redundant.
    close(memoryMapFD) ;
    memoryMapFD = -1 ;
    return vl::VLE_Unknown ;
  }

  // The root process set the size of the shared memory.
  if (lab == 0) {
    if (ftruncate(memoryMapFD, memoryMapSize) == -1) {
      LOGERROR << "truncate failed because " << strerror(errno) ;
      return vl::VLE_OutOfMemory ;
    }
  }

  // Map the memory.
  memoryMap = mmap(0, memoryMapSize,
                   PROT_READ | PROT_WRITE, MAP_SHARED,
                   memoryMapFD, 0) ;
  if (memoryMap == MAP_FAILED) {
    LOGERROR << "mmap failed because " << strerror(errno) ;
    memoryMap = NULL ;
    close(memoryMapFD) ;
    memoryMapFD = -1 ;
    return vl::VLE_Unknown ;
  }
  memoryMapIsCudaRegistered = false ;

  // The FD is not needed after mmap.
  close(memoryMapFD) ;
  memoryMapFD = -1 ;

  // Associate memory to tensors.
#if ENABLE_GPU
  size_t maxGPUTensorSize = 0 ;
#endif
  for (int t = 0 ; t < tensors.size() ; ++t) {
    tensors[t].cpuMemory = (char*)memoryMap
    + tensors[t].memoryMapOffset
    + lab * memoryMapLabStride ;
#if ENABLE_GPU
    if (tensors[t].descriptor.deviceType == vl::VLDT_GPU) {
      // Lazy allocation (to allow inplace operations).
      tensors[t].gpuMemory = NULL ;
      tensors[t].gpuMemoryIsOwned = false ;
      // Track the largest GPU tensor: the dispatch buffer must hold any one
      // of them.
      maxGPUTensorSize = ::max(maxGPUTensorSize,
                               tensors[t].descriptor.getSizeInBytes()) ;
      hipError_t cerror = hipEventCreate(&tensors[t].gpuEvent) ;
      if (cerror != hipSuccess) {
        LOGERROR
        << "CUDA could not create an event because '"
        << hipGetErrorString(cerror) << '\'' ;
        return vl::VLE_Cuda ;
      }
      tensors[t].gpuEventIsInitialized = true ;
    }
#endif
  }

#if ENABLE_GPU
  if (maxGPUTensorSize > 0) {
    hipError_t cerror ;
    cerror = hipMalloc(&gpuDispatchMemory, maxGPUTensorSize) ;
    if (cerror != hipSuccess) {
      LOGERROR
      << "could not allocate VLDT_GPU memory for dispatch because '"
      << hipGetErrorString(cerror) << '\'' ;
      gpuDispatchMemory = NULL ;
      return vl::VLE_Cuda ;
    }

    // To parallelize memory transfers we use a separate CUDA stream.
    cerror = hipStreamCreateWithFlags(&gpuHelperStream, hipStreamNonBlocking) ;
    if (cerror != hipSuccess) {
      LOGERROR
      << "could not create a CUDA stream because '"
      << hipGetErrorString(cerror) << '\'' ;
      return vl::VLE_Cuda ;
    }
    gpuHelperStreamInitialized = true ;

    // Pin all shared host memory. Failure is tolerated: transfers still
    // work, just without pinned-memory performance.
    cerror = hipHostRegister(memoryMap,
                             memoryMapSize,
                             hipHostRegisterDefault) ;
    if (cerror != hipSuccess) {
      LOGERROR
      << "CUDA generated an error while pinning the shared host memory: '"
      << hipGetErrorString(cerror) << '\'' ;
    } else {
      LOG(2) << "pinned shared memory" ;
      memoryMapIsCudaRegistered = true ;
    }
  }
#endif
  return vl::VLE_Success ;
}
// Register the tensors of peer process LAB with this space: for each
// local tensor, record where the peer's copy lives inside the shared
// memory map so it can be read during accumulation.
vl::ErrorCode
SharedTensorSpace::attachPeer(int lab)
{
  // Lazily size the outer table to one slot per local tensor.
  if (peerTensors.size() != tensors.size()) {
    peerTensors.resize(tensors.size()) ;
  }
  for (int t = 0 ; t < tensors.size() ; ++t) {
    SharedTensorPeerInstance instance ;
    instance.lab = lab ;
    instance.state = SharedTensorSpace::ready ;
    instance.accumulated = false ;
    // The peer's region starts lab*stride into the map; each tensor sits
    // at its fixed offset within that region.
    char * peerRegion = (char*)memoryMap + lab * memoryMapLabStride ;
    instance.mappedCpuMemory = peerRegion + tensors[t].memoryMapOffset ;
    peerTensors[t].push_back(instance) ;
  }
  return vl::VLE_Success ;
}
// Destroy all resources
// 1) unmap and unlink shared memory map
// 2) release GPU buffers, events and the helper stream (when compiled in)
// Safe to call multiple times; called by the destructor.
void SharedTensorSpace::finalize()
{
  int error ;
  initialized = false ;

#if ENABLE_GPU
  if (memoryMap && memoryMapIsCudaRegistered) {
    hipHostUnregister(memoryMap) ;
  }
  // if (gpuHelperEventInitialized) {
  //   hipEventDestroy(gpuHelperEvent) ;
  //   gpuHelperEventInitialized = false ;
  // }
  if (gpuHelperStreamInitialized) {
    hipStreamDestroy(gpuHelperStream) ;
    gpuHelperStream = 0 ;
    gpuHelperStreamInitialized = false ;
  }
  if (gpuDispatchMemory) {
    hipFree(gpuDispatchMemory) ;
    gpuDispatchMemory = NULL ;
  }
  for (tensors_t::iterator T = tensors.begin() ;
       T != tensors.end() ;
       T++)
  {
    // Only free GPU buffers we allocated; inplace tensors borrow MATLAB's.
    if (T->gpuMemory && T->gpuMemoryIsOwned) {
      hipFree(T->gpuMemory) ;
      T->gpuMemory = NULL ;
      T->gpuMemoryIsOwned = false ;
    }
    if (T->gpuEventIsInitialized) {
      hipEventDestroy(T->gpuEvent) ;
      T->gpuEvent = 0 ;
      T->gpuEventIsInitialized = false ;
    }
  }
  gpuDevice = -1 ;
#endif

  if (memoryMap) {
    munmap(memoryMap, memoryMapSize) ;
    memoryMap = NULL ;
  }

  if (memoryMapFD != -1) {
    // This should have been closed right after mmap().
    close(memoryMapFD) ;
    memoryMapFD = -1 ;
  }

  // Best-effort unlink: every lab attempts it; only a permission error is
  // worth reporting (ENOENT just means another lab got there first).
  error = shm_unlink(memoryMapName.c_str()) ;
  if (error == -1 && errno == EACCES) {
    LOGERROR << "Cannot clear the shared memory map due to a permission error." ;
  }
  tensors.clear() ;
  numLabs = -1 ;
}
// For debugging: log every tensor's state/transaction and the state of
// each attached peer instance (always emitted, verbosity level 0).
void SharedTensorSpace::dump() const
{
  for (int tensorIndex = 0 ; tensorIndex < tensors.size() ; ++tensorIndex) {
    SharedTensorInstance const & T = tensors[tensorIndex] ;
    char const * stateString ;
    switch (T.state) {
      case ready: stateString="ready" ; break ;
      case accumulateChildren: stateString="accumulateChildren" ; break ;
      case waitParent: stateString="waitParent" ; break ;
      case waitChildren: stateString="waitChildren" ; break ;
    }
    LOG(0)<<"Tensor " << T.name ;
    LOG(0)<<"\tState: " << stateString ;
    LOG(0)<<"\ttransaction: "<<T.transaction ;
    // Peers may not be attached yet for this tensor.
    if (peerTensors.size() > tensorIndex) {
      for (int p = 0 ; p < peerTensors[tensorIndex].size() ; ++p) {
        SharedTensorPeerInstance const & PT = peerTensors[tensorIndex][p] ;
        switch (PT.state) {
          case ready: stateString="ready" ; break ;
          case accumulateChildren: stateString="accumulateChildren" ; break ;
          case waitParent: stateString="waitParent" ; break ;
          case waitChildren: stateString="waitChildren" ; break ;
        }
        LOG(0)<<"\tPeer on lab " << PT.lab << ": " << stateString;
        LOG(0)<<"\t\ttransaction:" << PT.transaction ;
      }
    }
  }
}
// Print a human-readable summary of the shared space (lab, memory map,
// per-tensor geometry and addresses) to the MATLAB console.
// Format-string fixes: the previous code passed a size_t to %d and raw
// pointers to %zx, which is undefined behavior; sizes now use %zu and
// pointers are cast to size_t before %zx (output format preserved).
void SharedTensorSpace::mexPrint() const
{
  mexPrintf("\tlab %d of %d\n", lab, numLabs) ;
  mexPrintf("\tshared memory: '%s', %zu bytes mapped at address: 0x%zx\n",
            memoryMapName.c_str(), memoryMapSize, (size_t)memoryMap) ;
  for (int tensorIndex = 0 ; tensorIndex < (int)tensors.size() ; ++tensorIndex) {
    SharedTensorInstance const & T = tensors[tensorIndex] ;
    mexPrintf("\tTensor '%s'\n", T.name.c_str()) ;
    mexPrintf("\t\t[") ;
    for (int k = 0 ; k < T.descriptor.shape.getNumDimensions() ; ++k) {
      mexPrintf(" %d", (int)T.descriptor.shape.getDimensions()[k]) ;
    }
    mexPrintf("] %s %s\n",
              T.descriptor.dataType == vl::VLDT_Double?"double":"single",
              T.descriptor.deviceType == vl::VLDT_CPU?"CPU":"VLDT_GPU") ;
    mexPrintf("\t\tCPU address: 0x%zx\n", (size_t)T.cpuMemory) ;
    mexPrintf("\t\tGPU address: 0x%zx\n", (size_t)T.gpuMemory) ;
    if ((int)peerTensors.size() > tensorIndex) {
      for (int p = 0 ; p < (int)peerTensors[tensorIndex].size() ; ++p) {
        SharedTensorPeerInstance const & PT = peerTensors[tensorIndex][p] ;
        mexPrintf("\t\tPeer instance %d\n", p) ;
        mexPrintf("\t\t\tlab: %0d\n", PT.lab) ;
        mexPrintf("\t\t\tmapped CPU address: 0x%zx\n", (size_t)PT.mappedCpuMemory) ;
      }
    }
  }
}
/* ---------------------------------------------------------------- */
/* ProcessPool */
/* ---------------------------------------------------------------- */
#pragma mark -
/// Represents a pool of collaborating MATLAB processes. Usually each
/// process corresponds to a certain MATLAB instance in a MATLAB pool.
class ProcessPool
{
public:
  /// Create an un-intialized ProcessPool. Before it is used,
  /// the pool must be initialized using init(). This design allows
  /// to catch errors during initialization without resorting to exceptions.
  ProcessPool() ;

  /// Automatically calls ::finalize().
  ~ProcessPool() ;

  /// Initialize the instance \a lab of \a numLabs pools. The function
  /// timesout.
  vl::ErrorCode init(std::string const & prefix, int lab,
                     int numLabs, SharedTensorSpace * space) ;

  /// Gracefully shutdown the connection with the other processes,
  /// waiting for them to finish updating as needed. After this, the
  /// supervisory thread quits, but the object remains initialized
  /// to allow reading off the final value of the tensor.
  ///
  /// The function timesout.
  vl::ErrorCode shutdown() ;

  /// Immediately terminate the ProcessPool instance and release all
  /// resources.
  void finalize() ;

  /// Print information.
  ///
  /// This function must be called from the MATLAB thread.
  void mexPrint() const ;

  /// Push a tensor in the pool for accumulation.
  ///
  /// This function must be called from the MATLAB thread. It throws
  /// a MEX error on error and can time out.
  void mexPush(std::string const & name, mxArray const * x,
               bool inplace = false) ;

  /// Pull an accumulated tensor from the pool.
  ///
  /// This function must be called from the MATLAB thread. It throws
  /// a MEX error on error and can time out.
  mxArray * mexPull(std::string const & name, bool inplace = false) ;

  /// Check whether the instance is intialized or not.
  bool isInitialized() const { return initialized ; }

private:
  bool initialized ;
  std::string prefix ;
  int lab ;
  int numLabs ;
  size_t timeoutInterval ;  // microseconds
  SharedTensorSpace * sharedSpace ;  // owned: deleted by finalize()

  // Messages between peer processes.
  struct Message
  {
    enum MessageType {
      /// Sent from root to leaves to request initialization during
      /// hanshake.
      init,
      /// Sent from leaves to root to acknowledge initialization.
      initDone,
      /// Sent from root to leaves to request attching the shared
      /// resources (shared memory).
      attach,
      /// Sent to advertise a state change for a tensor.
      tensorStateChange,
      /// Shutdown sequence
      requestShutdown,
      /// Communicate the final transaction index for quitting.
      tensorFinalTransaction
    }
    type ;

    /// The transaction number.
    size_t transaction ;

    /// The final transaction number.
    size_t finalTransaction ;

    // Sender and destination process indexes.
    int16_t from ;
    int16_t to ;

    // Session identifier, used for sanity checks.
    uint32_t session ;

    // Tensor ID and state for a tensor state change.
    uint32_t tensorId ;
    SharedTensorSpace::SharedTensorState tensorState ;

    Message() : transaction(0), finalTransaction((size_t)-1), tensorId(0) { }
  } ;

  // Background thread handling all inter-process communication, so the
  // MATLAB thread never blocks on sockets.
  class Supervisor {
  public:
    Supervisor(ProcessPool& pool)
    : pool(pool), thread(NULL), state(down),
    socketFD(-1) { pipeFD[0] = -1 ; pipeFD[1] = -1 ; }
    ~Supervisor() { finalize() ; }
    vl::ErrorCode init() ;
    void finalize() ;
    vl::ErrorCode shutdown() ;
    vl::ErrorCode beginTransaction(int tensorIndex) ;
    vl::ErrorCode waitTensor(int tensorIndex) ;

  private:
    ProcessPool & pool ;
    tthread::thread * thread ;
    enum State {
      connecting,
      running,
      shuttingDown,
      down} state ;

    // Peer processes.
    struct Peer
    {
      int lab ;
      int socketFD ;
      bool cudaCanAccessPeer ; //hipDeviceCanAccessPeer
      bool shutdownRequested ;
      Peer(int lab)
      : lab(lab), socketFD(-1),
      cudaCanAccessPeer(false),
      shutdownRequested(false)
      { }
      bool operator== (int lab) { return this->lab == lab ; }
    } ;
    typedef std::vector<Peer> peers_t ;
    peers_t peers ;

    // Comms.
    uint32_t session ;
    int pipeFD [2] ;  // dummy pipe used by the MATLAB thread to wake poll()
    int socketFD ;
    tthread::mutex mutex ;
    tthread::condition_variable waitingList ;
    bool shutdownRequested ; // local
    bool forceQuit ;

    static void threadEntryPoint(void * thing) ;
    void entryPoint() ;
    vl::ErrorCode connect() ;
    void disconnect() ;
    vl::ErrorCode handshake() ;
    vl::ErrorCode loop() ;
    vl::ErrorCode send(Message &msg, int to) ;
    vl::ErrorCode receive(Message &msg, int from, int timeout = -1) ;
    vl::ErrorCode handleAccumulateChildren(int tensorIndex) ;
    vl::ErrorCode handleWaitParent(int tensorIndex) ;
    vl::ErrorCode handleWaitChildren(int tensorIndex) ;
  } supervisor ;
} ;
// Construct an uninitialized pool; init() must be called before use.
// `supervisor` holds a reference to this pool, so it must be constructed
// in the initializer list.
ProcessPool::ProcessPool()
: supervisor(*this),
initialized(false),
lab(-1), numLabs(0)
{ }
// Destructor: releases the supervisory thread and the owned shared
// tensor space via finalize().
ProcessPool::~ProcessPool()
{
finalize() ;
}
// Initialize the pool as process `newLab` out of `newNumLabs`, taking
// ownership of `newSharedSpace` (deleted by finalize()), and start the
// supervisory thread. Any previous initialization is torn down first.
// Returns VLE_Success when the supervisor is up and running.
vl::ErrorCode ProcessPool::init(std::string const & newPrefix, int newLab, int newNumLabs, SharedTensorSpace * newSharedSpace)
{
  assert(newLab >= 0) ;
  assert(newNumLabs > newLab) ;
  assert(newSharedSpace) ;

  // Tear down a previously initialized pool, if any.
  finalize() ;

  // Record the configuration.
  prefix = newPrefix ;
  lab = newLab ;
  numLabs = newNumLabs ;
  sharedSpace = newSharedSpace ;
  timeoutInterval = 30UL * 1000UL * 1000UL ; // 30s in us

  // Spin up the supervisory thread; the pool counts as initialized
  // only if that succeeded (finalize() above already reset the flag).
  vl::ErrorCode error = supervisor.init() ;
  initialized = (error == vl::VLE_Success) ;
  return error ;
}
// Request an orderly, pool-wide shutdown; delegates to the supervisor.
vl::ErrorCode ProcessPool::shutdown()
{
return supervisor.shutdown() ;
}
// Tear down the pool: stop the supervisory thread first (so nothing races
// on sharedSpace), then finalize and delete the owned shared tensor space,
// and reset the pool members to their uninitialized values.
void ProcessPool::finalize()
{
supervisor.finalize() ;
if (sharedSpace) {
sharedSpace->finalize() ;
delete sharedSpace ;
sharedSpace = NULL ;
}
lab = -1 ;
numLabs = 0 ;
initialized = false ;
}
void ProcessPool::mexPrint() const
{
tthread::lock_guard<tthread::mutex> (mutex) ;
if (sharedSpace) {
sharedSpace->mexPrint() ;
} else {
mexPrintf("Uninitialized.") ;
}
}
// Push the data of MATLAB array `x` into the shared tensor named `name` and
// begin an accumulation transaction. With inplace=true (GPU tensors only)
// the shared tensor aliases x's GPU memory instead of copying it.
// Raises a MEX error on lookup/type/shape mismatch, timeout, or CUDA failure.
void ProcessPool::mexPush(std::string const & name,
                          mxArray const * x,
                          bool inplace)
{
  // Search tensor by name.
  SharedTensorSpace::tensors_t::iterator T
  = std::find(sharedSpace->tensors.begin(), sharedSpace->tensors.end(), name) ;
  if (T == sharedSpace->tensors.end()) {
    vlmxError(VLMXE_IllegalArgument, "There is no tensor '%s'.", name.c_str()) ;
  }
  // Encapsulate MATLAB argument and check tensor compatibility.
  vl::MexTensor mtens(context) ;
  mtens.init(x) ;
  if (mtens.getDeviceType() != T->descriptor.deviceType) {
    vlmxError(VLMXE_IllegalArgument, "The tensor device type is incorrect.") ;
  }
  if (mtens.getDataType() != T->descriptor.dataType) {
    vlmxError(VLMXE_IllegalArgument, "The tensor data type is incorrect.") ;
  }
  if (mtens.getNumElements() != T->descriptor.shape.getNumElements()) {
    vlmxError(VLMXE_IllegalArgument, "The tensor shape is incorrect.") ;
  }
  if (inplace && T->descriptor.deviceType != vl::VLDT_GPU) {
    vlmxError(VLMXE_IllegalArgument, "Inplace operations are supported only for VLDT_GPU arrays.") ;
  }
  // Wait until the tensor is in ready state.
  vl::ErrorCode error = supervisor.waitTensor(T - sharedSpace->tensors.begin()) ;
  if (error != vl::VLE_Success) {
    vlmxError(VLMXE_Execution, "Timeout or disconnected while waiting for tensor '%s' to become ready.", T->name.c_str()) ;
  }
  // Copy memory to SharedSpace.
  if (T->descriptor.deviceType == vl::VLDT_CPU) {
    memcpy(T->cpuMemory, mtens.getMemory(), T->descriptor.getSizeInBytes()) ;
  } else {
#if ENABLE_GPU
    hipError_t cerror ;
    // Sync with the main thread's stream (do not start until the
    // parameters have been computed!).
    hipEventRecord(T->gpuEvent, 0) ;
    hipStreamWaitEvent(sharedSpace->gpuHelperStream, T->gpuEvent, 0) ;
    if (inplace) {
      if (T->gpuMemoryIsOwned && T->gpuMemory) {
        // Free the previously allocated memory as we are going to use
        // an inplace operation on this tensor.
        hipFree(T->gpuMemory) ;
        T->gpuMemory = NULL ;
      }
      T->gpuMemoryIsOwned = false ;
      T->gpuMemory = mtens.getMemory() ;
    } else {
      // Lazily allocate the owned GPU buffer on first use.
      if (T->gpuMemoryIsOwned == false || T->gpuMemory == NULL) {
        cerror = hipMalloc(&T->gpuMemory,
                           T->descriptor.getSizeInBytes()) ;
        if (cerror != hipSuccess) {
          T->gpuMemory = NULL ;
          T->gpuMemoryIsOwned = false ;
          vlmxError(VLMXE_Alloc, "CUDA error while allocating VLDT_GPU memory (%s).",
                    hipGetErrorString(cerror)) ;
        }
        T->gpuMemoryIsOwned = true ;
      }
      // Fix: this copy must run on *every* push. The original nested it
      // inside the allocation branch above, so any push after the first
      // (buffer already owned) silently skipped copying the new data.
      cerror = hipMemcpyAsync (T->gpuMemory,
                               mtens.getMemory(),
                               T->descriptor.getSizeInBytes(),
                               hipMemcpyDeviceToDevice,
                               sharedSpace->gpuHelperStream) ;
      if (cerror != hipSuccess) {
        vlmxError(VLMXE_Execution, "CUDA error while copying VLDT_GPU data (%s).",
                  hipGetErrorString(cerror)) ;
      }
    }
#endif
  }
  // NOTE(review): the returned error code of beginTransaction() is ignored
  // here, as in the original.
  supervisor.beginTransaction(T - sharedSpace->tensors.begin()) ;
}
// Pull the current value of the shared tensor named `name` into a new
// MATLAB array and return it (caller owns the result). With inplace=true
// (GPU only) no copy is made and NULL is returned: the call only blocks
// until the tensor is ready, since the caller already aliases its memory.
mxArray * ProcessPool::mexPull(std::string const & name, bool inplace)
{
// Search the tensor with the specified name.
SharedTensorSpace::tensors_t::const_iterator T
= std::find(sharedSpace->tensors.begin(), sharedSpace->tensors.end(), name) ;
if (T == sharedSpace->tensors.end()) {
vlmxError(VLMXE_IllegalArgument, "There is no tensor with the specified name.") ;
}
if (inplace && T->descriptor.deviceType != vl::VLDT_GPU) {
vlmxError(VLMXE_IllegalArgument, "Inplace operations are supported only for VLDT_GPU arrays.") ;
}
// Wait until the tensor is in ready state
vl::ErrorCode error = supervisor.waitTensor(T - sharedSpace->tensors.begin()) ;
if (error != vl::VLE_Success) {
vlmxError(VLMXE_Execution, "Timeout or disconnected while waiting for tensor '%s' to become ready.", T->name.c_str()) ;
}
if (inplace) {
// With in-place operations, the only purpose of pull() is to wait until
// the tensor is ready and can be accessed.
return NULL ;
} else {
// Allocate a fresh MATLAB array of the tensor's type/shape and copy.
vl::MexTensor result(context) ;
result.init(T->descriptor.deviceType, T->descriptor.dataType, T->descriptor.shape) ;
if (T->descriptor.deviceType == vl::VLDT_CPU) {
memcpy(result.getMemory(),
T->cpuMemory,
T->descriptor.getSizeInBytes()) ;
} else {
#if ENABLE_GPU
// Synchronous with main thread.
hipError_t cerror = hipMemcpyAsync (result.getMemory(),
T->gpuMemory,
T->descriptor.getSizeInBytes(),
hipMemcpyDeviceToDevice,
sharedSpace->gpuHelperStream) ;
if (cerror != hipSuccess) {
vlmxError(VLMXE_Execution, "CUDA generated an error while copying VLDT_GPU data: '%s'.",
hipGetErrorString(cerror)) ;
}
// Block until the copy completes so the returned array is valid.
cerror = hipStreamSynchronize(sharedSpace->gpuHelperStream) ;
if (cerror != hipSuccess) {
vlmxError(VLMXE_Execution, "CUDA generated an error while synchronizing a stream: '%s'.",
hipGetErrorString(cerror)) ;
}
#endif
}
return result.relinquish() ;
}
}
/* ---------------------------------------------------------------- */
/* ProcessPool::Supervisor */
/* ---------------------------------------------------------------- */
#pragma mark -
// Logging helpers for Supervisor methods: each expands to an expression
// that streams into a vl::Logger, tagged with the current function name
// and this process's lab index.
#undef LOGERROR
#define LOGERROR \
vl::Logger().getStream() \
<<"[error]"<<__func__<<"::lab "<<pool.lab<<"::"
// LOG(level) emits only when the global `verbosity` is at least `level`.
#undef LOG
#define LOG(level) \
if (verbosity < level) { } \
else vl::Logger().getStream() \
<<"[info] "<<__func__<<"::lab "<<pool.lab<<"::"
void ProcessPool::Supervisor::threadEntryPoint(void * thing)
{
((ProcessPool::Supervisor*)thing)->entryPoint() ;
}
// Build the peer list (binary-tree topology over lab indexes), start the
// supervisory thread, and block until it either reaches the running state
// or fails. Returns VLE_Success only if the thread came up running.
vl::ErrorCode ProcessPool::Supervisor::init()
{
vl::ErrorCode error = vl::VLE_Success ;
finalize() ;
// Infer parent and children labs.
// `bit` is the index of the lowest set bit of this lab (31 for root,
// lab 0, which has no set bit: ffs() returns 0 there).
int bit = ffs(pool.lab) - 1 ;
if (bit == -1) { bit = 31 ; }
// The parent is this lab with its lowest set bit cleared.
int parent = pool.lab & (~(1 << bit)) ;
if (parent != pool.lab) {
// peers[0] always contain the parent (except for root)
peers.push_back(Peer(parent)) ;
}
// Children are this lab with one bit below `bit` set.
for (int k = 0 ; k < bit ; ++k) {
int child = pool.lab | (1 << k) ;
if (child < pool.numLabs) {
// Which peers[] gets which children is determined later
// during handshake based on the random connection order.
// Here we assign a provisional lab index using negative indexes
// as these are needed to use send().
peers.push_back(Peer(-child)) ;
}
}
state = connecting ;
shutdownRequested = false ;
forceQuit = false ;
thread = new tthread::thread(threadEntryPoint, this) ;
// Wait for initialization to be complete.
{
tthread::lock_guard<tthread::mutex> lock(mutex) ;
while (state == connecting) {
waitingList.wait(mutex) ;
}
if (state == running) {
error = vl::VLE_Success ;
} else {
error = vl::VLE_Unknown ;
}
}
return error ;
}
// Stop the supervisory thread (if any): set forceQuit, poke the self-pipe
// to wake it from poll(), join it, and clear the peer list.
void ProcessPool::Supervisor::finalize()
{
if (thread) {
{
tthread::lock_guard<tthread::mutex> lock(mutex) ;
forceQuit = true ;
if (pipeFD[1] >= 0) {
char dummy = 1 ;
write(pipeFD[1], &dummy, 1) ;
}
}
if (thread->joinable()) {
thread->join() ;
}
delete thread ;
thread = NULL ;
}
peers.clear() ;
}
// Ask the supervisory thread to run the shutdown protocol and wait (with
// timeout) until it reaches the `down` state.
vl::ErrorCode ProcessPool::Supervisor::shutdown()
{
// Signal the supervisory thread via the self-pipe.
shutdownRequested = true ;
char dummy = 1 ;
write(pipeFD[1], &dummy, 1) ;
// Wait for shutdown to complete
{
size_t start = vl::getTime() ;
tthread::lock_guard<tthread::mutex> lock(mutex) ;
while (state != down) {
if (vl::getTime() > start + pool.timeoutInterval) {
LOGERROR << "timeout while shutting down" ;
return vl::VLE_Timeout ;
}
waitingList.wait(mutex) ;
}
}
return vl::VLE_Success ;
}
// Open a new accumulation transaction for tensor `tensorIndex` (called by
// the main thread after pushing data) and wake the supervisory thread.
// The state flip to accumulateChildren must be the last write: the
// supervisory thread treats it as the publication point of the transaction.
vl::ErrorCode ProcessPool::Supervisor::beginTransaction(int tensorIndex)
{
vl::ErrorCode error = vl::VLE_Success ;
SharedTensorSpace::SharedTensorInstance & T = pool.sharedSpace->tensors[tensorIndex] ;
T.transaction ++ ;
T.numChildrenToAccumulate = 0 ;
// peers[0] is the parent (except for root), hence the starting index.
for (int p = (pool.lab > 0) ; p < peers.size() ; ++p) {
SharedTensorSpace::SharedTensorPeerInstance & PT = pool.sharedSpace->peerTensors[tensorIndex][p] ;
PT.accumulated = false ;
T.numChildrenToAccumulate += 1;
}
asm volatile("": : :"memory") ; // Memory barrier: prevents compiler from reordering
T.state = SharedTensorSpace::accumulateChildren ; // Must be last to close transaction
// Signal the supervisory thread
{
tthread::lock_guard<tthread::mutex> lock(mutex) ;
char dummy = 1 ;
write(pipeFD[1], &dummy, 1) ;
}
return error ;
}
// Block the calling (main) thread until tensor `tensorIndex` reaches the
// `ready` state. Returns VLE_Timeout after pool.timeoutInterval, or
// VLE_Unknown if the supervisory thread is no longer running.
vl::ErrorCode ProcessPool::Supervisor::waitTensor(int tensorIndex)
{
SharedTensorSpace::SharedTensorInstance & T = pool.sharedSpace->tensors[tensorIndex] ;
size_t start = vl::getTime() ;
tthread::lock_guard<tthread::mutex> lock(mutex) ;
while (T.state != SharedTensorSpace::ready) {
if ((vl::getTime() - start) > pool.timeoutInterval) {
return vl::VLE_Timeout ;
}
if (state != running) {
return vl::VLE_Unknown ;
}
// Woken by tensor state changes and by periodic heartbeats from loop().
waitingList.wait(mutex) ;
}
return vl::VLE_Success ;
}
// Send `msg` to peer `to`, stamping it first with the session and routing
// fields. The message is written as a raw byte image; the loop completes
// partial writes. Returns VLE_Unknown on any write error.
// NOTE(review): the peer socket is non-blocking, and EAGAIN is treated as
// fatal here — this assumes the small Message always fits in the socket
// buffer; confirm if messages ever grow.
vl::ErrorCode ProcessPool::Supervisor::send(Message & msg, int to)
{
  // Find connection to peer.
  peers_t::const_iterator rel = std::find(peers.begin(), peers.end(), to) ;
  assert(rel != peers.end()) ;
  // Add complementary information to the message.
  msg.session = session ;
  msg.from = pool.lab ;
  msg.to = to ;
  // Send all bytes.
  int bytesWritten = 0 ;
  int status ;
  char * nextByte = (char*)&msg ;
  while (bytesWritten < sizeof(msg)) {
    status = write(rel->socketFD, nextByte, sizeof(msg) - bytesWritten) ;
    if (status == -1) {
      LOGERROR
      << "could not send message to " << to
      << " because '" << strerror(errno) << '\'' ;
      return vl::VLE_Unknown ;
    }
    bytesWritten += status ;
    // Fix: advance past the bytes already written. Previously the pointer
    // was never moved, so a partial write re-sent the message prefix and
    // corrupted the byte stream.
    nextByte += status ;
  }
  LOG(3)
  << "sent message to " << to
  << " (type " << msg.type
  << ", state " << msg.tensorState
  << " tensor " << msg.tensorId
  << ')' ;
  return vl::VLE_Success ;
}
// Receive one Message from peer `from` into `msg`. The socket is
// non-blocking, so the loop polls (usleep) until the full message arrives,
// a timeout expires, or an error occurs. timeout is in microseconds;
// timeout < 0 means pool.timeoutInterval, timeout == 0 means return
// VLE_NoData immediately when no message has started arriving.
vl::ErrorCode ProcessPool::Supervisor::receive(Message & msg, int from, int timeout)
{
  size_t waited = 0 ; // us
  size_t const pollInterval = 1000 ; // us
  if (timeout < 0) { timeout = pool.timeoutInterval ; } // us
  // Find connection to peer.
  peers_t::const_iterator rel = std::find(peers.begin(), peers.end(), from) ;
  assert(rel != peers.end()) ;
  // Receive all bytes.
  {
    int bytesRead = 0 ;
    int status ;
    char * nextByte = (char*)&msg ;
    while (bytesRead < sizeof(msg)) {
      status = read(rel->socketFD, nextByte, sizeof(msg) - bytesRead) ;
      if (status == 0 || status == -1) {
        if (status == 0 || errno == EAGAIN) {
          if (timeout == 0 && bytesRead == 0) {
            // Non-blocking probe: no message, just return no data.
            return vl::VLE_NoData ;
          }
          if (timeout > 0 && waited >= timeout) {
            if (verbosity >= 1) {
              LOGERROR
              << "timed out while receiving a message from lab " << from
              << " because '" << strerror(errno) << '\'' ;
            }
            return vl::VLE_Timeout ;
          }
          usleep(pollInterval) ;
          waited += pollInterval ;
          continue ;
        }
        if (verbosity >= 1) {
          LOGERROR
          << "error while receiving a message from lab " << from
          << ": '" << strerror(errno) << '\'' ;
        }
        return vl::VLE_Unknown ;
      }
      bytesRead += status ;
      // Fix: advance past the bytes already read. Previously the pointer
      // was never moved, so a message arriving in several chunks kept
      // overwriting the start of `msg`.
      nextByte += status ;
    }
  }
  // Check message integrity.
  // NOTE(review): the `&&` chain only rejects a message when *all* of
  // session, from and to mismatch; `||` looks like the intended logic —
  // left unchanged here, confirm against the handshake's provisional
  // (negative) lab indexes before tightening.
  if ((msg.type != Message::init &&
       msg.type != Message::initDone)
      && (msg.session != session &&
          msg.from != from &&
          msg.to != pool.lab)) {
    LOGERROR
    << "received an unexpected message from lab " << from
    << "\n\tmsg: session:" << msg.session
    << " from:" << msg.from
    << " to:" << msg.to
    << " type:" << msg.type
    << "\n\tthis session:" << this->session ;
    return vl::VLE_Unknown ;
  }
  LOG(3)
  << "received message from "<<from
  << " (type " << msg.type
  << ", state " << msg.tensorState
  << ", tensor " << msg.tensorId
  << ')' ;
  return vl::VLE_Success ;
}
/// Establish connections with the peers: create the self-pipe, open a
/// listening UNIX-domain socket and accept one connection per child, then
/// connect (with retries) to the parent's socket. All peer sockets are set
/// non-blocking. On failure, resources are released later by disconnect().
vl::ErrorCode ProcessPool::Supervisor::connect()
{
vl::ErrorCode error = vl::VLE_Success ;
int result ;
char socketName [256] ;
struct sockaddr_un socketAddress ;
size_t start = vl::getTime() ;
pipeFD[0] = -1 ;
pipeFD[1] = -1 ;
socketFD = -1 ;
// Lock for entire duration of connect()
tthread::lock_guard<tthread::mutex> lock(mutex) ;
// Advertise
state = connecting ;
waitingList.notify_all() ;
// Create a pipe FD for notification between MATLAB's thread
// and the supervisory thread. This is needed to allow awaking
// the supervisory thread.
result = pipe(pipeFD) ;
if (result == -1) {
pipeFD[0] = -1 ;
pipeFD[1] = -1 ;
LOGERROR
<< "cannot create inter-threads pipe because: '"
<< strerror(errno) << '\'' ;
return vl::VLE_Unknown ;
}
// Create a socket and connect children.
size_t numChildren = peers.size() - (pool.lab > 0) ;
if (numChildren > 0) {
// Get a UNIX domain socket.
snprintf(socketName, sizeof(socketName)/sizeof(socketName[0]),
"/%s/%s-socket-%02d", P_tmpdir, pool.prefix.c_str(), pool.lab) ;
socketFD = socket(AF_UNIX, SOCK_STREAM, 0) ;
if (socketFD == -1) {
LOGERROR
<< "cannot create socket " << socketName
<< "because: " << strerror(errno) ;
return vl::VLE_Unknown ;
}
// Copy socket path into socketAddress.
memset(&socketAddress, 0, sizeof(socketAddress)) ;
socketAddress.sun_family = AF_UNIX;
strncpy(socketAddress.sun_path, socketName,
sizeof(socketAddress.sun_path) - 1) ;
// Delete socket path if it exists before binding.
if (access(socketAddress.sun_path, F_OK) == 0) {
unlink(socketAddress.sun_path) ;
}
// Bind socket to address.
result = bind(socketFD,
(struct sockaddr *)&socketAddress,
sizeof(socketAddress)) ;
if (result == -1) {
LOGERROR
<< "cannot bind socket " << socketName
<< "because: " << strerror(errno) ;
return vl::VLE_Unknown ;
}
// Start listening for children connections
result = listen(socketFD, numChildren) ;
if (result == -1) {
LOGERROR
<< "cannot listen to socket " << socketName
<< "because: " << strerror(errno) ;
return vl::VLE_Unknown ;
}
// Do not block on accept().
fcntl(socketFD, F_SETFL, fcntl(socketFD, F_GETFL, 0) | O_NONBLOCK);
// Accept one connection per child, retrying until the shared timeout.
for (int p = (pool.lab > 0) ; p < peers.size() ; ++p) {
peers[p].socketFD = -1 ;
for (;;) {
peers[p].socketFD = accept(socketFD, NULL, NULL) ;
if (peers[p].socketFD == -1) {
if (errno == EAGAIN || errno == EWOULDBLOCK) {
if (vl::getTime() < start + pool.timeoutInterval) continue ; // retry
LOGERROR
<< "timed out while accepting connection from peer " << peers[p].lab ;
error = vl::VLE_Timeout ;
goto done ;
}
LOGERROR
<< " cannot accept connection from peer " << peers[p].lab
<< " because: " << strerror(errno) ;
error = vl::VLE_Unknown ;
goto done ;
}
break ;
}
fcntl(peers[p].socketFD, F_SETFL,
fcntl(peers[p].socketFD ,F_GETFL, 0) | O_NONBLOCK) ;
}
}
// Connect parent.
if (pool.lab > 0) {
snprintf(socketName, sizeof(socketName)/sizeof(socketName[0]),
"/%s/%s-socket-%02d", P_tmpdir, pool.prefix.c_str(), peers[0].lab) ;
for (;;) {
peers[0].socketFD = socket(AF_UNIX, SOCK_STREAM, 0) ;
if (peers[0].socketFD == -1) {
if (vl::getTime() < start + pool.timeoutInterval) {
// Wait for parent to create socket file.
usleep(100UL * 1000UL) ; // 100 ms (10 times a second)
continue ;
}
LOGERROR
<< "cannot create socket '" << socketName
<< "' because '" << strerror(errno) << '"' ;
error = vl::VLE_Unknown ;
goto done ;
}
break ;
}
fcntl(peers[0].socketFD, F_SETFL,
fcntl(peers[0].socketFD ,F_GETFL, 0) | O_NONBLOCK) ;
// Copy socket path into socketAddress.
memset(&socketAddress, 0, sizeof(socketAddress)) ;
socketAddress.sun_family = AF_UNIX;
strncpy(socketAddress.sun_path, socketName,
sizeof(socketAddress.sun_path) - 1) ;
// Establish connection with parent.
for (int trials = 0 ; ; ++trials) {
int result = ::connect(peers[0].socketFD,
(struct sockaddr *)&socketAddress,
sizeof(socketAddress)) ;
if (result == 0) break ;
if (vl::getTime() < start + pool.timeoutInterval) {
// Wait for parent to start accepting connections.
usleep(100UL * 1000UL) ; // 100 ms (10 times a second)
continue ;
}
LOGERROR
<< "cannot connect socket " << socketName
<< " after trying " << trials
<< " times because '" << strerror(errno) << '"' ;
error = vl::VLE_Unknown ;
goto done ;
}
}
done:
return error ;
}
// Close all peer sockets, the listening socket (unlinking its filesystem
// path) and the self-pipe, then advertise the `down` state so threads
// blocked in shutdown()/waitTensor() wake up.
void ProcessPool::Supervisor::disconnect()
{
// Lock for entire duration of disconnect()
tthread::lock_guard<tthread::mutex> lock(mutex) ;
for (int p = 0 ; p < peers.size() ; ++p) {
if (peers[p].socketFD != -1) {
close(peers[p].socketFD) ;
peers[p].socketFD = -1 ;
}
}
if (socketFD != -1) {
close(socketFD) ;
socketFD = -1 ;
}
char socketName [256] ;
snprintf(socketName, sizeof(socketName)/sizeof(socketName[0]),
"/%s/%s-socket-%02d", P_tmpdir, pool.prefix.c_str(), pool.lab) ;
unlink(socketName) ;
// Close the write end first ([1]) so a reader never sees a dangling pipe.
for (int t = 1 ; t >= 0 ; --t) {
if (pipeFD[t] != -1) {
close(pipeFD[t]) ;
pipeFD[t] = -1 ;
}
}
state = down ;
waitingList.notify_all() ;
}
// The purpose of the handshake sequence is to make sure that
// all processes are properly communicating and ready to go.
// It is also required to synchronize the root (which creates several
// shared resources) and the other nodes (which attach them).
//
// Protocol: init messages flow root -> leaves (each node attaches the
// shared space before forwarding), then initDone messages flow back
// leaves -> root; the replies also reveal each child's real lab index.
vl::ErrorCode ProcessPool::Supervisor::handshake()
{
Message msg ;
vl::ErrorCode error = vl::VLE_Success ;
// Lock for entire duration of handshake()
tthread::lock_guard<tthread::mutex> lock(mutex) ;
LOG(2) << "handshake begins" ;
// receive message from parent (except for root)
if (pool.lab == 0) {
// The root picks the session identifier for this run.
session = (uint32_t)vl::getTime() ;
// root attaches first
error = pool.sharedSpace->attach(pool.prefix, 0, pool.numLabs) ;
if (error != vl::VLE_Success) {
LOGERROR << "root could not attach the shared space" ;
error = vl::VLE_Unknown ;
goto done ;
}
LOG(2) << "root attached the shared tensor space" ;
} else {
error = receive(msg, peers[0].lab) ;
if (error != vl::VLE_Success || msg.type != Message::init) {
LOGERROR << "did not receive a message from parent" ;
error = vl::VLE_Unknown ;
goto done ;
}
session = msg.session ;
// children attach now
error = pool.sharedSpace->attach(pool.prefix, pool.lab, pool.numLabs) ;
if (error != vl::VLE_Success || msg.type != Message::init) {
LOGERROR << "could not attach shared space" ;
error = vl::VLE_Unknown ;
goto done ;
}
LOG(2) << "child attached the shared tensor space" ;
}
// send message to all children
for (int p = (pool.lab > 0) ; p < peers.size() ; ++p) {
msg.type = Message::init ;
error = send(msg,peers[p].lab) ;
if (error != vl::VLE_Success) {
LOGERROR << "could not send a message to a child" ;
goto done ;
}
}
// receive message from all children
for (int p = (pool.lab > 0) ; p < peers.size() ; ++p) {
error = receive(msg,peers[p].lab) ;
if (error != vl::VLE_Success || msg.type != Message::initDone) {
error = vl::VLE_Unknown ;
goto done ;
}
// now we can identify the child lab index (replacing the provisional
// negative index assigned in init())
peers[p].lab = msg.from ;
LOG(2) << "connected lab " << msg.from ;
}
// register peer tensors in the same order as peer[]
for (int p = 0 ; p < peers.size() ; ++p) {
pool.sharedSpace->attachPeer(peers[p].lab) ;
}
// send message to parent (except for root)
if (pool.lab > 0) {
msg.type = Message::initDone ;
error = send(msg, peers[0].lab) ;
if (error != vl::VLE_Success) {
error = vl::VLE_Unknown ;
goto done ;
}
session = msg.session ;
}
done:
if (error != vl::VLE_Success) {
LOGERROR << "handshake failed" ;
} else {
LOG(2) << "handshake terminated successfully" ;
}
return error ;
}
// Supervisory thread body: bind to the main thread's CUDA device, then run
// connect() -> handshake() -> loop(), and always disconnect() on the way
// out (which also advertises the `down` state).
void ProcessPool::Supervisor::entryPoint()
{
vl::ErrorCode error = vl::VLE_Success ;
// Make sure the supervisory thread operates on the same CUDA device
// as the main thread.
#if ENABLE_GPU
if (pool.sharedSpace->gpuDevice >= 0) {
LOG(2) << "setting CUDA device" ;
hipError_t cerror = hipSetDevice(pool.sharedSpace->gpuDevice) ;
if (cerror != hipSuccess) {
LOGERROR
<< "could not switch supervisory thread to CUDA device "
<< pool.sharedSpace->gpuDevice ;
error = vl::VLE_Cuda ;
} else {
LOG(2) << "supervisory thread switched to CUDA device " << pool.sharedSpace->gpuDevice ;
}
}
#endif
if (error == vl::VLE_Success) {
error = connect() ;
}
if (error == vl::VLE_Success) {
error = handshake() ;
}
if (error == vl::VLE_Success) {
error = loop() ;
}
disconnect() ;
}
// accumulateChildren phase: sum into this node's tensor the updates of any
// child that has reached waitParent for the current transaction. When all
// children have been accumulated, publish the result to shared memory
// (GPU case) and move to the waitParent state, notifying the parent.
vl::ErrorCode ProcessPool::Supervisor::handleAccumulateChildren(int tensorIndex)
{
vl::ErrorCode error = vl::VLE_Success ;
SharedTensorSpace::SharedTensorInstance & T = pool.sharedSpace->tensors[tensorIndex] ;
// Search for children ready to be be accumulated.
for (int p = (pool.lab > 0) ; p < peers.size() && error == vl::VLE_Success ; ++p)
{
int peerLab = peers[p].lab ;
SharedTensorSpace::SharedTensorPeerInstance & PT
= pool.sharedSpace->getPeerTensor(tensorIndex, peerLab) ;
// A child is accumulable when it is on the same transaction, has
// reached waitParent, and has not been summed yet.
bool thisChildReadyForAccumulation =
PT.transaction == T.transaction &&
PT.state == SharedTensorSpace::waitParent &&
PT.accumulated == false ;
if (thisChildReadyForAccumulation) {
switch (T.descriptor.deviceType) {
case vl::VLDT_CPU: {
// CPU: axpy directly from the child's shared-memory mapping.
switch (T.descriptor.dataType) {
case vl::VLDT_Float:
vl::impl::blas<vl::VLDT_CPU,vl::VLDT_Float>::axpy
(context,
T.descriptor.shape.getNumElements(),
1.0f,
(float*)PT.mappedCpuMemory, 1,
(float*)T.cpuMemory, 1) ;
break ;
case vl::VLDT_Double:
vl::impl::blas<vl::VLDT_CPU,vl::VLDT_Double>::axpy
(context,
T.descriptor.shape.getNumElements(),
1.0,
(double*)PT.mappedCpuMemory, 1,
(double*)T.cpuMemory, 1) ;
break ;
default:
assert(false) ;
break ;
}
break ;
}
case vl::VLDT_GPU: {
#if ENABLE_GPU
hipError_t cerror ;
if (T.gpuMemory == NULL) {
LOGERROR << "internal error: VLDT_GPU memory not allocated for tensor " << T.name ;
error = vl::VLE_Unknown ;
break ;
}
// Copy the copy of the tensor update in the host shared memory map
// to a buffer in the VLDT_GPU.
cerror = hipMemcpyAsync(pool.sharedSpace->gpuDispatchMemory,
PT.mappedCpuMemory,
T.descriptor.getSizeInBytes(),
hipMemcpyHostToDevice,
pool.sharedSpace->gpuHelperStream) ;
if (cerror != hipSuccess) {
LOGERROR
<< "CUDA generated an error while copying data from host to device: "
<< hipGetErrorString(cerror) ;
error = vl::VLE_Cuda ;
break ;
}
// Sum the update to the current tensor value, running the blas call
// on the helper stream (restored below).
hipStream_t previousStream = context.getCudaHelper().getStream() ;
error = context.getCudaHelper().setStream(pool.sharedSpace->gpuHelperStream) ;
if (error != vl::VLE_Success) {
LOGERROR
<< "CUDA generated an error while switching to a different stream:"
<< context.getLastErrorMessage() ;
break ;
}
switch (T.descriptor.dataType) {
case vl::VLDT_Float:
error = vl::impl::blas<vl::VLDT_GPU,vl::VLDT_Float>::axpy
(context,
T.descriptor.shape.getNumElements(),
1.0f,
(float*)pool.sharedSpace->gpuDispatchMemory, 1,
(float*)T.gpuMemory, 1) ;
break ;
case vl::VLDT_Double:
error = vl::impl::blas<vl::VLDT_GPU,vl::VLDT_Double>::axpy
(context,
T.descriptor.shape.getNumElements(),
1.0,
(double*)pool.sharedSpace->gpuDispatchMemory, 1,
(double*)T.gpuMemory, 1) ;
break ;
default:
assert(false) ;
break ;
}
context.getCudaHelper().setStream(previousStream) ;
if (error != vl::VLE_Success) {
LOGERROR << "summing tensors:" << context.getLastErrorMessage() ;
}
#endif
break ;
}
default:
assert(false) ;
break ;
}
PT.accumulated = true ;
-- T.numChildrenToAccumulate ;
LOG(3)
<< "accumulated child " << PT.lab
<< "; " << T.numChildrenToAccumulate << " remaining" ;
} // next peer
}
if (error != vl::VLE_Success) { return error ; }
// If all children have been accumulated, then
// notify the parent and switch to waitParent state.
// Note that we change the PT state too as the peer
// will switch to that upon receiving the notification.
//
// The root is a special case because it
// does not have a parent, so it can switch
// directly to the waitChildren state. However, in order
// to reuse the generic code above, we also set it
// to waitParent and let the next iteration pick this up.
if (T.numChildrenToAccumulate == 0) {
if (T.descriptor.deviceType == vl::VLDT_GPU) {
#if ENABLE_GPU
hipError_t cerror ;
// Copy the VLDT_GPU tensor to the shared host memory map for other
// processes to use.
cerror = hipMemcpyAsync(T.cpuMemory,
T.gpuMemory,
T.descriptor.getSizeInBytes(),
hipMemcpyDeviceToHost,
pool.sharedSpace->gpuHelperStream) ;
if (cerror != hipSuccess) {
LOGERROR
<< "CUDA error while copying from device to host ("
<< hipGetErrorString(cerror) << ")" ;
return vl::VLE_Cuda ;
}
// Make this operation synchronous in order
// to make sure that other processes will properly read the
// update only when the copy is complete
cerror = hipStreamSynchronize(pool.sharedSpace->gpuHelperStream) ;
if (cerror != hipSuccess) {
LOGERROR
<< "CUDA error while synchronizing a stream: '"
<< hipGetErrorString(cerror) << '\'' ;
return vl::VLE_Cuda ;
}
#endif
}
T.state = SharedTensorSpace::waitParent ;
if (pool.lab > 0) {
int parentLab = peers[0].lab ;
pool.sharedSpace->getPeerTensor(tensorIndex, parentLab).state = SharedTensorSpace::waitParent ;
Message msg ;
msg.type = Message::tensorStateChange ;
msg.tensorId = tensorIndex ;
msg.tensorState = T.state ;
msg.transaction = T.transaction ;
error = send(msg, parentLab) ;
}
}
return error ;
}
// waitParent phase: once the parent has the aggregated value (it reached
// waitChildren for this transaction), copy it into this node's tensor,
// publish it for this node's own children, release the parent (mark its
// peer tensor ready) and move to waitChildren. The root has no parent and
// enters here directly with its own aggregated value.
vl::ErrorCode ProcessPool::Supervisor::handleWaitParent(int tensorIndex)
{
vl::ErrorCode error = vl::VLE_Success ;
SharedTensorSpace::SharedTensorInstance & T = pool.sharedSpace->tensors[tensorIndex] ;
// Check if parent finished updating. If so, we can copy its value here
// and notify the children to copy us by switching to waitParent state and
// notifying the children. Note that we change the children peer state too
// as these peers will switch to that upon being notified.
if (pool.lab > 0) {
int parentLab = peers[0].lab ;
SharedTensorSpace::SharedTensorPeerInstance & PT
= pool.sharedSpace->getPeerTensor(tensorIndex, parentLab) ;
bool parentDone = (PT.transaction == T.transaction &&
PT.state == SharedTensorSpace::waitChildren) ;
if (!parentDone) {
return vl::VLE_Success ;
}
// Pull the parent's aggregated value from its shared-memory mapping.
switch (T.descriptor.deviceType) {
case vl::VLDT_CPU:
memcpy(T.cpuMemory, PT.mappedCpuMemory, T.descriptor.getSizeInBytes()) ;
break ;
case vl::VLDT_GPU: {
#if ENABLE_GPU
hipError_t cerror = hipMemcpyAsync(T.gpuMemory,
PT.mappedCpuMemory,
T.descriptor.getSizeInBytes(),
hipMemcpyHostToDevice,
pool.sharedSpace->gpuHelperStream) ;
if (cerror != hipSuccess) {
LOGERROR
<< "propagating parent to children: CUDA generated an error while copying from host to device: '"
<< hipGetErrorString(cerror) << '\'' ;
error = vl::VLE_Cuda ;
}
#endif
break ;
}
}
if (error != vl::VLE_Success) { return error ; }
}
// We have copied data from parent (or there is no parent at all)
// so we are ready to pass our data to the children and to release
// the parent from waiting on us.
#if ENABLE_GPU
if (T.descriptor.deviceType == vl::VLDT_GPU) {
hipError_t cerror ;
if (peers.size() > (pool.lab > 0)) {
// There are children (i.e. peers other than parent), so copy data to host
// to deliver it to them.
cerror = hipMemcpyAsync(T.cpuMemory,
T.gpuMemory,
T.descriptor.getSizeInBytes(),
hipMemcpyDeviceToHost,
pool.sharedSpace->gpuHelperStream) ;
if (cerror != hipSuccess) {
LOGERROR
<< "CUDA generated an error while copying from device to host: '"
<< hipGetErrorString(cerror) << '\'' ;
error = vl::VLE_Cuda ;
}
}
// Synchronize, so it is safe for children on other processes to read
// the memory. Synchronize even if there are no children, so that inplace
// reads from this process are safe.
cerror = hipStreamSynchronize(pool.sharedSpace->gpuHelperStream) ;
if (cerror != hipSuccess) {
LOGERROR
<< "CUDA generated an error while synchronizing a stream: '"
<< hipGetErrorString(cerror) << '\'' ;
return vl::VLE_Cuda ;
}
}
#endif
// Notify the parent that we are done copying its data and the children that we are waiting
// on them to copy our data.
T.state = SharedTensorSpace::waitChildren ;
for (int p = 0 ; p < peers.size() ; ++p) {
int peerLab = peers[p].lab ;
SharedTensorSpace::SharedTensorPeerInstance & PT
= pool.sharedSpace->getPeerTensor(tensorIndex, peerLab) ;
// For the parent (p == 0 on non-root labs) advertise `ready`; for
// children advertise `waitChildren`.
PT.state = (pool.lab > 0 && p == 0) ? SharedTensorSpace::ready : SharedTensorSpace::waitChildren ;
Message msg ;
msg.type = Message::tensorStateChange ;
msg.transaction = T.transaction ;
msg.tensorId = tensorIndex ;
msg.tensorState = (pool.lab > 0 && p == 0) ? SharedTensorSpace::ready : SharedTensorSpace::waitChildren ;
error = send(msg, peerLab) ;
}
return error ;
}
// waitChildren phase: once every child has confirmed copying our data for
// the current transaction (or has already moved past it to a later one),
// mark the tensor ready and wake any thread blocked in waitTensor().
vl::ErrorCode ProcessPool::Supervisor::handleWaitChildren(int tensorIndex)
{
  SharedTensorSpace::SharedTensorInstance & tensor = pool.sharedSpace->tensors[tensorIndex] ;
  // Scan the children (peers[0] is the parent on non-root labs, hence the
  // starting index) and record whether any of them is still pending.
  bool anyChildPending = false ;
  for (int p = (pool.lab > 0) ; p < peers.size() ; ++p) {
    SharedTensorSpace::SharedTensorPeerInstance & peerTensor
    = pool.sharedSpace->getPeerTensor(tensorIndex, peers[p].lab) ;
    bool childDone =
    (peerTensor.transaction == tensor.transaction &&
     peerTensor.state == SharedTensorSpace::ready) ||
    (peerTensor.transaction > tensor.transaction) ;
    if (!childDone) { anyChildPending = true ; }
  }
  if (!anyChildPending) {
    tthread::lock_guard<tthread::mutex> lock(mutex) ;
    tensor.state = SharedTensorSpace::ready ;
    waitingList.notify_all() ;
  }
  return vl::VLE_Success ;
}
vl::ErrorCode ProcessPool::Supervisor::loop()
{
vl::ErrorCode error = vl::VLE_Success ;
LOG(2) << "loop begins" ;
// Advertise. Note that we do not lock extensively in the main
// loop. Syncrhonization with the main thread is kept efficient
// using lock-free mechanisms.
{
tthread::lock_guard<tthread::mutex> lock(mutex) ;
state = running ;
waitingList.notify_all() ;
}
int pollStatus = 0 ;
size_t const pollInterval = 499UL ; // allow heartbeats (ms)
size_t const heartbeatInterval = 500UL * 1000UL * 1000UL ; // (ns)
size_t lastHeartbeat = vl::getTime() ;
struct pollfd * polls = new struct pollfd [peers.size() + 1] ;
for (int p = 0 ; p < peers.size() ; ++p) {
polls[p].fd = peers[p].socketFD ;
polls[p].events = POLLIN | POLLHUP | POLLERR | POLLNVAL ;
}
polls[peers.size()].fd = pipeFD[0] ;
polls[peers.size()].events = POLLIN ;
while (error == vl::VLE_Success && forceQuit == false)
{
// Generate regular heartbeats to wake up the main thread at
// regular interval and allow it to time out on
// user commands usch as pull() and push().
size_t now = vl::getTime() ;
if (now > lastHeartbeat + heartbeatInterval) {
waitingList.notify_all() ; // no need to lock
lastHeartbeat = now ;
}
// Wait for incoming messages or a timeout.
pollStatus = poll(polls, peers.size() + 1, pollInterval) ;
if (pollStatus < 0) {
error = vl::VLE_Unknown ;
continue ;
}
// Timeout!
if (pollStatus == 0) {
LOG(1) << "Polling timed out on lab " << pool.sharedSpace->lab ;
// pool.sharedSpace->dump() ;
}
// Check for messages piped from the main thread.
if (polls[peers.size()].revents & POLLIN) {
LOG(3) << "supervisory thread notified by the main thread" ;
char dummy ;
read(pipeFD[0], &dummy, 1) ;
}
// Check for messages from other processes.
for (int p = 0 ; p < peers.size() && error == vl::VLE_Success ; ++ p)
{
// Check for communication errors.
if (polls[p].revents & (POLLHUP | POLLERR | POLLNVAL)) {
LOG(3) << "one of the sockets generated an error, quitting" ;
error = vl::VLE_Unknown ;
break ;
}
// Skip this peer if there is no incoming data.
if ((polls[p].revents & POLLIN) == 0) continue ;
// Receive the message.
Message msg ;
error = receive(msg, peers[p].lab) ;
if (error != vl::VLE_Success) {
LOGERROR << "error while receiving a message from lab " << peers[p].lab ;
break ;
}
// Process the message.
switch (msg.type) {
case Message::tensorStateChange: {
// Record the new state for later.
LOG(3)
<< "received tensor state change from lab " << msg.from
<< " for tensor " << pool.sharedSpace->tensors[msg.tensorId].name.c_str()
<< " to state " << msg.tensorState
<< " for transaction " << msg.transaction ;
SharedTensorSpace::SharedTensorPeerInstance & T
= pool.sharedSpace->getPeerTensor(msg.tensorId, msg.from) ;
T.state = msg.tensorState ;
T.transaction = msg.transaction ;
break ;
}
case Message::requestShutdown: {
peers_t::iterator P = std::find(peers.begin(), peers.end(), msg.from) ;
P->shutdownRequested = true ;
break ;
}
case Message::tensorFinalTransaction: {
peers_t::iterator P = std::find(peers.begin(), peers.end(), msg.from) ;
SharedTensorSpace::SharedTensorInstance & T = pool.sharedSpace->tensors[msg.tensorId];
LOG(3)
<< "received final transaction from lab " << msg.from
<< " for tensor " << T.name.c_str()
<< " to transaction " << msg.finalTransaction ;
int sourcePeer = msg.from ;
if (msg.finalTransaction < T.finalTransaction) {
T.finalTransaction = msg.finalTransaction ;
for (int q = 0 ; q < peers.size() ; ++q) {
if (sourcePeer == peers[q].lab) continue ;
error = send(msg, peers[q].lab) ;
if (error != vl::VLE_Success) {
LOGERROR
<< "error while sending a message to lab "
<< peers[p].lab ;
break ;
}
}
}
break ;
}
default:
// Unexpected message.
LOGERROR << "received an unexpected message" ;
error = vl::VLE_Unknown ;
break ;
}
}
// Check all tensors for actions. Keep updating each tensor until its
// state does not change anymore.
for (int tensorIndex = 0 ; tensorIndex < pool.sharedSpace->tensors.size() && error == vl::VLE_Success ; ++tensorIndex)
{
SharedTensorSpace::SharedTensorState currentState ;
SharedTensorSpace::SharedTensorInstance & T = pool.sharedSpace->tensors[tensorIndex] ;
do {
currentState = T.state ;
LOG(3) << "visiting tensor " << T.name << " in state " << T.state ;
// Detect interruptions
if (T.transaction > T.finalTransaction) {
LOG(1) << "detected interrupded transaction for tensor " << T.name <<
" (transaction:"<<T.transaction<<" > final_transaction:"<<T.finalTransaction<<")";
error = vl::VLE_Interrupted ;
continue ;
}
switch (T.state) {
case SharedTensorSpace::ready:
break ;
case SharedTensorSpace::accumulateChildren:
error = handleAccumulateChildren(tensorIndex) ;
break ;
case SharedTensorSpace::waitParent :
error = handleWaitParent(tensorIndex) ;
break ;
case SharedTensorSpace::waitChildren :
error = handleWaitChildren(tensorIndex) ;
break ;
}
} while (T.state != currentState && error == vl::VLE_Success) ;
}
// Upon shutting down, propagate a message to let other nodes know that
// no further transaction can be processed for each tensor.
if (shutdownRequested && (state == running) && (error == vl::VLE_Success)) {
LOG(3) << "sending final transaction for all tensors" ;
for (int i = 0 ; i < pool.sharedSpace->tensors.size() ; ++i) {
SharedTensorSpace::SharedTensorInstance & tensor = pool.sharedSpace->tensors[i] ;
if (tensor.finalTransaction > tensor.transaction) {
tensor.finalTransaction = tensor.transaction ;
Message msg ;
msg.type = Message::tensorFinalTransaction ;
msg.tensorId = i ;
msg.finalTransaction = tensor.finalTransaction ;
for (int p = 0 ; p < peers.size() ; ++p) {
error = send(msg, peers[p].lab) ;
if (error != vl::VLE_Success) {
LOGERROR
<< "error while sending a message to lab "
<< peers[p].lab ;
break ;
}
}
}
}
}
// Check for other actions.
if (shutdownRequested && (state == running) && (error == vl::VLE_Success)) {
// Check if the children are also in shutdown mode
bool allDone = true ;
for (int p = (pool.lab > 0) ; p < peers.size() ; ++p) {
allDone &= peers[p].shutdownRequested ;
}
if (allDone) {
state = Supervisor::shuttingDown ; // avoid sending the same message again later
if (pool.lab > 0) {
LOG(2) << "subtree ready to shutdown, telling parent lab" ;
Message msg ;
msg.type = Message::requestShutdown ;
error = send(msg, peers[0].lab) ;
} else {
// Other processes will stop when connections are broken.
LOG(2) << "everyone requested shutdown, root lab quitting" ;
break ; // out of poll loop
}
}
}
} // back to poll
LOG(2) << "terminating supervisory thread loop (error = " << error << ')' ;
delete [] polls ;
return error ;
}
/* ---------------------------------------------------------------- */
/* Context */
/* ---------------------------------------------------------------- */
#pragma mark -
// Singleton pool of collaborating MATLAB processes, shared by every MEX call.
ProcessPool processPool ;
/*
Resetting the context here resolves a crash when MATLAB quits and
the ~Context function is implicitly called on unloading the MEX file.
*/
// MEX unload hook registered via mexAtExit(). Tears down the process pool
// first, then the global context; doing this explicitly (rather than relying
// on static destruction order) avoids the crash mentioned above.
void atExit()
{
  processPool.finalize() ;
  context.clear() ;
}
/* ---------------------------------------------------------------- */
/* MEX driver */
/* ---------------------------------------------------------------- */
// MEX entry point. Dispatches on the COMMAND string (first argument):
//   'init' DESCRIPTOR LABINDEX NUMLABS - create/attach the shared space
//   'stats'                            - print pool state
//   'reset'                            - graceful shutdown + release
//   'push' NAME TENSOR                 - submit a tensor for accumulation
//   'pull' NAME                        - retrieve an accumulated tensor
// Options: 'prefix' (shared memory name), 'verbose', 'inplace'.
// vlmxError()/mexErrMsgTxt() throw out of this function on bad arguments.
void mexFunction(int nout, mxArray *out[],
                 int nin, mxArray const *in[])
{
  int opt ;
  int next = IN_END ;
  mxArray const *optarg ;
  enum Commands { init, stats, reset, push, pull } command ;
  bool inplace = false ;
  std::string tensorName ;
  std::string prefix = "mcn" ;
  mxArray const * arg ;
  vl::ErrorCode error = vl::VLE_Success ;
  size_t labIndex = 0 ;
  size_t numLabs = 0 ;
  verbosity = 0 ;
  mexAtExit(atExit) ;
  /* -------------------------------------------------------------- */
  /*                                            Check the arguments */
  /* -------------------------------------------------------------- */
  if (nin < 1) {
    vlmxError(VLMXE_IllegalArgument, "Not enough input arguments.") ;
  }
  if (!vlmxIsString(in[0], -1)) {
    vlmxError(VLMXE_IllegalArgument, "COMMAND is not a string.") ;
  }
  if (vlmxCompareToStringI(in[0],"init") == 0) {
    command = init ;
    if (nin < 4) {
      vlmxError(VLMXE_IllegalArgument, "Less than three arguments passed to INIT.") ;
    }
    arg = in[1] ;
    if (!vlmxIsPlainScalar(in[2])) {
      vlmxError(VLMXE_IllegalArgument, "LABINDEX is not a plain scalar.") ;
    }
    // MATLAB lab indexes are one-based; converted to zero-based below.
    labIndex = mxGetScalar(in[2]) ;
    if (labIndex < 1) {
      vlmxError(VLMXE_IllegalArgument, "LABINDEX must be an integer greater than 0.") ;
    }
    if (!vlmxIsPlainScalar(in[3])) {
      vlmxError(VLMXE_IllegalArgument, "NUMLABS is not a plain scalar.") ;
    }
    numLabs = mxGetScalar(in[3]) ;
    if (numLabs < labIndex) {
      vlmxError(VLMXE_IllegalArgument, "NUMLABS must be an integer greater or equal to LABINDEX.") ;
    }
    next = 4 ;
  } else if (vlmxCompareToStringI(in[0], "stats") == 0) {
    command = stats ;
    next = 1 ;
  } else if (vlmxCompareToStringI(in[0], "reset") == 0) {
    command = reset ;
    next = 1 ;
  } else if (vlmxCompareToStringI(in[0], "push") == 0) {
    if (nin < 3) {
      vlmxError(VLMXE_IllegalArgument, "Less than three arguments passed to PUSH.") ;
    }
    command = push ;
    // Local VLMXErrorCode intentionally shadows the outer vl::ErrorCode.
    VLMXErrorCode error = vlmxParseString(tensorName, in[1]) ;
    if (error != VLMXE_Success) {
      vlmxError(error, "NAME is not a string.") ;
    }
    arg = in[2] ;
    next = 3 ;
  } else if (vlmxCompareToStringI(in[0], "pull") == 0) {
    if (nin < 2) {
      mexErrMsgTxt("Less than two arguments passed to PULL.") ;
    }
    command = pull ;
    // Local VLMXErrorCode intentionally shadows the outer vl::ErrorCode.
    VLMXErrorCode error = vlmxParseString(tensorName, in[1]) ;
    if (error != VLMXE_Success) {
      vlmxError(error, "NAME is not a string.") ;
    }
    next = 2 ;
  }
  else {
    vlmxError(VLMXE_IllegalArgument, "Unknown COMMAND.") ;
  }
  // optional arguments
  while ((opt = vlmxNextOption (in, nin, options, &next, &optarg)) >= 0) {
    switch (opt) {
      case opt_prefix : {
        if (!vlmxIsString(optarg, -1)) {
          vlmxError(VLMXE_IllegalArgument, "PREFIX is not a string.") ;
        }
        char str [512] ;
        mxGetString (optarg, str, sizeof(str)/sizeof(str[0])) ;
        prefix = str ;
        break ;
      }
      case opt_verbose :
        ++ verbosity ;
        break ;
      case opt_inplace :
        inplace = true ;
        break ;
    }
  }
  switch (command) {
    case init:
    {
      (verbosity >= 2) && mexPrintf("vl_tmove: command 'init'\n") ;
      // Initialize shared space. mexInit() may throw a MEX error;
      // the auto_ptr should avoid a leak in this case.
      std::auto_ptr<SharedTensorSpace> sharedSpace(new SharedTensorSpace()) ;
      sharedSpace->mexInit(arg) ;
      // Initialize the pool, including attaching the shared space.
      // Now the shared space is owned by the process pool.
      error = processPool.init(prefix, labIndex - 1, numLabs, sharedSpace.release()) ;
      if (error != vl::VLE_Success) {
        mexErrMsgTxt("Could not initialize connections to other MATLAB labs.") ;
      }
      // At this point, sharedSpace is handled by the ProcessPool thread,
      // so we interact with it indirectly
      break ;
    }
    case stats :
      (verbosity >= 2) && mexPrintf("vl_tmove: command 'stats'\n") ;
      processPool.mexPrint() ;
      break ;
    case push :
      (verbosity >= 2) && mexPrintf("vl_tmove: command 'push' on tensor '%s'%s\n", tensorName.c_str(), inplace?" (inplace)":"") ;
      processPool.mexPush(tensorName, arg, inplace) ;
      break ;
    case pull :
      (verbosity >= 2) && mexPrintf("vl_tmove: command 'pull' on tensor '%s'%s\n", tensorName.c_str(),
                                    inplace?" (inplace)":"") ;
      out[0] = processPool.mexPull(tensorName, inplace) ;
      break ;
    case reset :
      (verbosity >= 2) && mexPrintf("vl_tmove: command 'reset'\n") ;
      processPool.shutdown() ; // gracefully (wait for others to finish)
      processPool.finalize() ; // no matter what
      break ;
  }
}
|
31be4359e894802fc9658e9f6a371f6c07ca87c6.cu
|
/** @file vl_tmove.cu
** @brief MEX internals of vl_tmove.m.
** @author Andrea Vedaldi
**/
/*
Copyright (C) 2016 Andrea Vedaldi.
All rights reserved.
This file is part of the VLFeat library and is made available under
the terms of the BSD license (see the COPYING file).
*/
#include "bits/mexutils.h"
#include "bits/datamex.hpp"
#include "bits/data.hpp"
#if ENABLE_GPU
#include "bits/datacu.hpp"
#endif
#include "bits/impl/tinythread.h"
#include "bits/impl/blashelper.hpp"
#include <assert.h>
#include <errno.h>
#include <stdio.h>
#include <unistd.h>
#include <time.h>
#include <fcntl.h>
#include <poll.h>
#include <sys/mman.h>
#include <sys/stat.h>
#include <sys/types.h>
#include <sys/shm.h>
#include <sys/un.h>
#include <sys/socket.h>
#include <memory>
#include <vector>
#include <algorithm>
#include <sstream>
/**
\file vl_tmove.cu
The `vl_tmove` utility implements an efficient mechanism to exchange
tensor data between different MATLAB processes. Presently, it is
limited to processes running on the same host, but future extensions
can integrate networked environments. Even limited to a single
host, this functionality is important because MATLAB multi-GPU
support uses different processes for different GPUs.
The key idea is to implement a reduction tree, in which each MATLAB
process is connected to a parent and a number of children. When a tensor
needs to be accumulated, a node receives copies from the children,
sums them with its local copy, and sends the result to the parent.
Eventually, the data flow reaches the root of the tree and the accumulated
tensor is sent back towards the leaves. This communication mechanism
is designed to reduce the amount of data transfers from O(n^2)
for the trivial n-to-n communication of tensor copies to O(n).
A second strategy used to significantly improve the speed is to allow
the transfer of tensor data to proceed in the background, while MATLAB is busy
running the rest of the network. This is achieved by isolating
all communications in a supervisory thread.
# Notable facts
* Communications between thread uses UNIX-domain sockets (extensible
to INet sockets in the future). These are used to send lightweight
coordination messages.
* Data passing on local machines uses a shared memory map between
processes. The shared memory contains a copy of each tensor for each
process. VLDT_GPU tensors may either be allocated internally
by `vl_tmove` (in which case MATLAB may forget them)
or may remember pointers to MATLAB's memory (inplace).
The latter is slightly unsafe, but much faster as it saves several copies.
In any case, `vl_tmove` allocates a VLDT_GPU buffer as large as
the largest tensor as scratch space (and for direct VLDT_GPU communication).
* The supervisory and main threads collaborate through lock-less
synchronization for speed. This is possible because at any point in time
each tensor is managed by only one thread depending on its state.
Thus a tensor moves from one thread to the other simply by swapping
its state. There is, however, a condition variable to allow the
main thread to wait for the supervisory thread when needed.
* The supervisory thread waits by calling `poll()` on a number of sockets.
However, sometimes the main thread needs to signal the supervisor too.
This is realized by having a dummy `pipe()` between the two
threads.
**/
/* ---------------------------------------------------------------- */
/* Globals */
/* ---------------------------------------------------------------- */
// Positional input arguments (only the COMMAND string is fixed).
enum {
  IN_COMMAND, IN_END
} ;
// Positional output arguments (only used by 'pull').
enum {
  OUT_RESULT, OUT_END
} ;
/* option codes */
enum {
  opt_inplace = 0,
  opt_verbose,
  opt_prefix,
} ;
/* options */
// Table for vlmxNextOption(): {name, expects-a-value, code}.
VLMXOption options [] = {
  {"prefix", 1, opt_prefix },
  {"InPlace", 0, opt_inplace },
  {"Verbose", 0, opt_verbose },
  {0, 0, 0 }
} ;
// Global log level; reset on every MEX invocation, raised by 'Verbose'.
int verbosity = 0 ;
// Global MatConvNet MEX context (GPU state, scratch memory, ...).
vl::MexContext context ;
// Forward declarations.
class SharedTensorDescriptor ;
class SharedTensorSpace ;
class ProcessPool ;
/* ---------------------------------------------------------------- */
/* Utility */
/* ---------------------------------------------------------------- */
// Map a MATLAB string ('double' or 'single', case-insensitive) onto the
// corresponding vl::DataType value. Any other string is an argument error
// and leaves dataType untouched.
static VLMXErrorCode vlmxParseDataType(vl::DataType & dataType, mxArray const * arg)
{
  if (vlmxCompareToStringI(arg, "double") == 0) {
    dataType = vl::VLDT_Double ;
    return VLMXE_Success ;
  }
  if (vlmxCompareToStringI(arg, "single") == 0) {
    dataType = vl::VLDT_Float ;
    return VLMXE_Success ;
  }
  return VLMXE_IllegalArgument ;
}
// Map a MATLAB string ('cpu' or 'gpu', case-insensitive) onto the
// corresponding vl::DeviceType value. Any other string is an argument error
// and leaves deviceType untouched.
static VLMXErrorCode vlmxParseDeviceType(vl::DeviceType & deviceType, mxArray const * arg)
{
  if (vlmxCompareToStringI(arg, "cpu") == 0) {
    deviceType = vl::VLDT_CPU ;
    return VLMXE_Success ;
  } else if (vlmxCompareToStringI(arg, "gpu") == 0) {
    // Bug fix: the literal had been mangled to "VLDT_GPU" by a blind
    // search-and-replace, so the user string 'gpu' could never match and
    // GPU tensors could not be declared. Compare against "gpu" again.
    deviceType = vl::VLDT_GPU ;
    return VLMXE_Success ;
  } else {
    return VLMXE_IllegalArgument ;
  }
}
// Copy a MATLAB string argument into an std::string.
// Content beyond 255 characters is silently truncated by mxGetString().
static VLMXErrorCode vlmxParseString(std::string & name, mxArray const * arg)
{
  if (!vlmxIsString(arg, -1)) {
    return VLMXE_IllegalArgument ;
  }
  char buffer [256] ;
  mxGetString(arg, buffer, sizeof(buffer)) ;
  name = buffer ;
  return VLMXE_Success ;
}
// Parse a plain numeric MATLAB vector into a tensor shape.
// Returns VLMXE_IllegalArgument if the argument is not a plain vector or
// has more dimensions than the local buffer can hold.
static VLMXErrorCode vlmxParseTensorShape(vl::TensorShape & shape, mxArray const * arg)
{
  size_t dimensions [32] ;
  if (!vlmxIsVector(arg, -1) || !vlmxIsPlain(arg)) {
    return VLMXE_IllegalArgument ;
  }
  int nd = mxGetNumberOfElements(arg) ;
  // Bug fix: nd was previously unchecked, so a vector with more than 32
  // elements overflowed the fixed-size stack buffer below.
  if (nd < 0 || nd > (int)(sizeof(dimensions)/sizeof(dimensions[0]))) {
    return VLMXE_IllegalArgument ;
  }
  for (int k = 0 ; k < nd ; ++k) { dimensions[k] = (size_t)mxGetPr(arg)[k] ; }
  shape.setDimensions(dimensions, nd) ;
  return VLMXE_Success ;
}
/* ---------------------------------------------------------------- */
/* Logger */
/* ---------------------------------------------------------------- */
namespace vl {
  // Minimal one-shot line logger: a message is accumulated into a string
  // stream via getStream() and printed with a trailing newline when the
  // temporary Logger object is destroyed. Used through the LOG()/LOGERROR
  // macros defined further down in this file.
  class Logger
  {
  public:
    Logger() ;
    ~Logger() ;
    std::ostringstream & getStream() ;
  protected:
    std::ostringstream stringStream ;
  private:
    // Disable copying: the destructor prints, so a copy would double-log.
    Logger(const Logger&) ;
    Logger& operator= (const Logger&) ;
  } ;
}
vl::Logger::Logger()
{ }
// Flush the accumulated message to stdout when the logger dies.
vl::Logger::~Logger()
{
  printf("%s\n", stringStream.str().c_str()) ;
  //fflush(stdout) ;
}
// Expose the underlying stream so callers can chain operator<<.
std::ostringstream &
vl::Logger::getStream()
{
  return stringStream ;
}
// LOGERROR: start an error log line tagged with the enclosing function and
// the lab index (a variable named `lab` must be in scope at the use site).
#define LOGERROR \
vl::Logger().getStream() \
<<"[error]"<<__func__<<"::lab "<<lab<<"::"
// LOG(level): start an info log line, emitted only when verbosity >= level.
// The if/else form keeps the macro safe to use as a single statement.
#define LOG(level) \
if (verbosity < level) { } \
else vl::Logger().getStream() \
<<"[info] "<<__func__<<"::lab "<<lab<<"::"
/* ---------------------------------------------------------------- */
/* SharedTensorDescriptor */
/* ---------------------------------------------------------------- */
#pragma mark -
// Describe one of the shared tensors: shape, data type,
// and device type.
// Describes one of the shared tensors: device type, data type and shape.
// Plain value type: copyable via operator=, no owned resources.
class SharedTensorDescriptor
{
public:
  SharedTensorDescriptor() ;
  ~SharedTensorDescriptor() ;
  void init(vl::DeviceType deviceType,
            vl::DataType dataType,
            vl::TensorShape const & shape) ;
  void finalize() ;
  // Number of bytes needed to store one copy of the tensor.
  size_t getSizeInBytes() const ;
  SharedTensorDescriptor & operator=(SharedTensorDescriptor const & tensor) ;
  // Data.
  vl::DeviceType deviceType ;
  vl::DataType dataType ;
  vl::TensorShape shape ;
} ;
SharedTensorDescriptor::SharedTensorDescriptor()
{ }
SharedTensorDescriptor::~SharedTensorDescriptor()
{
  finalize() ;
}
// Member-wise copy; safe because the class owns no resources.
SharedTensorDescriptor &
SharedTensorDescriptor::operator=(SharedTensorDescriptor const & tensor)
{
  deviceType = tensor.deviceType ;
  dataType = tensor.dataType ;
  shape = tensor.shape ;
  return *this ;
}
// Set the descriptor fields. Only CPU/GPU devices and float/double data
// are supported; anything else trips the assertions.
void SharedTensorDescriptor::init(vl::DeviceType newDeviceType,
                                  vl::DataType newDataType,
                                  vl::TensorShape const & newShape)
{
  assert(newDeviceType == vl::VLDT_CPU || newDeviceType == vl::VLDT_GPU) ;
  assert(newDataType == vl::VLDT_Float || newDataType == vl::VLDT_Double) ;
  deviceType = newDeviceType ;
  dataType = newDataType ;
  shape = newShape ;
}
// Nothing to release; kept for symmetry with init().
void SharedTensorDescriptor::finalize()
{ }
// Storage needed for one copy of the tensor: element count times the
// per-element size of the data type.
size_t SharedTensorDescriptor::getSizeInBytes() const
{
  size_t const numElements = shape.getNumElements() ;
  size_t const bytesPerElement = getDataTypeSizeInBytes(dataType) ;
  return numElements * bytesPerElement ;
}
/* ---------------------------------------------------------------- */
/* SharedTensorSpace */
/* ---------------------------------------------------------------- */
#pragma mark -
// SharedTensorSpace holds a list of tensors that can be accumulated
// between different processes.
//
// It encapsulates in particular: the shared memory map,
// the GPU dispatch buffer, and, possibly, for non-inplace operations
// and GPU arrays, a copy of the GPU data.
//
// This class is not thread safe, so the MATLAB and flow supervisor thread
// must properly synchronize in accessing it.
// Holds the list of tensors that can be accumulated between processes,
// together with the shared memory map and (optionally) GPU buffers.
// Not thread safe: the MATLAB and supervisor threads must synchronize
// externally before touching it.
class SharedTensorSpace
{
public:
  SharedTensorSpace() ;
  ~SharedTensorSpace() ;
  vl::ErrorCode mexInit(mxArray const *mexDescriptor) ;
  void finalize() ;
  vl::ErrorCode attach(std::string const & prefix, int lab, int numLabs) ;
  vl::ErrorCode attachPeer(int lab) ;
  void mexPrint() const ;
  void dump() const ;
private:
  bool initialized ;
  int lab ;
  int numLabs ;
  // Per-tensor state machine driven by the supervisory thread.
  enum SharedTensorState {
    ready,
    accumulateChildren,
    waitParent,
    waitChildren,
  } state ;
  // This class represents an instance of a shared tensor. It contains
  // its state@transaction pair and information on its memory location.
  struct SharedTensorInstance
  {
    std::string name ;
    SharedTensorDescriptor descriptor ;
    SharedTensorState state ;
    size_t transaction ;
    size_t finalTransaction ;
    int numChildrenToAccumulate ;
    size_t memoryMapOffset ;
    void * cpuMemory ;
    void * gpuMemory ;
    bool gpuMemoryIsOwned ;
#if ENABLE_GPU
    cudaEvent_t gpuEvent ;
    bool gpuEventIsInitialized ;
#endif
    bool operator==(std::string const & theName) { return name == theName ; }
    // Bug fix: numChildrenToAccumulate and memoryMapOffset were previously
    // left uninitialized by this constructor.
    SharedTensorInstance()
    : state(ready), transaction(0), finalTransaction((size_t)-1),
      numChildrenToAccumulate(0), memoryMapOffset(0),
      cpuMemory(NULL), gpuMemory(NULL), gpuMemoryIsOwned(false)
#if ENABLE_GPU
    , gpuEvent(0), gpuEventIsInitialized(false)
#endif
    { }
  } ;
  typedef std::vector<SharedTensorInstance> tensors_t ;
  tensors_t tensors ;
  // View of a tensor copy owned by another process (a peer lab).
  struct SharedTensorPeerInstance
  {
    int lab ;
    SharedTensorState state ;
    size_t transaction ;
    size_t finalTransaction ;
    void *mappedCpuMemory ;
    void *mappedGpuMemory ;
    bool accumulated ;
    bool operator==(int theLab) { return lab == theLab ; }
    // Fix: the initializer list now follows the member declaration order
    // (finalTransaction was previously listed last, triggering -Wreorder).
    SharedTensorPeerInstance()
    : lab(-1), state(ready), transaction(0), finalTransaction((size_t)-1),
      mappedCpuMemory(NULL), mappedGpuMemory(NULL), accumulated(false) { }
  } ;
  // peerTensors[t][p] is the view of tensor t held by the p-th peer.
  typedef std::vector<std::vector<SharedTensorPeerInstance> > peerTensors_t ;
  peerTensors_t peerTensors ;
  SharedTensorPeerInstance & getPeerTensor(int tensorIndex, int lab) ;
  // Shared CPU memory
  void * memoryMap ;
  size_t memoryMapSize ;
  size_t memoryMapLabStride ;
  std::string memoryMapName ;
  int memoryMapFD ;
  bool memoryMapIsCudaRegistered ;
  // Additional GPU memory
  void * gpuDispatchMemory ;
  int gpuDevice ;
#if ENABLE_GPU
  // Todo: one for each mapped peer dispatch memory
  cudaIpcMemHandle_t gpuMemoryHandle ;
  cudaStream_t gpuHelperStream ;
  cudaEvent_t gpuHelperEvent ;
  bool gpuHelperStreamInitialized ;
  bool gpuHelperEventInitialized ;
#endif
  friend class ProcessPool ;
} ;
// Default construction: nothing is mapped or allocated yet.
// Fix: the initializer list is now in member declaration order; the
// previous ordering (memoryMapFD before memoryMap, gpuDevice before
// gpuDispatchMemory) triggered -Wreorder warnings.
SharedTensorSpace::SharedTensorSpace()
: initialized(false),
  memoryMap(NULL),
  memoryMapSize(0),
  memoryMapFD(-1),
  memoryMapIsCudaRegistered(false),
  gpuDispatchMemory(NULL),
  gpuDevice(-1)
#if ENABLE_GPU
, gpuHelperStream(0),
  gpuHelperStreamInitialized(false),
  gpuHelperEventInitialized(false)
#endif
{ }
// Release the memory map, GPU buffers and events; finalize() is idempotent.
SharedTensorSpace::~SharedTensorSpace()
{
  finalize() ;
}
// This function initializes the SharedTensorSpace using
// a MATLAB cell array as descriptor for the space content.
// It can throw a MEX error, so it must be called from
// the MATLAB thread.
// Parse the MATLAB cell-array DESCRIPTOR (rows: dataType, shape, name
// [, deviceType]) and register one SharedTensorInstance per row, computing
// each tensor's byte offset in the (not yet mapped) shared memory region.
// Throws MEX errors on malformed input, so it must run on the MATLAB thread.
vl::ErrorCode SharedTensorSpace::mexInit(mxArray const *descriptor)
{
  assert(descriptor) ;
  if (initialized) {
    mexErrMsgTxt("Already initialized. Use 'reset' to clear.") ;
  }
  lab = -1 ;
  numLabs = 0 ;
  memoryMapName = "" ;
  memoryMapSize = 0 ;
  memoryMapLabStride = 0 ;
  // Parse tensor list
  if (!mxIsCell(descriptor)) {
    mexErrMsgTxt("DESCRIPTOR is not a cell array.") ;
  }
  if (mxGetNumberOfDimensions(descriptor) != 2) {
    mexErrMsgTxt("DESCRIPTOR does not have two dimensions.") ;
  }
  if (mxGetN(descriptor) != 3 &&
      mxGetN(descriptor) != 4) {
    mexErrMsgTxt("DESCRIPTOR does not have three or four columns.") ;
  }
  size_t numTensors = mxGetM(descriptor) ;
  size_t offset = 0 ;
  // Tensors are packed back to back, each aligned to a 16-byte boundary.
  size_t const alignFactor = 16 ;
  bool useGPU = false ;
  for (int i = 0 ; i < numTensors ; ++i) {
    VLMXErrorCode error ;
    vl::DeviceType deviceType = vl::VLDT_CPU ;
    vl::DataType dataType ;
    vl::TensorShape shape ;
    std::string name ;
    // Cells are stored column-major: column c, row i is c*numTensors + i.
    error = vlmxParseDataType(dataType, mxGetCell(descriptor, 0*numTensors + i)) ;
    if (error != VLMXE_Success) {
      vlmxError(error, "DESCRIPTOR{%d,1} is not a valid data type.", i+1) ;
    }
    error = vlmxParseTensorShape(shape, mxGetCell(descriptor, 1*numTensors + i)) ;
    if (error != VLMXE_Success) {
      vlmxError(error, "DESCRIPTOR{%d,2} is not a valid tensor shape.", i+1) ;
    }
    error = vlmxParseString(name, mxGetCell(descriptor, 2*numTensors + i)) ;
    if (error != VLMXE_Success) {
      vlmxError(error, "DESCRIPTOR{%d,3} is not a valid tensor name.", i+1) ;
    }
    if (mxGetN(descriptor) == 4) {
      error = vlmxParseDeviceType(deviceType, mxGetCell(descriptor, 3*numTensors + i)) ;
      if (error != VLMXE_Success) {
        vlmxError(error, "DESCRIPTOR{%d,4} is not a valid device type name.", i+1) ;
      }
    }
    if (deviceType == vl::VLDT_GPU) {
#if not defined(ENABLE_GPU)
      vlmxError(VLMXE_IllegalArgument, "VLDT_GPU support not compiled.") ;
#endif
      useGPU = true ;
    }
    // Add the new tensor to the table.
    {
      SharedTensorInstance tensor ;
      tensor.name = name ;
      tensor.descriptor.init(deviceType, dataType, shape) ;
      tensor.memoryMapOffset = offset ;
      tensors.push_back(tensor) ;
      offset +=
        vl::divideAndRoundUp(tensor.descriptor.getSizeInBytes(), alignFactor) * alignFactor ;
      if (verbosity >= 2) {
        mexPrintf("[info] %s: registered tensor %s\n", __func__, name.c_str()) ;
      }
    }
  }
  // Size of the memory allocated for one lab (with a copy of all tensors),
  // rounded up to a whole number of pages. The actual name and total size
  // are set later in attach().
  memoryMapName = "/mcn" ;
  size_t const pageSize = getpagesize() ;
  memoryMapLabStride = vl::divideAndRoundUp(offset, pageSize) * pageSize ;
  memoryMapSize = 0 ;
#if ENABLE_GPU
  if (useGPU) {
    cudaGetDevice(&gpuDevice) ; // to inform thread
    LOG(2) << "current CUDA device: " << gpuDevice ;
  }
#endif
  initialized = true ;
  return vl::VLE_Success ;
}
// Look up the peer copy of tensor <tensorIndex> owned by process <lab>.
// The entry must exist (peers are registered by attachPeer()); a missing
// entry is a programming error and trips the assertion.
SharedTensorSpace::SharedTensorPeerInstance &
SharedTensorSpace::getPeerTensor(int tensorIndex, int lab)
{
  typedef std::vector<SharedTensorPeerInstance>::iterator iterator_t ;
  iterator_t const end = peerTensors[tensorIndex].end() ;
  iterator_t found = std::find(peerTensors[tensorIndex].begin(), end, lab) ;
  assert(found != end) ;
  return *found ;
}
/// Attach the shared space. This allocates the shared memory map
/// for inter-process data transfers containing all tensors,
/// and the GPU dispatch memory.
/// Sequence: (root only) unlink stale map -> shm_open -> (root only)
/// ftruncate -> mmap -> bind tensors to their offsets -> allocate GPU
/// scratch, helper stream, and pin the host map. Statement order matters.
vl::ErrorCode SharedTensorSpace::attach(std::string const & prefix, int lab, int numLabs)
{
  int error ;
  this->lab = lab ;
  this->numLabs = numLabs ;
  // Create the memory map name from the prefix.
  memoryMapName = std::string("/") + prefix ;
  // The root lab deletes a pre-existing memory object, if any.
  if (lab == 0) {
    error = shm_unlink(memoryMapName.c_str()) ;
    if (error == -1) {
      switch (errno) {
        case ENOENT:
          // Fine, there wasn't such a memory map anyways.
          break ;
        default:
          LOGERROR
          << "could not delete the stale memory map '"
          << memoryMapName.c_str()
          << "' because '" << strerror(errno) << '\'' ;
          return vl::VLE_Unknown ;
      }
    }
  }
  // Open/create the shared memory file descriptor.
  memoryMapSize = memoryMapLabStride * numLabs ;
  memoryMapFD = shm_open(memoryMapName.c_str(),
                         (lab == 0 ? O_CREAT:0)| O_RDWR, S_IRUSR | S_IWUSR) ;
  if (memoryMapFD == -1) {
    LOGERROR << "shm_open() failed because " << strerror(errno) ;
    close(memoryMapFD) ;
    memoryMapFD = -1 ;
    return vl::VLE_Unknown ;
  }
  // The root process set the size of the shared memory.
  if (lab == 0) {
    if (ftruncate(memoryMapFD, memoryMapSize) == -1) {
      LOGERROR << "truncate failed because " << strerror(errno) ;
      return vl::VLE_OutOfMemory ;
    }
  }
  // Map the memory.
  memoryMap = mmap(0, memoryMapSize,
                   PROT_READ | PROT_WRITE, MAP_SHARED,
                   memoryMapFD, 0) ;
  if (memoryMap == MAP_FAILED) {
    LOGERROR << "mmap failed because " << strerror(errno) ;
    memoryMap = NULL ;
    close(memoryMapFD) ;
    memoryMapFD = -1 ;
    return vl::VLE_Unknown ;
  }
  memoryMapIsCudaRegistered = false ;
  // The FD is not needed after mmap.
  close(memoryMapFD) ;
  memoryMapFD = -1 ;
  // Associate memory to tensors: each lab's copy of tensor t lives at
  // its per-tensor offset within that lab's page-aligned stride.
  #if ENABLE_GPU
  size_t maxGPUTensorSize = 0 ;
  #endif
  for (int t = 0 ; t < tensors.size() ; ++t) {
    tensors[t].cpuMemory = (char*)memoryMap
    + tensors[t].memoryMapOffset
    + lab * memoryMapLabStride ;
#if ENABLE_GPU
    if (tensors[t].descriptor.deviceType == vl::VLDT_GPU) {
      // Lazy allocation (to allow inplace operations).
      tensors[t].gpuMemory = NULL ;
      tensors[t].gpuMemoryIsOwned = false ;
      maxGPUTensorSize = std::max(maxGPUTensorSize,
                                  tensors[t].descriptor.getSizeInBytes()) ;
      cudaError_t cerror = cudaEventCreate(&tensors[t].gpuEvent) ;
      if (cerror != cudaSuccess) {
        LOGERROR
        << "CUDA could not create an event because '"
        << cudaGetErrorString(cerror) << '\'' ;
        return vl::VLE_Cuda ;
      }
      tensors[t].gpuEventIsInitialized = true ;
    }
#endif
  }
#if ENABLE_GPU
  if (maxGPUTensorSize > 0) {
    cudaError_t cerror ;
    // One scratch buffer as large as the largest GPU tensor.
    cerror = cudaMalloc(&gpuDispatchMemory, maxGPUTensorSize) ;
    if (cerror != cudaSuccess) {
      LOGERROR
      << "could not allocate GPU memory for dispatch because '"
      << cudaGetErrorString(cerror) << '\'' ;
      gpuDispatchMemory = NULL ;
      return vl::VLE_Cuda ;
    }
    // To parallelize memory transfers we use a separate CUDA stream.
    cerror = cudaStreamCreateWithFlags(&gpuHelperStream, cudaStreamNonBlocking) ;
    if (cerror != cudaSuccess) {
      LOGERROR
      << "could not create a CUDA stream because '"
      << cudaGetErrorString(cerror) << '\'' ;
      return vl::VLE_Cuda ;
    }
    gpuHelperStreamInitialized = true ;
    // Pin all shared host memory (enables async copies); failure here is
    // non-fatal, transfers just fall back to pageable-memory speed.
    cerror = cudaHostRegister(memoryMap,
                              memoryMapSize,
                              cudaHostRegisterDefault) ;
    if (cerror != cudaSuccess) {
      LOGERROR
      << "CUDA generated an error while pinning the shared host memory: '"
      << cudaGetErrorString(cerror) << '\'' ;
    } else {
      LOG(2) << "pinned shared memory" ;
      memoryMapIsCudaRegistered = true ;
    }
  }
#endif
  return vl::VLE_Success ;
}
// Register the tensors of peer process <lab> with this space, recording
// where each of its tensor copies lives in the shared memory map.
vl::ErrorCode
SharedTensorSpace::attachPeer(int lab)
{
  // Lazily size the per-tensor peer table on first use.
  if (peerTensors.size() != tensors.size()) {
    peerTensors.resize(tensors.size()) ;
  }
  for (size_t t = 0 ; t < tensors.size() ; ++t) {
    SharedTensorPeerInstance instance ;
    instance.lab = lab ;
    instance.state = SharedTensorSpace::ready ;
    // The peer's copy sits at the same per-tensor offset, inside the
    // peer lab's stride of the shared map.
    instance.mappedCpuMemory =
      (char*)memoryMap + tensors[t].memoryMapOffset + lab * memoryMapLabStride ;
    instance.accumulated = false ;
    peerTensors[t].push_back(instance) ;
  }
  return vl::VLE_Success ;
}
// Destroy all resources
// 1) unmap and unlink shared memory map
// 2) release GPU events, streams, dispatch buffer and owned tensor memory
// Idempotent: safe to call from both reset and the destructor.
void SharedTensorSpace::finalize()
{
  int error ;
  initialized = false ;
#if ENABLE_GPU
  // Unpin the host map before unmapping it.
  if (memoryMap && memoryMapIsCudaRegistered) {
    cudaHostUnregister(memoryMap) ;
  }
  //  if (gpuHelperEventInitialized) {
  //    cudaEventDestroy(gpuHelperEvent) ;
  //    gpuHelperEventInitialized = false ;
  //  }
  if (gpuHelperStreamInitialized) {
    cudaStreamDestroy(gpuHelperStream) ;
    gpuHelperStream = 0 ;
    gpuHelperStreamInitialized = false ;
  }
  if (gpuDispatchMemory) {
    cudaFree(gpuDispatchMemory) ;
    gpuDispatchMemory = NULL ;
  }
  // Free only tensor GPU buffers we allocated ourselves (not inplace ones).
  for (tensors_t::iterator T = tensors.begin() ;
       T != tensors.end() ;
       T++)
  {
    if (T->gpuMemory && T->gpuMemoryIsOwned) {
      cudaFree(T->gpuMemory) ;
      T->gpuMemory = NULL ;
      T->gpuMemoryIsOwned = false ;
    }
    if (T->gpuEventIsInitialized) {
      cudaEventDestroy(T->gpuEvent) ;
      T->gpuEvent = 0 ;
      T->gpuEventIsInitialized = false ;
    }
  }
  gpuDevice = -1 ;
#endif
  if (memoryMap) {
    munmap(memoryMap, memoryMapSize) ;
    memoryMap = NULL ;
  }
  if (memoryMapFD != -1) {
    // This should have been closed right after mmap().
    close(memoryMapFD) ;
    memoryMapFD = -1 ;
  }
  error = shm_unlink(memoryMapName.c_str()) ;
  if (error == -1 && errno == EACCES) {
    LOGERROR << "Cannot clear the shared memory map due to a permission error." ;
  }
  tensors.clear() ;
  numLabs = -1 ;
}
// For debugging: log (at verbosity 0, i.e. unconditionally) the state and
// transaction of every tensor and of every known peer copy.
void SharedTensorSpace::dump() const
{
  for (int tensorIndex = 0 ; tensorIndex < tensors.size() ; ++tensorIndex) {
    SharedTensorInstance const & T = tensors[tensorIndex] ;
    char const * stateString ;
    switch (T.state) {
      case ready: stateString="ready" ; break ;
      case accumulateChildren: stateString="accumulateChildren" ; break ;
      case waitParent: stateString="waitParent" ; break ;
      case waitChildren: stateString="waitChildren" ; break ;
    }
    LOG(0)<<"Tensor " << T.name ;
    LOG(0)<<"\tState: " << stateString ;
    LOG(0)<<"\ttransaction: "<<T.transaction ;
    // peerTensors may still be empty if no peer attached yet.
    if (peerTensors.size() > tensorIndex) {
      for (int p = 0 ; p < peerTensors[tensorIndex].size() ; ++p) {
        SharedTensorPeerInstance const & PT = peerTensors[tensorIndex][p] ;
        switch (PT.state) {
          case ready: stateString="ready" ; break ;
          case accumulateChildren: stateString="accumulateChildren" ; break ;
          case waitParent: stateString="waitParent" ; break ;
          case waitChildren: stateString="waitChildren" ; break ;
        }
        LOG(0)<<"\tPeer on lab " << PT.lab << ": " << stateString;
        LOG(0)<<"\t\ttransaction:" << PT.transaction ;
      }
    }
  }
}
// Print a human-readable summary of the shared space to the MATLAB console.
// Must run on the MATLAB thread (uses mexPrintf).
void SharedTensorSpace::mexPrint() const
{
  mexPrintf("\tlab %d of %d\n", lab, numLabs) ;
  // Fix: memoryMapSize is a size_t; it was printed with %d, which truncates
  // (and misaligns varargs) for maps over 2 GB. Use %zu.
  mexPrintf("\tshared memory: '%s', %zu bytes mapped at address: 0x%zx\n",
            memoryMapName.c_str(),memoryMapSize,memoryMap) ;
  for (int tensorIndex = 0 ; tensorIndex < tensors.size() ; ++tensorIndex) {
    SharedTensorInstance const & T = tensors[tensorIndex] ;
    mexPrintf("\tTensor '%s'\n", T.name.c_str()) ;
    mexPrintf("\t\t[") ;
    for (int k = 0 ; k < T.descriptor.shape.getNumDimensions() ; ++k) {
      mexPrintf(" %d", T.descriptor.shape.getDimensions()[k]) ;
    }
    // Fix: the device label had been mangled to "VLDT_GPU" by a blind
    // search-and-replace; report the device as "GPU".
    mexPrintf("] %s %s\n",
              T.descriptor.dataType == vl::VLDT_Double?"double":"single",
              T.descriptor.deviceType == vl::VLDT_CPU?"CPU":"GPU") ;
    mexPrintf("\t\tCPU address: 0x%zx\n", T.cpuMemory) ;
    mexPrintf("\t\tGPU address: 0x%zx\n", T.gpuMemory) ;
    if (peerTensors.size() > tensorIndex) {
      for (int p = 0 ; p < peerTensors[tensorIndex].size() ; ++p) {
        SharedTensorPeerInstance const & PT = peerTensors[tensorIndex][p] ;
        mexPrintf("\t\tPeer instance %d\n", p) ;
        mexPrintf("\t\t\tlab: %0d\n", PT.lab) ;
        mexPrintf("\t\t\tmapped CPU address: 0x%zx\n",PT.mappedCpuMemory) ;
      }
    }
  }
}
/* ---------------------------------------------------------------- */
/* ProcessPool */
/* ---------------------------------------------------------------- */
#pragma mark -
/// Represents a pool of collaborating MATLAB processes. Usually each
/// process corresponds to a certain MATLAB instance in a MATLAB pool.
class ProcessPool
{
public:
/// Create an un-initialized ProcessPool. Before it is used,
/// the pool must be initialized using init(). This design allows
/// to catch errors during initialization without resorting to exceptions.
ProcessPool() ;
/// Automatically calls ::finalize().
~ProcessPool() ;
/// Initialize the instance \a lab of \a numLabs pools. The function
/// timesout.
vl::ErrorCode init(std::string const & prefix, int lab,
int numLabs, SharedTensorSpace * space) ;
/// Gracefully shutdown the connection with the other processes,
/// waiting for them to finish updating as needed. After this, the
/// supervisory thread quits, but the object remains initialized
/// to allow reading off the final value of the tensor.
///
/// The function timesout.
vl::ErrorCode shutdown() ;
/// Immediately terminate the ProcessPool instance and release all
/// resources.
void finalize() ;
/// Print information.
///
/// This function must be called from the MATLAB thread.
void mexPrint() const ;
/// Push a tensor in the pool for accumulation.
///
/// This function must be called from the MATLAB thread. It throws
/// a MEX error on error and can time out.
void mexPush(std::string const & name, mxArray const * x,
bool inplace = false) ;
/// Pull an accumulated tensor from the pool.
///
/// This function must be called from the MATLAB thread. It throws
/// a MEX error on error and an time out.
mxArray * mexPull(std::string const & name, bool inplace = false) ;
/// Check whether the instance is intialized or not.
bool isInitialized() const { return initialized ; }
private:
bool initialized ;
std::string prefix ;
int lab ;
int numLabs ;
size_t timeoutInterval ;
SharedTensorSpace * sharedSpace ;
// Messages between peer processes.
struct Message
{
enum MessageType {
/// Sent from root to leaves to request initialization during
/// hanshake.
init,
/// Sent from leaves to root to acknowledge initialization.
initDone,
/// Sent from root to leaves to request attching the shared
/// resources (shared memory).
attach,
/// Sent to advertise a state change for a tensor.
tensorStateChange,
/// Shutdown sequence
requestShutdown,
/// Communicate the final transaction index for quitting.
tensorFinalTransaction
}
type ;
/// The transaction number.
size_t transaction ;
/// The final transaction number.
size_t finalTransaction ;
// Sender and destination process indexes.
int16_t from ;
int16_t to ;
// Session identifier, used for sanity checks.
uint32_t session ;
// Tensort ID and state for a tensor state change.
uint32_t tensorId ;
SharedTensorSpace::SharedTensorState tensorState ;
Message() : transaction(0), finalTransaction((size_t)-1), tensorId(0) { }
} ;
class Supervisor {
public:
Supervisor(ProcessPool& pool)
: pool(pool), thread(NULL), state(down),
socketFD(-1) { pipeFD[0] = -1 ; pipeFD[1] = -1 ; }
~Supervisor() { finalize() ; }
vl::ErrorCode init() ;
void finalize() ;
vl::ErrorCode shutdown() ;
vl::ErrorCode beginTransaction(int tensorIndex) ;
vl::ErrorCode waitTensor(int tensorIndex) ;
private:
ProcessPool & pool ;
tthread::thread * thread ;
enum State {
connecting,
running,
shuttingDown,
down} state ;
// Peer processes.
struct Peer
{
int lab ;
int socketFD ;
bool cudaCanAccessPeer ; //cudaDeviceCanAccessPeer
bool shutdownRequested ;
Peer(int lab)
: lab(lab), socketFD(-1),
cudaCanAccessPeer(false),
shutdownRequested(false)
{ }
bool operator== (int lab) { return this->lab == lab ; }
} ;
typedef std::vector<Peer> peers_t ;
peers_t peers ;
// Comms.
uint32_t session ;
int pipeFD [2] ;
int socketFD ;
tthread::mutex mutex ;
tthread::condition_variable waitingList ;
bool shutdownRequested ; // local
bool forceQuit ;
static void threadEntryPoint(void * thing) ;
void entryPoint() ;
vl::ErrorCode connect() ;
void disconnect() ;
vl::ErrorCode handshake() ;
vl::ErrorCode loop() ;
vl::ErrorCode send(Message &msg, int to) ;
vl::ErrorCode receive(Message &msg, int from, int timeout = -1) ;
vl::ErrorCode handleAccumulateChildren(int tensorIndex) ;
vl::ErrorCode handleWaitParent(int tensorIndex) ;
vl::ErrorCode handleWaitChildren(int tensorIndex) ;
} supervisor ;
} ;
// Construct an uninitialized ProcessPool.
//
// BUGFIX: `sharedSpace` (and `timeoutInterval`) were previously left
// uninitialized. The destructor calls finalize(), which tests and
// deletes `sharedSpace`; on a never-init()-ed instance that read an
// indeterminate pointer (undefined behavior). All members inspected by
// finalize() now start with defined values.
ProcessPool::ProcessPool()
: supervisor(*this),
initialized(false),
lab(-1), numLabs(0),
timeoutInterval(0),
sharedSpace(NULL)
{ }
// Destructor: releases every resource by delegating to finalize().
ProcessPool::~ProcessPool()
{
  this->finalize() ;
}
// Initialize this pool as lab `newLab` out of `newNumLabs`, taking
// ownership of `newSharedSpace`. Any previous state is torn down
// first. The pool is marked initialized only if the supervisory
// thread starts up successfully; the resulting error code is returned.
vl::ErrorCode ProcessPool::init(std::string const & newPrefix, int newLab, int newNumLabs, SharedTensorSpace * newSharedSpace)
{
  assert(newSharedSpace) ;
  assert(newLab >= 0) ;
  assert(newNumLabs > newLab) ;

  // Drop any previously initialized state.
  finalize() ;

  // Record the pool configuration.
  prefix = newPrefix ;
  lab = newLab ;
  numLabs = newNumLabs ;
  sharedSpace = newSharedSpace ;
  timeoutInterval = 30UL * 1000UL * 1000UL ; // 30s in us

  // Spin up the supervisory thread; success flips the initialized flag.
  vl::ErrorCode error = supervisor.init() ;
  initialized = (error == vl::VLE_Success) ;
  return error ;
}
// Gracefully shut down the pool; simply forwards to the supervisory
// thread, which coordinates the shutdown protocol with the peers.
// May time out (see Supervisor::shutdown()).
vl::ErrorCode ProcessPool::shutdown()
{
  return supervisor.shutdown() ;
}
// Immediately release all resources. The supervisory thread is stopped
// first, since it may still be using the shared tensor space; only
// then is the shared space finalized and deleted. Finally the pool is
// reset to its pristine (uninitialized) configuration.
void ProcessPool::finalize()
{
  supervisor.finalize() ;
  if (sharedSpace != NULL) {
    sharedSpace->finalize() ;
    delete sharedSpace ;
    sharedSpace = NULL ;
  }
  initialized = false ;
  numLabs = 0 ;
  lab = -1 ;
}
// Print information about the shared tensor space.
//
// This function must be called from the MATLAB thread.
//
// BUGFIX: the original body contained
//   tthread::lock_guard<tthread::mutex> (mutex) ;
// which does not lock anything: C++ parses it as the *declaration* of a
// local, default-constructed guard named `mutex` (ProcessPool has no
// `mutex` member, so it could not have referred to one). The no-op
// statement has been removed. NOTE(review): proper locking would need
// access to the supervisor's private mutex -- TODO confirm whether
// synchronization is actually required here.
void ProcessPool::mexPrint() const
{
  if (sharedSpace) {
    sharedSpace->mexPrint() ;
  } else {
    mexPrintf("Uninitialized.") ;
  }
}
// Push a tensor into the pool for accumulation.
//
// Looks up the tensor by name, validates that the MATLAB array `x`
// matches the registered descriptor (device type, data type, number of
// elements), waits for the tensor to become `ready`, copies the data
// into the shared space (or aliases MATLAB's GPU buffer when `inplace`
// is true), and finally opens a new accumulation transaction.
//
// Must be called from the MATLAB thread; throws a MEX error on failure
// and can time out.
void ProcessPool::mexPush(std::string const & name,
                          mxArray const * x,
                          bool inplace)
{
  // Search tensor by name.
  SharedTensorSpace::tensors_t::iterator T
  = std::find(sharedSpace->tensors.begin(), sharedSpace->tensors.end(), name) ;
  if (T == sharedSpace->tensors.end()) {
    vlmxError(VLMXE_IllegalArgument, "There is no tensor '%s'.", name.c_str()) ;
  }

  // Encapsulate MATLAB argument and check tensor compatibility.
  vl::MexTensor mtens(context) ;
  mtens.init(x) ;

  if (mtens.getDeviceType() != T->descriptor.deviceType) {
    vlmxError(VLMXE_IllegalArgument, "The tensor device type is incorrect.") ;
  }

  if (mtens.getDataType() != T->descriptor.dataType) {
    vlmxError(VLMXE_IllegalArgument, "The tensor data type is incorrect.") ;
  }

  if (mtens.getNumElements() != T->descriptor.shape.getNumElements()) {
    vlmxError(VLMXE_IllegalArgument, "The tensor shape is incorrect.") ;
  }

  if (inplace && T->descriptor.deviceType != vl::VLDT_GPU) {
    vlmxError(VLMXE_IllegalArgument, "Inplace operations are supported only for VLDT_GPU arrays.") ;
  }

  // Wait until the tensor is in ready state.
  vl::ErrorCode error = supervisor.waitTensor(T - sharedSpace->tensors.begin()) ;
  if (error != vl::VLE_Success) {
    vlmxError(VLMXE_Execution, "Timeout or disconnected while waiting for tensor '%s' to become ready.", T->name.c_str()) ;
  }

  // Copy memory to SharedSpace.
  if (T->descriptor.deviceType == vl::VLDT_CPU) {
    memcpy(T->cpuMemory, mtens.getMemory(), T->descriptor.getSizeInBytes()) ;
  } else {
#if ENABLE_GPU
    cudaError_t cerror ;
    // Sync with the main thread's stream: do not touch the data until
    // the parameters have been computed.
    cudaEventRecord(T->gpuEvent, 0) ;
    cudaStreamWaitEvent(sharedSpace->gpuHelperStream, T->gpuEvent, 0) ;
    if (inplace) {
      if (T->gpuMemoryIsOwned && T->gpuMemory) {
        // Free the previously allocated memory as we are going to use
        // an inplace operation on this tensor.
        cudaFree(T->gpuMemory) ;
        T->gpuMemory = NULL ;
      }
      T->gpuMemoryIsOwned = false ;
      T->gpuMemory = mtens.getMemory() ;
    } else {
      if (T->gpuMemoryIsOwned == false || T->gpuMemory == NULL) {
        cerror = cudaMalloc(&T->gpuMemory,
                            T->descriptor.getSizeInBytes()) ;
        if (cerror != cudaSuccess) {
          T->gpuMemory = NULL ;
          T->gpuMemoryIsOwned = false ;
          vlmxError(VLMXE_Alloc, "CUDA error while allocating VLDT_GPU memory (%s).",
                    cudaGetErrorString(cerror)) ;
        }
        T->gpuMemoryIsOwned = true ;
      }
      // BUGFIX: this copy must run on *every* push. Previously it was
      // nested inside the first-allocation branch above, so any push
      // after the first (buffer already owned) silently skipped the
      // copy and stale data was accumulated.
      cerror = cudaMemcpyAsync (T->gpuMemory,
                                mtens.getMemory(),
                                T->descriptor.getSizeInBytes(),
                                cudaMemcpyDeviceToDevice,
                                sharedSpace->gpuHelperStream) ;
      if (cerror != cudaSuccess) {
        vlmxError(VLMXE_Execution, "CUDA error while copying VLDT_GPU data (%s).",
                  cudaGetErrorString(cerror)) ;
      }
    }
#endif
  }
  // Open a new accumulation transaction for this tensor.
  supervisor.beginTransaction(T - sharedSpace->tensors.begin()) ;
}
// Pull an accumulated tensor from the pool.
//
// Waits until the tensor reaches the `ready` state, then either returns
// NULL (inplace mode: the caller's GPU array already holds the result)
// or copies the shared-space value into a fresh MATLAB array and
// returns it (ownership relinquished to the caller).
//
// Must be called from the MATLAB thread; throws a MEX error on failure
// and can time out.
mxArray * ProcessPool::mexPull(std::string const & name, bool inplace)
{
  // Search the tensor with the specified name.
  SharedTensorSpace::tensors_t::const_iterator T
  = std::find(sharedSpace->tensors.begin(), sharedSpace->tensors.end(), name) ;
  if (T == sharedSpace->tensors.end()) {
    vlmxError(VLMXE_IllegalArgument, "There is no tensor with the specified name.") ;
  }
  if (inplace && T->descriptor.deviceType != vl::VLDT_GPU) {
    vlmxError(VLMXE_IllegalArgument, "Inplace operations are supported only for VLDT_GPU arrays.") ;
  }
  // Wait until the tensor is in ready state
  vl::ErrorCode error = supervisor.waitTensor(T - sharedSpace->tensors.begin()) ;
  if (error != vl::VLE_Success) {
    vlmxError(VLMXE_Execution, "Timeout or disconnected while waiting for tensor '%s' to become ready.", T->name.c_str()) ;
  }
  if (inplace) {
    // With in-place operations, the only purpose of pull() is to wait until
    // the tensor is ready and can be accessed.
    return NULL ;
  } else {
    vl::MexTensor result(context) ;
    result.init(T->descriptor.deviceType, T->descriptor.dataType, T->descriptor.shape) ;
    if (T->descriptor.deviceType == vl::VLDT_CPU) {
      memcpy(result.getMemory(),
             T->cpuMemory,
             T->descriptor.getSizeInBytes()) ;
    } else {
#if ENABLE_GPU
      // Device-to-device copy on the helper stream, then synchronize so
      // the result is complete before it is handed back to MATLAB.
      cudaError_t cerror = cudaMemcpyAsync (result.getMemory(),
                                            T->gpuMemory,
                                            T->descriptor.getSizeInBytes(),
                                            cudaMemcpyDeviceToDevice,
                                            sharedSpace->gpuHelperStream) ;
      if (cerror != cudaSuccess) {
        vlmxError(VLMXE_Execution, "CUDA generated an error while copying VLDT_GPU data: '%s'.",
                  cudaGetErrorString(cerror)) ;
      }
      cerror = cudaStreamSynchronize(sharedSpace->gpuHelperStream) ;
      if (cerror != cudaSuccess) {
        vlmxError(VLMXE_Execution, "CUDA generated an error while synchronizing a stream: '%s'.",
                  cudaGetErrorString(cerror)) ;
      }
#endif
    }
    // Transfer ownership of the MATLAB array to the caller.
    return result.relinquish() ;
  }
}
/* ---------------------------------------------------------------- */
/* ProcessPool::Supervisor */
/* ---------------------------------------------------------------- */
#pragma mark -
#undef LOGERROR
#define LOGERROR \
vl::Logger().getStream() \
<<"[error]"<<__func__<<"::lab "<<pool.lab<<"::"
#undef LOG
#define LOG(level) \
if (verbosity < level) { } \
else vl::Logger().getStream() \
<<"[info] "<<__func__<<"::lab "<<pool.lab<<"::"
void ProcessPool::Supervisor::threadEntryPoint(void * thing)
{
((ProcessPool::Supervisor*)thing)->entryPoint() ;
}
// Start the supervisory thread and wait for it to finish connecting.
//
// The labs are arranged in a binary-tree overlay derived from the lab
// index bits: the parent of a lab is obtained by clearing its lowest
// set bit, and its children are the labs obtained by setting any bit
// below the lowest set bit (lab 0, the root, has children 1, 2, 4, ...).
vl::ErrorCode ProcessPool::Supervisor::init()
{
  vl::ErrorCode error = vl::VLE_Success ;
  finalize() ;

  // Infer parent and children labs.
  int bit = ffs(pool.lab) - 1 ;
  if (bit == -1) { bit = 31 ; }  // lab 0 (root): every bit position yields a child
  int parent = pool.lab & (~(1 << bit)) ;
  if (parent != pool.lab) {
    // peers[0] always contain the parent (except for root)
    peers.push_back(Peer(parent)) ;
  }
  for (int k = 0 ; k < bit ; ++k) {
    int child = pool.lab | (1 << k) ;
    if (child < pool.numLabs) {
      // Which peers[] gets which children is determined later
      // during handshake based on the random connection order.
      // Here we assign a provisional lab index using negative indexes
      // as these are needed to use send().
      peers.push_back(Peer(-child)) ;
    }
  }

  state = connecting ;
  shutdownRequested = false ;
  forceQuit = false ;
  thread = new tthread::thread(threadEntryPoint, this) ;

  // Wait for initialization to be complete: the thread advertises its
  // progress through `state` and `waitingList`.
  {
    tthread::lock_guard<tthread::mutex> lock(mutex) ;
    while (state == connecting) {
      waitingList.wait(mutex) ;
    }
    if (state == running) {
      error = vl::VLE_Success ;
    } else {
      error = vl::VLE_Unknown ;
    }
  }
  return error ;
}
// Immediately stop the supervisory thread and release peer bookkeeping.
//
// The thread may be blocked in poll(); setting `forceQuit` and writing
// a byte to the self-pipe wakes it up so that join() can complete.
void ProcessPool::Supervisor::finalize()
{
  if (thread) {
    {
      tthread::lock_guard<tthread::mutex> lock(mutex) ;
      forceQuit = true ;
      if (pipeFD[1] >= 0) {
        char dummy = 1 ;
        write(pipeFD[1], &dummy, 1) ;
      }
    }
    if (thread->joinable()) {
      thread->join() ;
    }
    delete thread ;
    thread = NULL ;
  }
  peers.clear() ;
}
// Request a graceful shutdown and wait (with timeout) for the
// supervisory thread to reach the `down` state.
vl::ErrorCode ProcessPool::Supervisor::shutdown()
{
  // Signal the supervisory thread via the flag and the self-pipe.
  shutdownRequested = true ;
  char dummy = 1 ;
  write(pipeFD[1], &dummy, 1) ;

  // Wait for shutdown to complete.
  // NOTE(review): `timeoutInterval` is documented as microseconds at
  // its assignment, while getTime()'s unit is not visible here --
  // TODO confirm the units agree.
  {
    size_t start = vl::getTime() ;
    tthread::lock_guard<tthread::mutex> lock(mutex) ;
    while (state != down) {
      if (vl::getTime() > start + pool.timeoutInterval) {
        LOGERROR << "timeout while shutting down" ;
        return vl::VLE_Timeout ;
      }
      waitingList.wait(mutex) ;
    }
  }
  return vl::VLE_Success ;
}
// Open a new accumulation transaction for the given tensor.
//
// Called from the MATLAB thread. The tensor bookkeeping is written
// lock-free: the state change to `accumulateChildren` must be the last
// write (it is what the supervisory thread tests), hence the compiler
// memory barrier before it.
vl::ErrorCode ProcessPool::Supervisor::beginTransaction(int tensorIndex)
{
  vl::ErrorCode error = vl::VLE_Success ;
  SharedTensorSpace::SharedTensorInstance & T = pool.sharedSpace->tensors[tensorIndex] ;
  T.transaction ++ ;
  // Count the children whose contributions must still be accumulated
  // (peers[0] is the parent except on the root, hence the start index).
  T.numChildrenToAccumulate = 0 ;
  for (int p = (pool.lab > 0) ; p < peers.size() ; ++p) {
    SharedTensorSpace::SharedTensorPeerInstance & PT = pool.sharedSpace->peerTensors[tensorIndex][p] ;
    PT.accumulated = false ;
    T.numChildrenToAccumulate += 1;
  }
  asm volatile("": : :"memory") ; // Memory barrier: prevents compiler from reordering
  T.state = SharedTensorSpace::accumulateChildren ; // Must be last to close transaction
  // Signal the supervisory thread through the self-pipe.
  {
    tthread::lock_guard<tthread::mutex> lock(mutex) ;
    char dummy = 1 ;
    write(pipeFD[1], &dummy, 1) ;
  }
  return error ;
}
// Block the calling (MATLAB) thread until the tensor reaches the
// `ready` state. Returns VLE_Timeout after `timeoutInterval`, or
// VLE_Unknown if the supervisory thread is no longer running. The
// supervisory thread emits periodic heartbeats on `waitingList`, so
// the timeout condition is re-checked regularly.
vl::ErrorCode ProcessPool::Supervisor::waitTensor(int tensorIndex)
{
  SharedTensorSpace::SharedTensorInstance & T = pool.sharedSpace->tensors[tensorIndex] ;
  size_t start = vl::getTime() ;
  tthread::lock_guard<tthread::mutex> lock(mutex) ;
  while (T.state != SharedTensorSpace::ready) {
    if ((vl::getTime() - start) > pool.timeoutInterval) {
      return vl::VLE_Timeout ;
    }
    if (state != running) {
      return vl::VLE_Unknown ;
    }
    waitingList.wait(mutex) ;
  }
  return vl::VLE_Success ;
}
// Send a fixed-size Message to peer `to`, stamping it with the session
// id and the sender/receiver lab indexes. Returns VLE_Unknown on a
// write error.
//
// BUGFIX: the write cursor `nextByte` was never advanced, so a partial
// write() resent the message from the beginning and corrupted the byte
// stream. The cursor now advances by the number of bytes written.
vl::ErrorCode ProcessPool::Supervisor::send(Message & msg, int to)
{
  // Find connection to peer.
  peers_t::const_iterator rel = std::find(peers.begin(), peers.end(), to) ;
  assert(rel != peers.end()) ;

  // Add complementary information to the message.
  msg.session = session ;
  msg.from = pool.lab ;
  msg.to = to ;

  // Send all bytes, resuming after partial writes.
  // NOTE(review): the socket is non-blocking, so write() may also fail
  // with EAGAIN on a full buffer; that is treated as fatal here, as in
  // the original code -- TODO confirm messages always fit the buffer.
  int bytesWritten = 0 ;
  int status ;
  char * nextByte = (char*)&msg ;
  while (bytesWritten < sizeof(msg)) {
    status = write(rel->socketFD, nextByte, sizeof(msg) - bytesWritten) ;
    if (status == -1) {
      LOGERROR
      << "could not send message to " << to
      << " because '" << strerror(errno) << '\'' ;
      return vl::VLE_Unknown ;
    }
    bytesWritten += status ;
    nextByte += status ;
  }
  LOG(3)
  << "sent message to " << to
  << " (type " << msg.type
  << ", state " << msg.tensorState
  << " tensor " << msg.tensorId
  << ')' ;
  return vl::VLE_Success ;
}
// Receive a fixed-size Message from peer `from`.
//
// timeout semantics (microseconds): < 0 uses the pool default; 0 means
// non-blocking (returns VLE_NoData if no bytes are available); > 0
// polls until the deadline and returns VLE_Timeout on expiry.
//
// BUGFIX 1: the read cursor `nextByte` was never advanced, so a partial
// read() overwrote the start of the buffer instead of appending,
// yielding garbled messages. The cursor now advances by the bytes read.
// BUGFIX 2: the integrity check combined the session/from/to
// comparisons with `&&`, so it only fired when *all three* fields were
// wrong; any single mismatch should be rejected, hence `||`.
vl::ErrorCode ProcessPool::Supervisor::receive(Message & msg, int from, int timeout)
{
  size_t waited = 0 ; // us
  size_t const pollInterval = 1000 ; // us
  if (timeout < 0) { timeout = pool.timeoutInterval ; } // us

  // Find connection to peer.
  peers_t::const_iterator rel = std::find(peers.begin(), peers.end(), from) ;
  assert(rel != peers.end()) ;

  // Receive all bytes, resuming after partial reads.
  {
    int bytesRead = 0 ;
    int status ;
    char * nextByte = (char*)&msg ;
    while (bytesRead < sizeof(msg)) {
      status = read(rel->socketFD, nextByte, sizeof(msg) - bytesRead) ;
      if (status == 0 || status == -1) {
        if (status == 0 || errno == EAGAIN) {
          // No data yet (non-blocking socket) or peer idle.
          if (timeout == 0 && bytesRead == 0) {
            // non blocking operation, no message, just return no data
            return vl::VLE_NoData ;
          }
          if (timeout > 0 && waited >= timeout) {
            if (verbosity >= 1) {
              LOGERROR
              << "timed out while receiving a message from lab " << from
              << " because '" << strerror(errno) << '\'' ;
            }
            return vl::VLE_Timeout ;
          }
          usleep(pollInterval) ;
          waited += pollInterval ;
          continue ;
        }
        if (verbosity >= 1) {
          LOGERROR
          << "error while receiving a message from lab " << from
          << ": '" << strerror(errno) << '\'' ;
        }
        return vl::VLE_Unknown ;
      }
      bytesRead += status ;
      nextByte += status ;
    }
  }

  // Check message integrity. Handshake messages (init/initDone) are
  // exempt because the session id is not yet agreed upon.
  if ((msg.type != Message::init &&
       msg.type != Message::initDone)
      && (msg.session != session ||
          msg.from != from ||
          msg.to != pool.lab)) {
    LOGERROR
    << "received an unexpected message from lab " << from
    << "\n\tmsg: session:" << msg.session
    << " from:" << msg.from
    << " to:" << msg.to
    << " type:" << msg.type
    << "\n\tthis session:" << this->session ;
    return vl::VLE_Unknown ;
  }
  LOG(3)
  << "received message from "<<from
  << " (type " << msg.type
  << ", state " << msg.tensorState
  << ", tensor " << msg.tensorId
  << ')' ;
  return vl::VLE_Success ;
}
/// Establish connections with the peers.
///
/// Creates the self-pipe used by the main thread to wake this thread,
/// then (if this lab has children) creates a listening UNIX domain
/// socket named after the pool prefix and this lab's index and accepts
/// one connection per child; finally (if this lab is not the root)
/// connects to the parent's socket, retrying until the parent has
/// created it or the timeout expires. All sockets are switched to
/// non-blocking mode.
vl::ErrorCode ProcessPool::Supervisor::connect()
{
  vl::ErrorCode error = vl::VLE_Success ;
  int result ;
  char socketName [256] ;
  struct sockaddr_un socketAddress ;
  size_t start = vl::getTime() ;
  pipeFD[0] = -1 ;
  pipeFD[1] = -1 ;
  socketFD = -1 ;

  // Lock for entire duration of connect()
  tthread::lock_guard<tthread::mutex> lock(mutex) ;

  // Advertise
  state = connecting ;
  waitingList.notify_all() ;

  // Create a pipe FD for notification between MATLAB's thread
  // and the supervisory thread. This is needed to allow awaking
  // the supervisory thread.
  result = pipe(pipeFD) ;
  if (result == -1) {
    pipeFD[0] = -1 ;
    pipeFD[1] = -1 ;
    LOGERROR
    << "cannot create inter-threads pipe because: '"
    << strerror(errno) << '\'' ;
    return vl::VLE_Unknown ;
  }

  // Create a socket and connect children.
  size_t numChildren = peers.size() - (pool.lab > 0) ;
  if (numChildren > 0) {
    // Get a UNIX domain socket.
    snprintf(socketName, sizeof(socketName)/sizeof(socketName[0]),
             "/%s/%s-socket-%02d", P_tmpdir, pool.prefix.c_str(), pool.lab) ;
    socketFD = socket(AF_UNIX, SOCK_STREAM, 0) ;
    if (socketFD == -1) {
      LOGERROR
      << "cannot create socket " << socketName
      << "because: " << strerror(errno) ;
      return vl::VLE_Unknown ;
    }
    // Copy socket path into socketAddress.
    memset(&socketAddress, 0, sizeof(socketAddress)) ;
    socketAddress.sun_family = AF_UNIX;
    strncpy(socketAddress.sun_path, socketName,
            sizeof(socketAddress.sun_path) - 1) ;
    // Delete socket path if it exists before binding.
    if (access(socketAddress.sun_path, F_OK) == 0) {
      unlink(socketAddress.sun_path) ;
    }
    // Bind socket to address.
    result = bind(socketFD,
                  (struct sockaddr *)&socketAddress,
                  sizeof(socketAddress)) ;
    if (result == -1) {
      LOGERROR
      << "cannot bind socket " << socketName
      << "because: " << strerror(errno) ;
      return vl::VLE_Unknown ;
    }
    // Start listening for children connections
    result = listen(socketFD, numChildren) ;
    if (result == -1) {
      LOGERROR
      << "cannot listen to socket " << socketName
      << "because: " << strerror(errno) ;
      return vl::VLE_Unknown ;
    }
    // Do not block on accept().
    fcntl(socketFD, F_SETFL, fcntl(socketFD, F_GETFL, 0) | O_NONBLOCK);
    // Accept one connection per child, polling until the timeout.
    for (int p = (pool.lab > 0) ; p < peers.size() ; ++p) {
      peers[p].socketFD = -1 ;
      for (;;) {
        peers[p].socketFD = accept(socketFD, NULL, NULL) ;
        if (peers[p].socketFD == -1) {
          if (errno == EAGAIN || errno == EWOULDBLOCK) {
            if (vl::getTime() < start + pool.timeoutInterval) continue ; // retry
            LOGERROR
            << "timed out while accepting connection from peer " << peers[p].lab ;
            error = vl::VLE_Timeout ;
            goto done ;
          }
          LOGERROR
          << " cannot accept connection from peer " << peers[p].lab
          << " because: " << strerror(errno) ;
          error = vl::VLE_Unknown ;
          goto done ;
        }
        break ;
      }
      fcntl(peers[p].socketFD, F_SETFL,
            fcntl(peers[p].socketFD ,F_GETFL, 0) | O_NONBLOCK) ;
    }
  }

  // Connect parent.
  if (pool.lab > 0) {
    snprintf(socketName, sizeof(socketName)/sizeof(socketName[0]),
             "/%s/%s-socket-%02d", P_tmpdir, pool.prefix.c_str(), peers[0].lab) ;
    for (;;) {
      peers[0].socketFD = socket(AF_UNIX, SOCK_STREAM, 0) ;
      if (peers[0].socketFD == -1) {
        if (vl::getTime() < start + pool.timeoutInterval) {
          // Wait for parent to create socket file.
          usleep(100UL * 1000UL) ; // 100 ms (10 times a second)
          continue ;
        }
        LOGERROR
        << "cannot create socket '" << socketName
        << "' because '" << strerror(errno) << '"' ;
        error = vl::VLE_Unknown ;
        goto done ;
      }
      break ;
    }
    fcntl(peers[0].socketFD, F_SETFL,
          fcntl(peers[0].socketFD ,F_GETFL, 0) | O_NONBLOCK) ;
    // Copy socket path into socketAddress.
    memset(&socketAddress, 0, sizeof(socketAddress)) ;
    socketAddress.sun_family = AF_UNIX;
    strncpy(socketAddress.sun_path, socketName,
            sizeof(socketAddress.sun_path) - 1) ;
    // Establish connection with parent, retrying until it accepts.
    for (int trials = 0 ; ; ++trials) {
      int result = ::connect(peers[0].socketFD,
                             (struct sockaddr *)&socketAddress,
                             sizeof(socketAddress)) ;
      if (result == 0) break ;
      if (vl::getTime() < start + pool.timeoutInterval) {
        // Wait for parent to start accepting connections.
        usleep(100UL * 1000UL) ; // 100 ms (10 times a second)
        continue ;
      }
      LOGERROR
      << "cannot connect socket " << socketName
      << " after trying " << trials
      << " times because '" << strerror(errno) << '"' ;
      error = vl::VLE_Unknown ;
      goto done ;
    }
  }
done:
  return error ;
}
// Tear down all communication resources: peer sockets, the listening
// socket (whose file-system entry is also unlinked), and the
// inter-thread notification pipe. Finally advertise the `down` state
// to any thread waiting on the condition variable.
void ProcessPool::Supervisor::disconnect()
{
  // Hold the lock for the entire teardown.
  tthread::lock_guard<tthread::mutex> lock(mutex) ;

  // Close every peer connection.
  for (int p = 0 ; p < peers.size() ; ++p) {
    int & peerFD = peers[p].socketFD ;
    if (peerFD != -1) {
      close(peerFD) ;
      peerFD = -1 ;
    }
  }

  // Close the listening socket, if any.
  if (socketFD != -1) {
    close(socketFD) ;
    socketFD = -1 ;
  }

  // Remove this lab's socket file from the file system.
  char socketName [256] ;
  snprintf(socketName, sizeof(socketName)/sizeof(socketName[0]),
           "/%s/%s-socket-%02d", P_tmpdir, pool.prefix.c_str(), pool.lab) ;
  unlink(socketName) ;

  // Close both ends of the self-pipe (write end first).
  for (int t = 1 ; t >= 0 ; --t) {
    if (pipeFD[t] != -1) {
      close(pipeFD[t]) ;
      pipeFD[t] = -1 ;
    }
  }

  state = down ;
  waitingList.notify_all() ;
}
// The purpose of the handshake sequence is to make sure that
// all processes are properly communicating and ready to go.
// It is also required to synchronize the root (which creates several
// shared resources) and the other nodes (which attach them).
//
// Sequence: the root creates a session id and attaches the shared
// space; each child waits for `init` from its parent, attaches, relays
// `init` to its own children, collects `initDone` from them (learning
// their real lab indexes), registers the peer tensors, and finally
// acknowledges `initDone` to its parent.
vl::ErrorCode ProcessPool::Supervisor::handshake()
{
  Message msg ;
  vl::ErrorCode error = vl::VLE_Success ;

  // Lock for entire duration of handshake()
  tthread::lock_guard<tthread::mutex> lock(mutex) ;
  LOG(2) << "handshake begins" ;

  // Receive message from parent (except for root).
  if (pool.lab == 0) {
    session = (uint32_t)vl::getTime() ;
    // root attaches first
    error = pool.sharedSpace->attach(pool.prefix, 0, pool.numLabs) ;
    if (error != vl::VLE_Success) {
      LOGERROR << "root could not attach the shared space" ;
      error = vl::VLE_Unknown ;
      goto done ;
    }
    LOG(2) << "root attached the shared tensor space" ;
  } else {
    error = receive(msg, peers[0].lab) ;
    if (error != vl::VLE_Success || msg.type != Message::init) {
      LOGERROR << "did not receive a message from parent" ;
      error = vl::VLE_Unknown ;
      goto done ;
    }
    session = msg.session ;
    // children attach now
    error = pool.sharedSpace->attach(pool.prefix, pool.lab, pool.numLabs) ;
    if (error != vl::VLE_Success || msg.type != Message::init) {
      LOGERROR << "could not attach shared space" ;
      error = vl::VLE_Unknown ;
      goto done ;
    }
    LOG(2) << "child attached the shared tensor space" ;
  }

  // Send message to all children.
  for (int p = (pool.lab > 0) ; p < peers.size() ; ++p) {
    msg.type = Message::init ;
    error = send(msg,peers[p].lab) ;
    if (error != vl::VLE_Success) {
      LOGERROR << "could not send a message to a child" ;
      goto done ;
    }
  }

  // Receive message from all children.
  for (int p = (pool.lab > 0) ; p < peers.size() ; ++p) {
    error = receive(msg,peers[p].lab) ;
    if (error != vl::VLE_Success || msg.type != Message::initDone) {
      error = vl::VLE_Unknown ;
      goto done ;
    }
    // Now we can identify the child lab index (provisional negative
    // index assigned in init() is replaced with the real one).
    peers[p].lab = msg.from ;
    LOG(2) << "connected lab " << msg.from ;
  }

  // Register peer tensors in the same order as peer[].
  for (int p = 0 ; p < peers.size() ; ++p) {
    pool.sharedSpace->attachPeer(peers[p].lab) ;
  }

  // Send message to parent (except for root).
  if (pool.lab > 0) {
    msg.type = Message::initDone ;
    error = send(msg, peers[0].lab) ;
    if (error != vl::VLE_Success) {
      error = vl::VLE_Unknown ;
      goto done ;
    }
    session = msg.session ;
  }
done:
  if (error != vl::VLE_Success) {
    LOGERROR << "handshake failed" ;
  } else {
    LOG(2) << "handshake terminated successfully" ;
  }
  return error ;
}
void ProcessPool::Supervisor::entryPoint()
{
vl::ErrorCode error = vl::VLE_Success ;
// Make sure the supervisory thread operates on the same CUDA device
// as the main thread.
#if ENABLE_GPU
if (pool.sharedSpace->gpuDevice >= 0) {
LOG(2) << "setting CUDA device" ;
cudaError_t cerror = cudaSetDevice(pool.sharedSpace->gpuDevice) ;
if (cerror != cudaSuccess) {
LOGERROR
<< "could not switch supervisory thread to CUDA device "
<< pool.sharedSpace->gpuDevice ;
error = vl::VLE_Cuda ;
} else {
LOG(2) << "supervisory thread switched to CUDA device " << pool.sharedSpace->gpuDevice ;
}
}
#endif
if (error == vl::VLE_Success) {
error = connect() ;
}
if (error == vl::VLE_Success) {
error = handshake() ;
}
if (error == vl::VLE_Success) {
error = loop() ;
}
disconnect() ;
}
// Handle a tensor in `accumulateChildren` state: sum into this lab's
// copy the contribution of every child peer that has reached the
// `waitParent` state for the current transaction (CPU: axpy on shared
// memory; GPU: stage the child's shared-memory copy onto the device
// and axpy there). When all children are accumulated, publish the
// partial sum (GPU: copy back to the shared host map, synchronously)
// and notify the parent by switching to `waitParent`.
vl::ErrorCode ProcessPool::Supervisor::handleAccumulateChildren(int tensorIndex)
{
  vl::ErrorCode error = vl::VLE_Success ;
  SharedTensorSpace::SharedTensorInstance & T = pool.sharedSpace->tensors[tensorIndex] ;

  // Search for children ready to be accumulated.
  for (int p = (pool.lab > 0) ; p < peers.size() && error == vl::VLE_Success ; ++p)
  {
    int peerLab = peers[p].lab ;
    SharedTensorSpace::SharedTensorPeerInstance & PT
    = pool.sharedSpace->getPeerTensor(tensorIndex, peerLab) ;
    bool thisChildReadyForAccumulation =
    PT.transaction == T.transaction &&
    PT.state == SharedTensorSpace::waitParent &&
    PT.accumulated == false ;
    if (thisChildReadyForAccumulation) {
      switch (T.descriptor.deviceType) {
        case vl::VLDT_CPU: {
          switch (T.descriptor.dataType) {
            case vl::VLDT_Float:
              vl::impl::blas<vl::VLDT_CPU,vl::VLDT_Float>::axpy
              (context,
               T.descriptor.shape.getNumElements(),
               1.0f,
               (float*)PT.mappedCpuMemory, 1,
               (float*)T.cpuMemory, 1) ;
              break ;
            case vl::VLDT_Double:
              vl::impl::blas<vl::VLDT_CPU,vl::VLDT_Double>::axpy
              (context,
               T.descriptor.shape.getNumElements(),
               1.0,
               (double*)PT.mappedCpuMemory, 1,
               (double*)T.cpuMemory, 1) ;
              break ;
            default:
              assert(false) ;
              break ;
          }
          break ;
        }
        case vl::VLDT_GPU: {
#if ENABLE_GPU
          cudaError_t cerror ;
          if (T.gpuMemory == NULL) {
            LOGERROR << "internal error: VLDT_GPU memory not allocated for tensor " << T.name ;
            error = vl::VLE_Unknown ;
            break ;
          }
          // Copy the copy of the tensor update in the host shared memory map
          // to a buffer in the VLDT_GPU.
          cerror = cudaMemcpyAsync(pool.sharedSpace->gpuDispatchMemory,
                                   PT.mappedCpuMemory,
                                   T.descriptor.getSizeInBytes(),
                                   cudaMemcpyHostToDevice,
                                   pool.sharedSpace->gpuHelperStream) ;
          if (cerror != cudaSuccess) {
            LOGERROR
            << "CUDA generated an error while copying data from host to device: "
            << cudaGetErrorString(cerror) ;
            error = vl::VLE_Cuda ;
            break ;
          }
          // Sum the update to the current tensor value, temporarily
          // redirecting the BLAS helper onto the helper stream.
          cudaStream_t previousStream = context.getCudaHelper().getStream() ;
          error = context.getCudaHelper().setStream(pool.sharedSpace->gpuHelperStream) ;
          if (error != vl::VLE_Success) {
            LOGERROR
            << "CUDA generated an error while switching to a different stream:"
            << context.getLastErrorMessage() ;
            break ;
          }
          switch (T.descriptor.dataType) {
            case vl::VLDT_Float:
              error = vl::impl::blas<vl::VLDT_GPU,vl::VLDT_Float>::axpy
              (context,
               T.descriptor.shape.getNumElements(),
               1.0f,
               (float*)pool.sharedSpace->gpuDispatchMemory, 1,
               (float*)T.gpuMemory, 1) ;
              break ;
            case vl::VLDT_Double:
              error = vl::impl::blas<vl::VLDT_GPU,vl::VLDT_Double>::axpy
              (context,
               T.descriptor.shape.getNumElements(),
               1.0,
               (double*)pool.sharedSpace->gpuDispatchMemory, 1,
               (double*)T.gpuMemory, 1) ;
              break ;
            default:
              assert(false) ;
              break ;
          }
          context.getCudaHelper().setStream(previousStream) ;
          if (error != vl::VLE_Success) {
            LOGERROR << "summing tensors:" << context.getLastErrorMessage() ;
          }
#endif
          break ;
        }
        default:
          assert(false) ;
          break ;
      }
      PT.accumulated = true ;
      -- T.numChildrenToAccumulate ;
      LOG(3)
      << "accumulated child " << PT.lab
      << "; " << T.numChildrenToAccumulate << " remaining" ;
    } // next peer
  }
  if (error != vl::VLE_Success) { return error ; }

  // If all children have been accumulated, then
  // notify the parent and switch to waitParent state.
  // Note that we change the PT state too as the peer
  // will switch to that upon receiving the notification.
  //
  // The root is a special case because it
  // does not have a parent, so it can switch
  // directly to the waitChildren state. However, in order
  // to reuse the generic code above, we also set it
  // to waitParent and let the next iteration pick this up.
  if (T.numChildrenToAccumulate == 0) {
    if (T.descriptor.deviceType == vl::VLDT_GPU) {
#if ENABLE_GPU
      cudaError_t cerror ;
      // Copy the VLDT_GPU tensor to the shared host memory map for other
      // processes to use.
      cerror = cudaMemcpyAsync(T.cpuMemory,
                               T.gpuMemory,
                               T.descriptor.getSizeInBytes(),
                               cudaMemcpyDeviceToHost,
                               pool.sharedSpace->gpuHelperStream) ;
      if (cerror != cudaSuccess) {
        LOGERROR
        << "CUDA error while copying from device to host ("
        << cudaGetErrorString(cerror) << ")" ;
        return vl::VLE_Cuda ;
      }
      // Make this operation synchronous in order
      // to make sure that other processes will properly read the
      // update only when the copy is complete
      cerror = cudaStreamSynchronize(pool.sharedSpace->gpuHelperStream) ;
      if (cerror != cudaSuccess) {
        LOGERROR
        << "CUDA error while synchronizing a stream: '"
        << cudaGetErrorString(cerror) << '\'' ;
        return vl::VLE_Cuda ;
      }
#endif
    }
    T.state = SharedTensorSpace::waitParent ;
    if (pool.lab > 0) {
      int parentLab = peers[0].lab ;
      pool.sharedSpace->getPeerTensor(tensorIndex, parentLab).state = SharedTensorSpace::waitParent ;
      Message msg ;
      msg.type = Message::tensorStateChange ;
      msg.tensorId = tensorIndex ;
      msg.tensorState = T.state ;
      msg.transaction = T.transaction ;
      error = send(msg, parentLab) ;
    }
  }
  return error ;
}
// Handle a tensor in `waitParent` state: once the parent has reached
// `waitChildren` for the current transaction, copy its fully reduced
// value from the parent's shared host map into this lab's tensor,
// publish it (GPU: copy back to this lab's shared host map and
// synchronize so other processes can read it), then release the parent
// (set its peer state to `ready`) and ask the children to copy our
// data (switch to `waitChildren`). The root skips the copy-from-parent
// part.
vl::ErrorCode ProcessPool::Supervisor::handleWaitParent(int tensorIndex)
{
  vl::ErrorCode error = vl::VLE_Success ;
  SharedTensorSpace::SharedTensorInstance & T = pool.sharedSpace->tensors[tensorIndex] ;

  // Check if parent finished updating. If so, we can copy its value here
  // and notify the children to copy us by switching to waitParent state and
  // notifying the children. Note that we change the children peer state too
  // as these peers will switch to that upon being notified.
  if (pool.lab > 0) {
    int parentLab = peers[0].lab ;
    SharedTensorSpace::SharedTensorPeerInstance & PT
    = pool.sharedSpace->getPeerTensor(tensorIndex, parentLab) ;
    bool parentDone = (PT.transaction == T.transaction &&
                       PT.state == SharedTensorSpace::waitChildren) ;
    if (!parentDone) {
      // Nothing to do yet; try again on a later iteration.
      return vl::VLE_Success ;
    }
    switch (T.descriptor.deviceType) {
      case vl::VLDT_CPU:
        memcpy(T.cpuMemory, PT.mappedCpuMemory, T.descriptor.getSizeInBytes()) ;
        break ;
      case vl::VLDT_GPU: {
#if ENABLE_GPU
        cudaError_t cerror = cudaMemcpyAsync(T.gpuMemory,
                                             PT.mappedCpuMemory,
                                             T.descriptor.getSizeInBytes(),
                                             cudaMemcpyHostToDevice,
                                             pool.sharedSpace->gpuHelperStream) ;
        if (cerror != cudaSuccess) {
          LOGERROR
          << "propagating parent to children: CUDA generated an error while copying from host to device: '"
          << cudaGetErrorString(cerror) << '\'' ;
          error = vl::VLE_Cuda ;
        }
#endif
        break ;
      }
    }
    if (error != vl::VLE_Success) { return error ; }
  }

  // We have copied data from parent (or there is no parent at all)
  // so we are ready to pass our data to the children and to release
  // the parent from waiting on us.
#if ENABLE_GPU
  if (T.descriptor.deviceType == vl::VLDT_GPU) {
    cudaError_t cerror ;
    if (peers.size() > (pool.lab > 0)) {
      // There are children (i.e. peers other than parent), so copy data to host
      // to deliver it to them.
      cerror = cudaMemcpyAsync(T.cpuMemory,
                               T.gpuMemory,
                               T.descriptor.getSizeInBytes(),
                               cudaMemcpyDeviceToHost,
                               pool.sharedSpace->gpuHelperStream) ;
      if (cerror != cudaSuccess) {
        LOGERROR
        << "CUDA generated an error while copying from device to host: '"
        << cudaGetErrorString(cerror) << '\'' ;
        error = vl::VLE_Cuda ;
      }
    }
    // Synchronize, so it is safe for children on other processes to read
    // the memory. Synchronize even if there are no children, so that inplace
    // reads from this process are safe.
    cerror = cudaStreamSynchronize(pool.sharedSpace->gpuHelperStream) ;
    if (cerror != cudaSuccess) {
      LOGERROR
      << "CUDA generated an error while synchronizing a stream: '"
      << cudaGetErrorString(cerror) << '\'' ;
      return vl::VLE_Cuda ;
    }
  }
#endif

  // Notify the parent that we are done copying its data and the children that we are waiting
  // on them to copy our data.
  T.state = SharedTensorSpace::waitChildren ;
  for (int p = 0 ; p < peers.size() ; ++p) {
    int peerLab = peers[p].lab ;
    SharedTensorSpace::SharedTensorPeerInstance & PT
    = pool.sharedSpace->getPeerTensor(tensorIndex, peerLab) ;
    // The parent (p == 0 on non-root labs) is released to `ready`;
    // the children are told to wait on us (`waitChildren`).
    PT.state = (pool.lab > 0 && p == 0) ? SharedTensorSpace::ready : SharedTensorSpace::waitChildren ;
    Message msg ;
    msg.type = Message::tensorStateChange ;
    msg.transaction = T.transaction ;
    msg.tensorId = tensorIndex ;
    msg.tensorState = (pool.lab > 0 && p == 0) ? SharedTensorSpace::ready : SharedTensorSpace::waitChildren ;
    error = send(msg, peerLab) ;
  }
  return error ;
}
// Handle a tensor in `waitChildren` state: once every child peer has
// caught up -- i.e. reached `ready` for the current transaction or
// already moved on to a later transaction -- mark the tensor `ready`
// and wake up any thread blocked in waitTensor().
vl::ErrorCode ProcessPool::Supervisor::handleWaitChildren(int tensorIndex)
{
  SharedTensorSpace::SharedTensorInstance & T = pool.sharedSpace->tensors[tensorIndex] ;

  // Peers from index (lab > 0) onwards are the children (peers[0] is
  // the parent, except on the root). Check whether any is still behind.
  bool somePending = false ;
  for (int p = (pool.lab > 0) ; p < peers.size() ; ++p) {
    SharedTensorSpace::SharedTensorPeerInstance & PT
    = pool.sharedSpace->getPeerTensor(tensorIndex, peers[p].lab) ;
    bool childCaughtUp = (PT.transaction == T.transaction &&
                          PT.state == SharedTensorSpace::ready) ||
                         (PT.transaction > T.transaction) ;
    if (!childCaughtUp) {
      somePending = true ;
    }
  }

  if (!somePending) {
    // All children are done: publish `ready` and notify waiters.
    tthread::lock_guard<tthread::mutex> lock(mutex) ;
    T.state = SharedTensorSpace::ready ;
    waitingList.notify_all() ;
  }
  return vl::VLE_Success ;
}
// Main body of the supervisory thread: polls peer sockets and the wake-up
// pipe, dispatches incoming messages, advances each tensor's state machine,
// and coordinates a graceful shutdown. Returns the first error encountered
// (or VLE_Success on a clean shutdown).
vl::ErrorCode ProcessPool::Supervisor::loop()
{
  vl::ErrorCode error = vl::VLE_Success ;
  LOG(2) << "loop begins" ;
  // Advertise that the supervisory thread is running. Note that we do not
  // lock extensively in the main loop; synchronization with the main
  // thread is kept efficient using lock-free mechanisms.
  {
    tthread::lock_guard<tthread::mutex> lock(mutex) ;
    state = running ;
    waitingList.notify_all() ;
  }
  int pollStatus = 0 ;
  size_t const pollInterval = 499UL ; // allow heartbeats (ms)
  size_t const heartbeatInterval = 500UL * 1000UL * 1000UL ; // (ns)
  size_t lastHeartbeat = vl::getTime() ;
  // One poll slot per peer socket, plus one final slot for the pipe used
  // by the main thread to wake up this thread.
  struct pollfd * polls = new struct pollfd [peers.size() + 1] ;
  for (int p = 0 ; p < peers.size() ; ++p) {
    polls[p].fd = peers[p].socketFD ;
    polls[p].events = POLLIN | POLLHUP | POLLERR | POLLNVAL ;
  }
  polls[peers.size()].fd = pipeFD[0] ;
  polls[peers.size()].events = POLLIN ;
  while (error == vl::VLE_Success && forceQuit == false)
  {
    // Generate regular heartbeats to wake up the main thread at
    // regular intervals and allow it to time out on
    // user commands such as pull() and push().
    size_t now = vl::getTime() ;
    if (now > lastHeartbeat + heartbeatInterval) {
      waitingList.notify_all() ; // no need to lock
      lastHeartbeat = now ;
    }
    // Wait for incoming messages or a timeout.
    pollStatus = poll(polls, peers.size() + 1, pollInterval) ;
    if (pollStatus < 0) {
      error = vl::VLE_Unknown ;
      continue ;
    }
    // Timeout!
    if (pollStatus == 0) {
      LOG(1) << "Polling timed out on lab " << pool.sharedSpace->lab ;
      // pool.sharedSpace->dump() ;
    }
    // Check for messages piped from the main thread.
    if (polls[peers.size()].revents & POLLIN) {
      LOG(3) << "supervisory thread notified by the main thread" ;
      char dummy ;
      read(pipeFD[0], &dummy, 1) ; // drain one wake-up token
    }
    // Check for messages from other processes.
    for (int p = 0 ; p < peers.size() && error == vl::VLE_Success ; ++ p)
    {
      // Check for communication errors.
      if (polls[p].revents & (POLLHUP | POLLERR | POLLNVAL)) {
        LOG(3) << "one of the sockets generated an error, quitting" ;
        error = vl::VLE_Unknown ;
        break ;
      }
      // Skip this peer if there is no incoming data.
      if ((polls[p].revents & POLLIN) == 0) continue ;
      // Receive the message.
      Message msg ;
      error = receive(msg, peers[p].lab) ;
      if (error != vl::VLE_Success) {
        LOGERROR << "error while receiving a message from lab " << peers[p].lab ;
        break ;
      }
      // Process the message.
      switch (msg.type) {
        case Message::tensorStateChange: {
          // Record the new state for later.
          LOG(3)
          << "received tensor state change from lab " << msg.from
          << " for tensor " << pool.sharedSpace->tensors[msg.tensorId].name.c_str()
          << " to state " << msg.tensorState
          << " for transaction " << msg.transaction ;
          SharedTensorSpace::SharedTensorPeerInstance & T
          = pool.sharedSpace->getPeerTensor(msg.tensorId, msg.from) ;
          T.state = msg.tensorState ;
          T.transaction = msg.transaction ;
          break ;
        }
        case Message::requestShutdown: {
          peers_t::iterator P = std::find(peers.begin(), peers.end(), msg.from) ;
          P->shutdownRequested = true ;
          break ;
        }
        case Message::tensorFinalTransaction: {
          peers_t::iterator P = std::find(peers.begin(), peers.end(), msg.from) ;
          SharedTensorSpace::SharedTensorInstance & T = pool.sharedSpace->tensors[msg.tensorId];
          LOG(3)
          << "received final transaction from lab " << msg.from
          << " for tensor " << T.name.c_str()
          << " to transaction " << msg.finalTransaction ;
          int sourcePeer = msg.from ;
          if (msg.finalTransaction < T.finalTransaction) {
            T.finalTransaction = msg.finalTransaction ;
            // Flood the tightened bound to every other peer.
            for (int q = 0 ; q < peers.size() ; ++q) {
              if (sourcePeer == peers[q].lab) continue ;
              error = send(msg, peers[q].lab) ;
              if (error != vl::VLE_Success) {
                LOGERROR
                << "error while sending a message to lab "
                << peers[q].lab ; // bug fix: was peers[p].lab, naming the wrong lab
                break ;
              }
            }
          }
          break ;
        }
        default:
          // Unexpected message.
          LOGERROR << "received an unexpected message" ;
          error = vl::VLE_Unknown ;
          break ;
      }
    }
    // Check all tensors for actions. Keep updating each tensor until its
    // state does not change anymore.
    for (int tensorIndex = 0 ; tensorIndex < pool.sharedSpace->tensors.size() && error == vl::VLE_Success ; ++tensorIndex)
    {
      SharedTensorSpace::SharedTensorState currentState ;
      SharedTensorSpace::SharedTensorInstance & T = pool.sharedSpace->tensors[tensorIndex] ;
      do {
        currentState = T.state ;
        LOG(3) << "visiting tensor " << T.name << " in state " << T.state ;
        // Detect interruptions
        if (T.transaction > T.finalTransaction) {
          LOG(1) << "detected interrupted transaction for tensor " << T.name <<
          " (transaction:"<<T.transaction<<" > final_transaction:"<<T.finalTransaction<<")";
          error = vl::VLE_Interrupted ;
          continue ;
        }
        switch (T.state) {
          case SharedTensorSpace::ready:
            break ;
          case SharedTensorSpace::accumulateChildren:
            error = handleAccumulateChildren(tensorIndex) ;
            break ;
          case SharedTensorSpace::waitParent :
            error = handleWaitParent(tensorIndex) ;
            break ;
          case SharedTensorSpace::waitChildren :
            error = handleWaitChildren(tensorIndex) ;
            break ;
        }
      } while (T.state != currentState && error == vl::VLE_Success) ;
    }
    // Upon shutting down, propagate a message to let other nodes know that
    // no further transaction can be processed for each tensor.
    if (shutdownRequested && (state == running) && (error == vl::VLE_Success)) {
      LOG(3) << "sending final transaction for all tensors" ;
      for (int i = 0 ; i < pool.sharedSpace->tensors.size() ; ++i) {
        SharedTensorSpace::SharedTensorInstance & tensor = pool.sharedSpace->tensors[i] ;
        if (tensor.finalTransaction > tensor.transaction) {
          tensor.finalTransaction = tensor.transaction ;
          Message msg ;
          msg.type = Message::tensorFinalTransaction ;
          msg.tensorId = i ;
          msg.finalTransaction = tensor.finalTransaction ;
          for (int p = 0 ; p < peers.size() ; ++p) {
            error = send(msg, peers[p].lab) ;
            if (error != vl::VLE_Success) {
              LOGERROR
              << "error while sending a message to lab "
              << peers[p].lab ;
              break ;
            }
          }
        }
      }
    }
    // Check for other actions.
    if (shutdownRequested && (state == running) && (error == vl::VLE_Success)) {
      // Check if the children are also in shutdown mode
      bool allDone = true ;
      for (int p = (pool.lab > 0) ; p < peers.size() ; ++p) {
        allDone &= peers[p].shutdownRequested ;
      }
      if (allDone) {
        state = Supervisor::shuttingDown ; // avoid sending the same message again later
        if (pool.lab > 0) {
          LOG(2) << "subtree ready to shutdown, telling parent lab" ;
          Message msg ;
          msg.type = Message::requestShutdown ;
          error = send(msg, peers[0].lab) ;
        } else {
          // Other processes will stop when connections are broken.
          LOG(2) << "everyone requested shutdown, root lab quitting" ;
          break ; // out of poll loop
        }
      }
    }
  } // back to poll
  LOG(2) << "terminating supervisory thread loop (error = " << error << ')' ;
  delete [] polls ;
  return error ;
}
/* ---------------------------------------------------------------- */
/* Context */
/* ---------------------------------------------------------------- */
#pragma mark -
ProcessPool processPool ;
/*
Resetting the context here resolves a crash when MATLAB quits and
the ~Context function is implicitly called on unloading the MEX file.
*/
// MEX unload callback (registered with mexAtExit): gracefully tears down
// the process pool and clears the global context so that ~Context does not
// crash when MATLAB quits and unloads the MEX file.
void atExit()
{
  processPool.finalize() ;
  context.clear() ;
}
/* ---------------------------------------------------------------- */
/* MEX driver */
/* ---------------------------------------------------------------- */
// MEX entry point for vl_tmove. in[0] is the COMMAND string ('init',
// 'stats', 'reset', 'push', 'pull'); the remaining positional arguments
// depend on the command, followed by options ('prefix', 'verbose',
// 'inplace'). NOTE(review): vlmxError appears to raise a MATLAB error and
// not return — confirm against the vl mex utilities.
void mexFunction(int nout, mxArray *out[],
                 int nin, mxArray const *in[])
{
  int opt ;
  int next = IN_END ;               // index of the first option argument
  mxArray const *optarg ;
  enum Commands { init, stats, reset, push, pull } command ;
  bool inplace = false ;
  std::string tensorName ;
  std::string prefix = "mcn" ;      // default prefix for shared resources
  mxArray const * arg ;             // command-specific data argument
  vl::ErrorCode error = vl::VLE_Success ;
  size_t labIndex = 0 ;
  size_t numLabs = 0 ;
  verbosity = 0 ;
  mexAtExit(atExit) ;
  /* -------------------------------------------------------------- */
  /*                                            Check the arguments */
  /* -------------------------------------------------------------- */
  if (nin < 1) {
    vlmxError(VLMXE_IllegalArgument, "Not enough input arguments.") ;
  }
  if (!vlmxIsString(in[0], -1)) {
    vlmxError(VLMXE_IllegalArgument, "COMMAND is not a string.") ;
  }
  if (vlmxCompareToStringI(in[0],"init") == 0) {
    // INIT takes a tensor description, LABINDEX (1-based), and NUMLABS.
    command = init ;
    if (nin < 4) {
      vlmxError(VLMXE_IllegalArgument, "Less than three arguments passed to INIT.") ;
    }
    arg = in[1] ;
    if (!vlmxIsPlainScalar(in[2])) {
      vlmxError(VLMXE_IllegalArgument, "LABINDEX is not a plain scalar.") ;
    }
    labIndex = mxGetScalar(in[2]) ;
    if (labIndex < 1) {
      vlmxError(VLMXE_IllegalArgument, "LABINDEX must be an integer greater than 0.") ;
    }
    if (!vlmxIsPlainScalar(in[3])) {
      vlmxError(VLMXE_IllegalArgument, "NUMLABS is not a plain scalar.") ;
    }
    numLabs = mxGetScalar(in[3]) ;
    if (numLabs < labIndex) {
      vlmxError(VLMXE_IllegalArgument, "NUMLABS must be an integer greater or equal to LABINDEX.") ;
    }
    next = 4 ;
  } else if (vlmxCompareToStringI(in[0], "stats") == 0) {
    command = stats ;
    next = 1 ;
  } else if (vlmxCompareToStringI(in[0], "reset") == 0) {
    command = reset ;
    next = 1 ;
  } else if (vlmxCompareToStringI(in[0], "push") == 0) {
    // PUSH takes a tensor NAME and the value to push.
    if (nin < 3) {
      vlmxError(VLMXE_IllegalArgument, "Less than three arguments passed to PUSH.") ;
    }
    command = push ;
    // This local shadows the outer vl::ErrorCode 'error' on purpose: it is
    // only used to validate the NAME string.
    VLMXErrorCode error = vlmxParseString(tensorName, in[1]) ;
    if (error != VLMXE_Success) {
      vlmxError(error, "NAME is not a string.") ;
    }
    arg = in[2] ;
    next = 3 ;
  } else if (vlmxCompareToStringI(in[0], "pull") == 0) {
    // PULL takes only a tensor NAME.
    if (nin < 2) {
      mexErrMsgTxt("Less than two arguments passed to PULL.") ;
    }
    command = pull ;
    VLMXErrorCode error = vlmxParseString(tensorName, in[1]) ;
    if (error != VLMXE_Success) {
      vlmxError(error, "NAME is not a string.") ;
    }
    next = 2 ;
  }
  else {
    vlmxError(VLMXE_IllegalArgument, "Unknown COMMAND.") ;
  }
  // optional arguments
  while ((opt = vlmxNextOption (in, nin, options, &next, &optarg)) >= 0) {
    switch (opt) {
      case opt_prefix : {
        if (!vlmxIsString(optarg, -1)) {
          vlmxError(VLMXE_IllegalArgument, "PREFIX is not a string.") ;
        }
        char str [512] ;
        mxGetString (optarg, str, sizeof(str)/sizeof(str[0])) ;
        prefix = str ;
        break ;
      }
      case opt_verbose :
        ++ verbosity ;
        break ;
      case opt_inplace :
        inplace = true ;
        break ;
    }
  }
  switch (command) {
    case init:
    {
      (verbosity >= 2) && mexPrintf("vl_tmove: command 'init'\n") ;
      // Initialize shared space. mexInit() may thorow a MEX error;
      // the auto_ptr should avoid a leak in this case.
      // NOTE(review): std::auto_ptr is deprecated/removed in C++11/17 —
      // confirm the project's language standard before modernizing.
      std::auto_ptr<SharedTensorSpace> sharedSpace(new SharedTensorSpace()) ;
      sharedSpace->mexInit(arg) ;
      // Initialize the pool, including attaching the shared space.
      // Now the shared space is owned by the process pool.
      error = processPool.init(prefix, labIndex - 1, numLabs, sharedSpace.release()) ;
      if (error != vl::VLE_Success) {
        mexErrMsgTxt("Could not initialize connections to other MATLAB labs.") ;
      }
      // At this point, sharedSpace is handled by the ProcessPool thread,
      // so we interact with it indirectly
      break ;
    }
    case stats :
      (verbosity >= 2) && mexPrintf("vl_tmove: command 'stats'\n") ;
      processPool.mexPrint() ;
      break ;
    case push :
      (verbosity >= 2) && mexPrintf("vl_tmove: command 'push' on tensor '%s'%s\n", tensorName.c_str(), inplace?" (inplace)":"") ;
      processPool.mexPush(tensorName, arg, inplace) ;
      break ;
    case pull :
      (verbosity >= 2) && mexPrintf("vl_tmove: command 'pull' on tensor '%s'%s\n", tensorName.c_str(),
                                    inplace?" (inplace)":"") ;
      out[0] = processPool.mexPull(tensorName, inplace) ;
      break ;
    case reset :
      (verbosity >= 2) && mexPrintf("vl_tmove: command 'reset'\n") ;
      processPool.shutdown() ;  // gracefully (wait for others to finish)
      processPool.finalize() ;  // no matter what
      break ;
  }
}
|
bedd4622d3bf5479a796972aec9ef01cad9cb8d0.hip
|
// !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
/*
* Copyright (c) ICG. All rights reserved.
*
* Institute for Computer Graphics and Vision
* Graz University of Technology / Austria
*
*
* This software is distributed WITHOUT ANY WARRANTY; without even
* the implied warranty of MERCHANTABILITY or FITNESS FOR A PARTICULAR
* PURPOSE. See the above copyright notices for more information.
*
*
* Project : ImageUtilities
* Module : Geometric Transformation
* Class : none
* Language : CUDA
* Description : Implementation of CUDA wrappers for remap operations
*
* Author : Manuel Werlberger
* EMail : werlberger@icg.tugraz.at
*
*/
#include <iostream>
#include <iudefs.h>
#include <iucutil.h>
#include <iucore/iutextures.cuh>
#ifndef IUTRANSFORM_REMAP_CU
#define IUTRANSFORM_REMAP_CU
namespace iuprivate {
// local textures
texture<float, 2, hipReadModeElementType> tex_remap_dx_32f_C1__;
texture<float, 2, hipReadModeElementType> tex_remap_dy_32f_C1__;
/** Remap input image (tex1) with disparities (tex_remap_dx, tex_remap_dy). */
// linear interpolation
// 32f_C1
// Remap one output pixel of a 32-bit float single-channel image. The source
// is sampled from tex1_32f_C1__ (its current filter mode decides point vs.
// linear interpolation); per-pixel displacements come from
// tex_remap_dx_32f_C1__ / tex_remap_dy_32f_C1__. One thread per pixel,
// launched on a 2D grid.
__global__ void cuRemapKernel_32f_C1(float *dst, size_t stride, int width, int height)
{
  const int px = blockIdx.x*blockDim.x + threadIdx.x;
  const int py = blockIdx.y*blockDim.y + threadIdx.y;
  if (px >= width || py >= height)
    return; // thread maps to no output pixel
  // sample at texel centers
  const float cx = px + 0.5f;
  const float cy = py + 0.5f;
  // displaced (warped) sampling position
  const float sx = cx + tex2D(tex_remap_dx_32f_C1__, cx, cy);
  const float sy = cy + tex2D(tex_remap_dy_32f_C1__, cx, cy);
  dst[py*stride + px] = tex2D(tex1_32f_C1__, sx, sy);
}
// cubic interpolation
// Remap one output pixel of a 32-bit float image using simple bicubic
// interpolation (iu::cubicTex2DSimple) at the warped position. Displacements
// come from tex_remap_dx_32f_C1__ / tex_remap_dy_32f_C1__.
__global__ void cuRemapCubicKernel_32f_C1(float *dst, size_t stride, int width, int height)
{
  const int px = blockIdx.x*blockDim.x + threadIdx.x;
  const int py = blockIdx.y*blockDim.y + threadIdx.y;
  if (px >= width || py >= height)
    return; // thread maps to no output pixel
  // sample at texel centers
  const float cx = px + 0.5f;
  const float cy = py + 0.5f;
  // displaced (warped) sampling position
  const float sx = cx + tex2D(tex_remap_dx_32f_C1__, cx, cy);
  const float sy = cy + tex2D(tex_remap_dy_32f_C1__, cx, cy);
  dst[py*stride + px] = iu::cubicTex2DSimple(tex1_32f_C1__, sx, sy);
}
// cubic spline interpolation
// Remap one output pixel of a 32-bit float image using cubic-spline
// interpolation (iu::cubicTex2D) at the warped position. Displacements come
// from tex_remap_dx_32f_C1__ / tex_remap_dy_32f_C1__.
__global__ void cuRemapCubicSplineKernel_32f_C1(float *dst, size_t stride, int width, int height)
{
  const int px = blockIdx.x*blockDim.x + threadIdx.x;
  const int py = blockIdx.y*blockDim.y + threadIdx.y;
  if (px >= width || py >= height)
    return; // thread maps to no output pixel
  // sample at texel centers
  const float cx = px + 0.5f;
  const float cy = py + 0.5f;
  // displaced (warped) sampling position
  const float sx = cx + tex2D(tex_remap_dx_32f_C1__, cx, cy);
  const float sy = cy + tex2D(tex_remap_dy_32f_C1__, cx, cy);
  dst[py*stride + px] = iu::cubicTex2D(tex1_32f_C1__, sx, sy);
}
//-----------------------------------------------------------------------------
// Remap a 32-bit float single-channel GPU image: each destination pixel is
// sampled from `src` at a position displaced by (dx_map, dy_map), using the
// requested interpolation mode.
//
// Uses the legacy texture-reference API: global texture objects are
// configured, bound to the inputs, the kernel matching `interpolation` is
// launched over `dst`, and the textures are unbound. Statement order
// matters: filter modes must be set before the kernel launch.
void cuRemap(iu::ImageGpu_32f_C1* src,
             iu::ImageGpu_32f_C1* dx_map, iu::ImageGpu_32f_C1* dy_map,
             iu::ImageGpu_32f_C1* dst, IuInterpolationType interpolation)
{
  // source image: clamp-to-edge, unnormalized texel coordinates
  tex1_32f_C1__.addressMode[0] = hipAddressModeClamp;
  tex1_32f_C1__.addressMode[1] = hipAddressModeClamp;
  tex1_32f_C1__.normalized = false;
  // displacement maps are always point-sampled
  tex_remap_dx_32f_C1__.addressMode[0] = hipAddressModeClamp;
  tex_remap_dx_32f_C1__.addressMode[1] = hipAddressModeClamp;
  tex_remap_dx_32f_C1__.normalized = false;
  tex_remap_dx_32f_C1__.filterMode = hipFilterModePoint;
  tex_remap_dy_32f_C1__.addressMode[0] = hipAddressModeClamp;
  tex_remap_dy_32f_C1__.addressMode[1] = hipAddressModeClamp;
  tex_remap_dy_32f_C1__.normalized = false;
  tex_remap_dy_32f_C1__.filterMode = hipFilterModePoint;
  // bind src image to texture and use as input for reduction
  hipChannelFormatDesc channel_desc = hipCreateChannelDesc<float>();
  hipBindTexture2D(0, &tex1_32f_C1__, src->data(), &channel_desc, src->width(), src->height(), src->pitch());
  hipBindTexture2D(0, &tex_remap_dx_32f_C1__, dx_map->data(), &channel_desc, dx_map->width(), dx_map->height(), dx_map->pitch());
  hipBindTexture2D(0, &tex_remap_dy_32f_C1__, dy_map->data(), &channel_desc, dy_map->width(), dy_map->height(), dy_map->pitch());
  // fragmentation: 16x16 thread blocks covering the destination image
  unsigned int block_size = 16;
  dim3 dimBlock(block_size, block_size);
  dim3 dimGridOut(iu::divUp(dst->width(), dimBlock.x), iu::divUp(dst->height(), dimBlock.y));
  // Nearest / cubic variants fetch raw texels (point filtering); linear
  // relies on the texture unit's hardware bilinear filtering.
  switch(interpolation)
  {
  case IU_INTERPOLATE_NEAREST:
  case IU_INTERPOLATE_CUBIC:
  case IU_INTERPOLATE_CUBIC_SPLINE:
    tex1_32f_C1__.filterMode = hipFilterModePoint;
    break;
  case IU_INTERPOLATE_LINEAR:
    tex1_32f_C1__.filterMode = hipFilterModeLinear;
    break;
  }
  switch(interpolation)
  {
  case IU_INTERPOLATE_NEAREST:
  case IU_INTERPOLATE_LINEAR: // fallthrough intended
    hipLaunchKernelGGL(( cuRemapKernel_32f_C1) , dim3(dimGridOut), dim3(dimBlock) , 0, 0,
        dst->data(), dst->stride(), dst->width(), dst->height());
    break;
  case IU_INTERPOLATE_CUBIC:
    hipLaunchKernelGGL(( cuRemapCubicKernel_32f_C1) , dim3(dimGridOut), dim3(dimBlock) , 0, 0,
        dst->data(), dst->stride(), dst->width(), dst->height());
    break;
  case IU_INTERPOLATE_CUBIC_SPLINE:
    hipLaunchKernelGGL(( cuRemapCubicSplineKernel_32f_C1) , dim3(dimGridOut), dim3(dimBlock) , 0, 0,
        dst->data(), dst->stride(), dst->width(), dst->height());
    break;
  }
  hipUnbindTexture(&tex1_32f_C1__);
  hipUnbindTexture(&tex_remap_dx_32f_C1__);
  hipUnbindTexture(&tex_remap_dy_32f_C1__);
  // single error check after unbinding; earlier API/launch errors surface here
  IU_CUDA_CHECK();
}
//-----------------------------------------------------------------------------
// 8u_C1
// Remap one output pixel of an 8-bit single-channel image using manual
// bilinear interpolation of the four texels around the warped coordinate.
// Displacements come from tex_remap_dx_32f_C1__ / tex_remap_dy_32f_C1__;
// source texels are fetched from tex1_8u_C1__ (point sampling assumed).
// Currently unused: the host wrapper launches only the point-interp kernel.
__global__ void cuRemapLinearInterpKernel_8u_C1(unsigned char*dst, size_t stride, int width, int height)
{
  const int x = blockIdx.x*blockDim.x + threadIdx.x;
  const int y = blockIdx.y*blockDim.y + threadIdx.y;
  // texture coordinates (texel centers)
  const float xx = x+0.5f;
  const float yy = y+0.5f;
  // warped texture coordinates
  const float wx = xx + tex2D(tex_remap_dx_32f_C1__, xx, yy);
  const float wy = yy + tex2D(tex_remap_dy_32f_C1__, xx, yy);
  if (x<width && y<height) // Check if out coordinates lie inside output image
  {
    int wx1 = IUMAX(0, static_cast<int>(wx));
    int wx2 = IUMIN(width, wx1+1);
    int wy1 = IUMAX(0, static_cast<int>(wy));
    int wy2 = IUMIN(height, wy1+1);
    // Bug fix: the interpolation weights must be derived from the *warped*
    // position (wx,wy), not the output position (xx,yy); the two only agree
    // when the displacement is zero.
    float dx = wx2-wx;
    float dy = wy2-wy;
    // NOTE(review): wx/wy are expressed at texel centers (x+0.5), so the
    // 2x2 neighbourhood is shifted by half a texel relative to the usual
    // bilinear convention — confirm the intended sampling convention
    // before re-enabling this kernel.
    float val1 = dx*dy*static_cast<float>(tex2D(tex1_8u_C1__,wx1,wy1))/255.0f;
    float val2 = dx*(1-dy)*static_cast<float>(tex2D(tex1_8u_C1__,wx1,wy2))/255.0f;
    float val3 = (1-dx)*dy*static_cast<float>(tex2D(tex1_8u_C1__,wx2,wy1))/255.0f;
    float val4 = (1-dx)*(1-dy)*static_cast<float>(tex2D(tex1_8u_C1__,wx2,wy2))/255.0f;
    dst[y*stride+x] = (val1 + val2 + val3 + val4) * 255;
    //dst[y*stride+x] = tex2D(tex1_8u_C1__, wx, wy);
  }
}
//-----------------------------------------------------------------------------
// 8u_C1
// Remap one output pixel of an 8-bit single-channel image with point
// (nearest) sampling of tex1_8u_C1__ at the warped position. Displacements
// come from tex_remap_dx_32f_C1__ / tex_remap_dy_32f_C1__.
__global__ void cuRemapPointInterpKernel_8u_C1(unsigned char*dst, size_t stride, int width, int height)
{
  const int px = blockIdx.x*blockDim.x + threadIdx.x;
  const int py = blockIdx.y*blockDim.y + threadIdx.y;
  if (px >= width || py >= height)
    return; // thread maps to no output pixel
  // sample at texel centers
  const float cx = px + 0.5f;
  const float cy = py + 0.5f;
  // displaced (warped) sampling position
  const float sx = cx + tex2D(tex_remap_dx_32f_C1__, cx, cy);
  const float sy = cy + tex2D(tex_remap_dy_32f_C1__, cx, cy);
  dst[py*stride + px] = tex2D(tex1_8u_C1__, sx, sy);
}
//-----------------------------------------------------------------------------
// Remap an 8-bit single-channel GPU image: each destination pixel is sampled
// from `src` at a position displaced by (dx_map, dy_map).
//
// Note: the interpolation switch is commented out below, so the
// `interpolation` argument is currently ignored and point (nearest)
// sampling is always used.
void cuRemap(iu::ImageGpu_8u_C1* src,
             iu::ImageGpu_32f_C1* dx_map, iu::ImageGpu_32f_C1* dy_map,
             iu::ImageGpu_8u_C1* dst, IuInterpolationType interpolation)
{
  // source image: clamp-to-edge, unnormalized coordinates, point sampling
  tex1_8u_C1__.addressMode[0] = hipAddressModeClamp;
  tex1_8u_C1__.addressMode[1] = hipAddressModeClamp;
  tex1_8u_C1__.normalized = false;
  tex1_8u_C1__.filterMode = hipFilterModePoint;
  // displacement maps are always point-sampled
  tex_remap_dx_32f_C1__.addressMode[0] = hipAddressModeClamp;
  tex_remap_dx_32f_C1__.addressMode[1] = hipAddressModeClamp;
  tex_remap_dx_32f_C1__.normalized = false;
  tex_remap_dx_32f_C1__.filterMode = hipFilterModePoint;
  tex_remap_dy_32f_C1__.addressMode[0] = hipAddressModeClamp;
  tex_remap_dy_32f_C1__.addressMode[1] = hipAddressModeClamp;
  tex_remap_dy_32f_C1__.normalized = false;
  tex_remap_dy_32f_C1__.filterMode = hipFilterModePoint;
  // bind src image to texture and use as input for reduction
  hipChannelFormatDesc channel_desc_8u_C1 = hipCreateChannelDesc<unsigned char>();
  hipChannelFormatDesc channel_desc = hipCreateChannelDesc<float>();
  hipBindTexture2D(0, &tex1_8u_C1__, src->data(), &channel_desc_8u_C1, src->width(), src->height(), src->pitch());
  hipBindTexture2D(0, &tex_remap_dx_32f_C1__, dx_map->data(), &channel_desc, dx_map->width(), dx_map->height(), dx_map->pitch());
  hipBindTexture2D(0, &tex_remap_dy_32f_C1__, dy_map->data(), &channel_desc, dy_map->width(), dy_map->height(), dy_map->pitch());
  // fragmentation: 16x16 thread blocks covering the destination image
  unsigned int block_size = 16;
  dim3 dimBlock(block_size, block_size);
  dim3 dimGridOut(iu::divUp(dst->width(), dimBlock.x), iu::divUp(dst->height(), dimBlock.y));
  // Disabled interpolation selection (point sampling is always used):
  //  switch(interpolation)
  //  {
  //  case IU_INTERPOLATE_NEAREST:
  //  case IU_INTERPOLATE_CUBIC:
  //  case IU_INTERPOLATE_CUBIC_SPLINE:
  //    tex1_8u_C1__.filterMode = hipFilterModePoint;
  //    break;
  //  case IU_INTERPOLATE_LINEAR:
  //    tex1_8u_C1__.filterMode = hipFilterModeLinear;
  //    break;
  //  }
  //  switch(interpolation)
  //  {
  //  case IU_INTERPOLATE_LINEAR: // fallthrough intended
  //    cuRemapLinearInterpKernel_8u_C1 <<< dimGridOut, dimBlock >>> (
  //        dst->data(), dst->stride(), dst->width(), dst->height());
  //    break;
  //  default:
  //  case IU_INTERPOLATE_NEAREST:
  hipLaunchKernelGGL(( cuRemapPointInterpKernel_8u_C1) , dim3(dimGridOut), dim3(dimBlock) , 0, 0,
      dst->data(), dst->stride(), dst->width(), dst->height());
  //    break;
  //  }
  hipUnbindTexture(&tex1_8u_C1__);
  hipUnbindTexture(&tex_remap_dx_32f_C1__);
  hipUnbindTexture(&tex_remap_dy_32f_C1__);
  // single error check after unbinding; earlier API/launch errors surface here
  IU_CUDA_CHECK();
}
} // namespace iuprivate
#endif // IUTRANSFORM_REMAP_CU
|
bedd4622d3bf5479a796972aec9ef01cad9cb8d0.cu
|
/*
* Copyright (c) ICG. All rights reserved.
*
* Institute for Computer Graphics and Vision
* Graz University of Technology / Austria
*
*
* This software is distributed WITHOUT ANY WARRANTY; without even
* the implied warranty of MERCHANTABILITY or FITNESS FOR A PARTICULAR
* PURPOSE. See the above copyright notices for more information.
*
*
* Project : ImageUtilities
* Module : Geometric Transformation
* Class : none
* Language : CUDA
* Description : Implementation of CUDA wrappers for remap operations
*
* Author : Manuel Werlberger
* EMail : werlberger@icg.tugraz.at
*
*/
#include <iostream>
#include <iudefs.h>
#include <iucutil.h>
#include <iucore/iutextures.cuh>
#ifndef IUTRANSFORM_REMAP_CU
#define IUTRANSFORM_REMAP_CU
namespace iuprivate {
// local textures
texture<float, 2, cudaReadModeElementType> tex_remap_dx_32f_C1__;
texture<float, 2, cudaReadModeElementType> tex_remap_dy_32f_C1__;
/** Remap input image (tex1) with disparities (tex_remap_dx, tex_remap_dy). */
// linear interpolation
// 32f_C1
// Remap one output pixel of a 32-bit float single-channel image. The source
// is sampled from tex1_32f_C1__ (its current filter mode decides point vs.
// linear interpolation); per-pixel displacements come from
// tex_remap_dx_32f_C1__ / tex_remap_dy_32f_C1__. One thread per pixel,
// launched on a 2D grid.
__global__ void cuRemapKernel_32f_C1(float *dst, size_t stride, int width, int height)
{
  const int px = blockIdx.x*blockDim.x + threadIdx.x;
  const int py = blockIdx.y*blockDim.y + threadIdx.y;
  if (px >= width || py >= height)
    return; // thread maps to no output pixel
  // sample at texel centers
  const float cx = px + 0.5f;
  const float cy = py + 0.5f;
  // displaced (warped) sampling position
  const float sx = cx + tex2D(tex_remap_dx_32f_C1__, cx, cy);
  const float sy = cy + tex2D(tex_remap_dy_32f_C1__, cx, cy);
  dst[py*stride + px] = tex2D(tex1_32f_C1__, sx, sy);
}
// cubic interpolation
// Remap one output pixel of a 32-bit float image using simple bicubic
// interpolation (iu::cubicTex2DSimple) at the warped position. Displacements
// come from tex_remap_dx_32f_C1__ / tex_remap_dy_32f_C1__.
__global__ void cuRemapCubicKernel_32f_C1(float *dst, size_t stride, int width, int height)
{
  const int px = blockIdx.x*blockDim.x + threadIdx.x;
  const int py = blockIdx.y*blockDim.y + threadIdx.y;
  if (px >= width || py >= height)
    return; // thread maps to no output pixel
  // sample at texel centers
  const float cx = px + 0.5f;
  const float cy = py + 0.5f;
  // displaced (warped) sampling position
  const float sx = cx + tex2D(tex_remap_dx_32f_C1__, cx, cy);
  const float sy = cy + tex2D(tex_remap_dy_32f_C1__, cx, cy);
  dst[py*stride + px] = iu::cubicTex2DSimple(tex1_32f_C1__, sx, sy);
}
// cubic spline interpolation
// Remap one output pixel of a 32-bit float image using cubic-spline
// interpolation (iu::cubicTex2D) at the warped position. Displacements come
// from tex_remap_dx_32f_C1__ / tex_remap_dy_32f_C1__.
__global__ void cuRemapCubicSplineKernel_32f_C1(float *dst, size_t stride, int width, int height)
{
  const int px = blockIdx.x*blockDim.x + threadIdx.x;
  const int py = blockIdx.y*blockDim.y + threadIdx.y;
  if (px >= width || py >= height)
    return; // thread maps to no output pixel
  // sample at texel centers
  const float cx = px + 0.5f;
  const float cy = py + 0.5f;
  // displaced (warped) sampling position
  const float sx = cx + tex2D(tex_remap_dx_32f_C1__, cx, cy);
  const float sy = cy + tex2D(tex_remap_dy_32f_C1__, cx, cy);
  dst[py*stride + px] = iu::cubicTex2D(tex1_32f_C1__, sx, sy);
}
//-----------------------------------------------------------------------------
// Remap a 32-bit float single-channel GPU image: each destination pixel is
// sampled from `src` at a position displaced by (dx_map, dy_map), using the
// requested interpolation mode.
//
// Uses the legacy texture-reference API: global texture objects are
// configured, bound to the inputs, the kernel matching `interpolation` is
// launched over `dst`, and the textures are unbound. Statement order
// matters: filter modes must be set before the kernel launch.
void cuRemap(iu::ImageGpu_32f_C1* src,
             iu::ImageGpu_32f_C1* dx_map, iu::ImageGpu_32f_C1* dy_map,
             iu::ImageGpu_32f_C1* dst, IuInterpolationType interpolation)
{
  // source image: clamp-to-edge, unnormalized texel coordinates
  tex1_32f_C1__.addressMode[0] = cudaAddressModeClamp;
  tex1_32f_C1__.addressMode[1] = cudaAddressModeClamp;
  tex1_32f_C1__.normalized = false;
  // displacement maps are always point-sampled
  tex_remap_dx_32f_C1__.addressMode[0] = cudaAddressModeClamp;
  tex_remap_dx_32f_C1__.addressMode[1] = cudaAddressModeClamp;
  tex_remap_dx_32f_C1__.normalized = false;
  tex_remap_dx_32f_C1__.filterMode = cudaFilterModePoint;
  tex_remap_dy_32f_C1__.addressMode[0] = cudaAddressModeClamp;
  tex_remap_dy_32f_C1__.addressMode[1] = cudaAddressModeClamp;
  tex_remap_dy_32f_C1__.normalized = false;
  tex_remap_dy_32f_C1__.filterMode = cudaFilterModePoint;
  // bind src image to texture and use as input for reduction
  cudaChannelFormatDesc channel_desc = cudaCreateChannelDesc<float>();
  cudaBindTexture2D(0, &tex1_32f_C1__, src->data(), &channel_desc, src->width(), src->height(), src->pitch());
  cudaBindTexture2D(0, &tex_remap_dx_32f_C1__, dx_map->data(), &channel_desc, dx_map->width(), dx_map->height(), dx_map->pitch());
  cudaBindTexture2D(0, &tex_remap_dy_32f_C1__, dy_map->data(), &channel_desc, dy_map->width(), dy_map->height(), dy_map->pitch());
  // fragmentation: 16x16 thread blocks covering the destination image
  unsigned int block_size = 16;
  dim3 dimBlock(block_size, block_size);
  dim3 dimGridOut(iu::divUp(dst->width(), dimBlock.x), iu::divUp(dst->height(), dimBlock.y));
  // Nearest / cubic variants fetch raw texels (point filtering); linear
  // relies on the texture unit's hardware bilinear filtering.
  switch(interpolation)
  {
  case IU_INTERPOLATE_NEAREST:
  case IU_INTERPOLATE_CUBIC:
  case IU_INTERPOLATE_CUBIC_SPLINE:
    tex1_32f_C1__.filterMode = cudaFilterModePoint;
    break;
  case IU_INTERPOLATE_LINEAR:
    tex1_32f_C1__.filterMode = cudaFilterModeLinear;
    break;
  }
  switch(interpolation)
  {
  case IU_INTERPOLATE_NEAREST:
  case IU_INTERPOLATE_LINEAR: // fallthrough intended
    cuRemapKernel_32f_C1 <<< dimGridOut, dimBlock >>> (
        dst->data(), dst->stride(), dst->width(), dst->height());
    break;
  case IU_INTERPOLATE_CUBIC:
    cuRemapCubicKernel_32f_C1 <<< dimGridOut, dimBlock >>> (
        dst->data(), dst->stride(), dst->width(), dst->height());
    break;
  case IU_INTERPOLATE_CUBIC_SPLINE:
    cuRemapCubicSplineKernel_32f_C1 <<< dimGridOut, dimBlock >>> (
        dst->data(), dst->stride(), dst->width(), dst->height());
    break;
  }
  cudaUnbindTexture(&tex1_32f_C1__);
  cudaUnbindTexture(&tex_remap_dx_32f_C1__);
  cudaUnbindTexture(&tex_remap_dy_32f_C1__);
  // single error check after unbinding; earlier API/launch errors surface here
  IU_CUDA_CHECK();
}
//-----------------------------------------------------------------------------
// 8u_C1
// Remap one output pixel of an 8-bit single-channel image using manual
// bilinear interpolation of the four texels around the warped coordinate.
// Displacements come from tex_remap_dx_32f_C1__ / tex_remap_dy_32f_C1__;
// source texels are fetched from tex1_8u_C1__ (point sampling assumed).
// Currently unused: the host wrapper launches only the point-interp kernel.
__global__ void cuRemapLinearInterpKernel_8u_C1(unsigned char*dst, size_t stride, int width, int height)
{
  const int x = blockIdx.x*blockDim.x + threadIdx.x;
  const int y = blockIdx.y*blockDim.y + threadIdx.y;
  // texture coordinates (texel centers)
  const float xx = x+0.5f;
  const float yy = y+0.5f;
  // warped texture coordinates
  const float wx = xx + tex2D(tex_remap_dx_32f_C1__, xx, yy);
  const float wy = yy + tex2D(tex_remap_dy_32f_C1__, xx, yy);
  if (x<width && y<height) // Check if out coordinates lie inside output image
  {
    int wx1 = IUMAX(0, static_cast<int>(wx));
    int wx2 = IUMIN(width, wx1+1);
    int wy1 = IUMAX(0, static_cast<int>(wy));
    int wy2 = IUMIN(height, wy1+1);
    // Bug fix: the interpolation weights must be derived from the *warped*
    // position (wx,wy), not the output position (xx,yy); the two only agree
    // when the displacement is zero.
    float dx = wx2-wx;
    float dy = wy2-wy;
    // NOTE(review): wx/wy are expressed at texel centers (x+0.5), so the
    // 2x2 neighbourhood is shifted by half a texel relative to the usual
    // bilinear convention — confirm the intended sampling convention
    // before re-enabling this kernel.
    float val1 = dx*dy*static_cast<float>(tex2D(tex1_8u_C1__,wx1,wy1))/255.0f;
    float val2 = dx*(1-dy)*static_cast<float>(tex2D(tex1_8u_C1__,wx1,wy2))/255.0f;
    float val3 = (1-dx)*dy*static_cast<float>(tex2D(tex1_8u_C1__,wx2,wy1))/255.0f;
    float val4 = (1-dx)*(1-dy)*static_cast<float>(tex2D(tex1_8u_C1__,wx2,wy2))/255.0f;
    dst[y*stride+x] = (val1 + val2 + val3 + val4) * 255;
    //dst[y*stride+x] = tex2D(tex1_8u_C1__, wx, wy);
  }
}
//-----------------------------------------------------------------------------
// 8u_C1
// Remap one output pixel of an 8-bit single-channel image with point
// (nearest) sampling of tex1_8u_C1__ at the warped position. Displacements
// come from tex_remap_dx_32f_C1__ / tex_remap_dy_32f_C1__.
__global__ void cuRemapPointInterpKernel_8u_C1(unsigned char*dst, size_t stride, int width, int height)
{
  const int px = blockIdx.x*blockDim.x + threadIdx.x;
  const int py = blockIdx.y*blockDim.y + threadIdx.y;
  if (px >= width || py >= height)
    return; // thread maps to no output pixel
  // sample at texel centers
  const float cx = px + 0.5f;
  const float cy = py + 0.5f;
  // displaced (warped) sampling position
  const float sx = cx + tex2D(tex_remap_dx_32f_C1__, cx, cy);
  const float sy = cy + tex2D(tex_remap_dy_32f_C1__, cx, cy);
  dst[py*stride + px] = tex2D(tex1_8u_C1__, sx, sy);
}
//-----------------------------------------------------------------------------
// Remap an 8-bit single-channel GPU image: each destination pixel is sampled
// from `src` at a position displaced by (dx_map, dy_map).
//
// Note: the interpolation switch is commented out below, so the
// `interpolation` argument is currently ignored and point (nearest)
// sampling is always used.
void cuRemap(iu::ImageGpu_8u_C1* src,
             iu::ImageGpu_32f_C1* dx_map, iu::ImageGpu_32f_C1* dy_map,
             iu::ImageGpu_8u_C1* dst, IuInterpolationType interpolation)
{
  // source image: clamp-to-edge, unnormalized coordinates, point sampling
  tex1_8u_C1__.addressMode[0] = cudaAddressModeClamp;
  tex1_8u_C1__.addressMode[1] = cudaAddressModeClamp;
  tex1_8u_C1__.normalized = false;
  tex1_8u_C1__.filterMode = cudaFilterModePoint;
  // displacement maps are always point-sampled
  tex_remap_dx_32f_C1__.addressMode[0] = cudaAddressModeClamp;
  tex_remap_dx_32f_C1__.addressMode[1] = cudaAddressModeClamp;
  tex_remap_dx_32f_C1__.normalized = false;
  tex_remap_dx_32f_C1__.filterMode = cudaFilterModePoint;
  tex_remap_dy_32f_C1__.addressMode[0] = cudaAddressModeClamp;
  tex_remap_dy_32f_C1__.addressMode[1] = cudaAddressModeClamp;
  tex_remap_dy_32f_C1__.normalized = false;
  tex_remap_dy_32f_C1__.filterMode = cudaFilterModePoint;
  // bind src image to texture and use as input for reduction
  cudaChannelFormatDesc channel_desc_8u_C1 = cudaCreateChannelDesc<unsigned char>();
  cudaChannelFormatDesc channel_desc = cudaCreateChannelDesc<float>();
  cudaBindTexture2D(0, &tex1_8u_C1__, src->data(), &channel_desc_8u_C1, src->width(), src->height(), src->pitch());
  cudaBindTexture2D(0, &tex_remap_dx_32f_C1__, dx_map->data(), &channel_desc, dx_map->width(), dx_map->height(), dx_map->pitch());
  cudaBindTexture2D(0, &tex_remap_dy_32f_C1__, dy_map->data(), &channel_desc, dy_map->width(), dy_map->height(), dy_map->pitch());
  // fragmentation: 16x16 thread blocks covering the destination image
  unsigned int block_size = 16;
  dim3 dimBlock(block_size, block_size);
  dim3 dimGridOut(iu::divUp(dst->width(), dimBlock.x), iu::divUp(dst->height(), dimBlock.y));
  // Disabled interpolation selection (point sampling is always used):
  //  switch(interpolation)
  //  {
  //  case IU_INTERPOLATE_NEAREST:
  //  case IU_INTERPOLATE_CUBIC:
  //  case IU_INTERPOLATE_CUBIC_SPLINE:
  //    tex1_8u_C1__.filterMode = cudaFilterModePoint;
  //    break;
  //  case IU_INTERPOLATE_LINEAR:
  //    tex1_8u_C1__.filterMode = cudaFilterModeLinear;
  //    break;
  //  }
  //  switch(interpolation)
  //  {
  //  case IU_INTERPOLATE_LINEAR: // fallthrough intended
  //    cuRemapLinearInterpKernel_8u_C1 <<< dimGridOut, dimBlock >>> (
  //        dst->data(), dst->stride(), dst->width(), dst->height());
  //    break;
  //  default:
  //  case IU_INTERPOLATE_NEAREST:
  cuRemapPointInterpKernel_8u_C1 <<< dimGridOut, dimBlock >>> (
      dst->data(), dst->stride(), dst->width(), dst->height());
  //    break;
  //  }
  cudaUnbindTexture(&tex1_8u_C1__);
  cudaUnbindTexture(&tex_remap_dx_32f_C1__);
  cudaUnbindTexture(&tex_remap_dy_32f_C1__);
  // single error check after unbinding; earlier API/launch errors surface here
  IU_CUDA_CHECK();
}
} // namespace iuprivate
#endif // IUTRANSFORM_REMAP_CU
|
8d071f94778385e12932938945efb558f13cacbc.hip
|
// !!! This is a file automatically generated by hipify!!!
// Copyright (C) 2011, Chris Foster and the other authors and contributors.
// All rights reserved.
//
// Redistribution and use in source and binary forms, with or without
// modification, are permitted provided that the following conditions are met:
//
// * Redistributions of source code must retain the above copyright notice,
// this list of conditions and the following disclaimer.
// * Redistributions in binary form must reproduce the above copyright notice,
// this list of conditions and the following disclaimer in the documentation
// and/or other materials provided with the distribution.
// * Neither the name of the software's owners nor the names of its
// contributors may be used to endorse or promote products derived from this
// software without specific prior written permission.
//
// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
// AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
// IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
// ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE
// LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
// CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
// SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
// INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
// CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
// ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
// POSSIBILITY OF SUCH DAMAGE.
//
// (This is the New BSD license)
#define COMPILE_FOR_GPU
#include "compute_flames.h"
#include <iostream>
#include <assert.h>
#include <hip/hip_runtime.h>
#include <hiprand/hiprand_kernel.h>
#include <cuda_gl_interop.h>
#include <thrust/device_vector.h>
#define ASSERT_CUDA_SUCCESS(expr) \
do { \
hipError_t err = expr; \
if(err != hipSuccess) \
{ \
std::cerr << "Cuda error: \"" << hipGetErrorString(err) \
<< "\" in calling " #expr "\n"; \
abort(); \
} \
} while(false)
#define ASSERT_KERNEL_SUCCESS(str) \
do { \
hipError_t err = hipGetLastError(); \
if(err != hipSuccess) \
{ \
std::cerr << "Cuda error: \"" << hipGetErrorString(err) \
<< "\" in calling " str "\n"; \
abort(); \
} \
} while(false)
// TODO: Fix this awful hard coded maximum!
#define MAX_MAPS 20
// Select device 0 for GPU/OpenGL interop before any other GPU work.
// NOTE(review): hipGLSetGLDevice/cudaGLSetGLDevice is deprecated in newer
// toolkits in favor of the cudaGraphicsGLRegister* API — confirm toolkit version.
void initCuda()
{
    ASSERT_CUDA_SUCCESS(hipGLSetGLDevice(0));
}
// Seed one hiprand generator per thread (seed 42, per-thread sequence/offset
// so streams are independent).  Launch with at least n total threads; any
// surplus threads do nothing.
__global__ void rngInitKernel(hiprandState_t* generators, int n)
{
    const int tid = blockDim.x*blockIdx.x + threadIdx.x;
    if(tid < n)
        hiprand_init(42, tid, tid, &generators[tid]);
}
// Chaos-game iteration for a fractal flame.  Each thread owns one persistent
// RNG stream and writes a disjoint, grid-strided subset of the nPoints output
// points.  flameMaps holds nMaps "in loop" maps followed by one final
// ("out of loop") map at index nMaps, so nMaps+1 entries are staged into
// shared memory.  Requires nMaps+1 <= MAX_MAPS and nMaps+1 <= blockDim.x.
__global__ void flameGenKernel(IFSPoint* points, hiprandState_t* rngs,
                               int nThreads, long long nPoints,
                               FlameMapping* flameMaps, int nMaps)
{
    long long id = blockIdx.x*blockDim.x + threadIdx.x;
    __shared__ FlameMapping maps[MAX_MAPS];
    // Second condition guards the shared array: the original could write past
    // maps[MAX_MAPS-1] if a caller passed nMaps+1 > MAX_MAPS.
    if(threadIdx.x < nMaps+1 && threadIdx.x < MAX_MAPS)
        maps[threadIdx.x] = flameMaps[threadIdx.x];
    // Block-wide barrier so every thread sees the populated shared maps.
    // (Was the non-standard spelling "syncthreads()".)
    __syncthreads();
    // Clamped index of the final map; identical to nMaps for valid inputs.
    int finalIdx = nMaps < MAX_MAPS ? nMaps : MAX_MAPS-1;
    hiprandState_t gen = rngs[id];
    // Discard initial iterations so the orbit settles onto the attractor.
    const int discard = 20;
    V2f p(0);
    C3f col(0);
    for(int i = 0; i < discard; ++i)
    {
        int mapIdx = hiprand(&gen) % nMaps;
        const FlameMapping& m = maps[mapIdx];
        p = m.map(p);
        // Blend point color toward the chosen map's color.
        col = m.colorSpeed*m.col + (1-m.colorSpeed)*col;
    }
    for(long long i = id; i < nPoints; i += nThreads)
    {
        int mapIdx = hiprand(&gen) % nMaps;
        const FlameMapping& m = maps[mapIdx];
        p = m.map(p);
        col = m.colorSpeed*m.col + (1-m.colorSpeed)*col;
        // "out of loop" map is last one in the maps array.
        points[i].pos = maps[finalIdx].map(p);
        points[i].col = col;
    }
    // Persist the RNG state so the next launch continues the sequence.
    rngs[id] = gen;
}
// Private state for GPUFlameEngine: fixed launch width, device-resident flame
// maps (reused buffer), and one persistent RNG state per thread so repeated
// generate() calls continue the same random sequences.
struct GPUFlameEngine::Pimpl
{
    int nThreads;
    thrust::device_vector<FlameMapping> flameMaps;
    thrust::device_vector<hiprandState_t> randState;
    Pimpl(int nThreads)
        : nThreads(nThreads),
        flameMaps(),
        randState(nThreads)
    { }
};
// Allocate per-thread RNG state for a fixed launch width (largest multiple of
// 256 not exceeding 40000) and seed every generator once up front, so that
// generate() can be called repeatedly without re-initialization cost.
GPUFlameEngine::GPUFlameEngine()
    : m_pimpl(0)
{
    const int blockSize = 256;
    const int nThreads = blockSize*(40000/blockSize);
    m_pimpl = new Pimpl(nThreads);
    hipLaunchKernelGGL(( rngInitKernel), dim3(ceildiv(m_pimpl->nThreads,blockSize)), dim3(blockSize), 0, 0,
        thrust::raw_pointer_cast(&m_pimpl->randState[0]), m_pimpl->nThreads);
    ASSERT_KERNEL_SUCCESS("rngInitKernel");
}
// Releases the pimpl; the thrust device vectors inside free their GPU
// allocations in its destructor.
GPUFlameEngine::~GPUFlameEngine()
{
    delete m_pimpl;
}
// Fill the point VBO with fractal-flame samples computed on the GPU.
// Uploads the map set (plus the final map as the last entry), maps the GL
// buffer for CUDA access, runs flameGenKernel, and unmaps.
void GPUFlameEngine::generate(PointVBO* points, const FlameMaps& flameMaps)
{
    // Can't get the new API to work for some reason...
//    cudaGraphicsResource_t *cudaRes = 0;
//    ASSERT_CUDA_SUCCESS(hipGraphicsGLRegisterBuffer(cudaRes, points->id(),
//                                     hipGraphicsRegisterFlagsWriteDiscard));
//    ASSERT_CUDA_SUCCESS(hipGraphicsMapResources(1, cudaRes));
//    size_t nBytes = 0;
//    ASSERT_CUDA_SUCCESS(hipGraphicsResourceGetMappedPointer((void**)&ifsPoints,
//                                                    &nBytes, *cudaRes));
//    assert(nBytes/sizeof(IFSPoint) == points->size());
//    ASSERT_CUDA_SUCCESS(hipGraphicsUnmapResources(1, cudaRes));
//    ASSERT_CUDA_SUCCESS(hipGraphicsUnregisterResource(*cudaRes));
    ASSERT_CUDA_SUCCESS(hipGLRegisterBufferObject(points->id()));
    thrust::device_vector<FlameMapping>& flameMaps_d = m_pimpl->flameMaps;
    flameMaps_d = flameMaps.maps;
    // Truncate BEFORE appending the final map.  The previous code appended
    // first and resized to MAX_MAPS afterwards, which could (a) silently drop
    // the final "out of loop" map and (b) still pass the untruncated map
    // count to the kernel, overflowing its MAX_MAPS shared array.
    if(flameMaps_d.size() > MAX_MAPS-1)
        flameMaps_d.resize(MAX_MAPS-1);
    // Kernel convention: the final map always lives at the last index.
    flameMaps_d.push_back(flameMaps.finalMap);
    int nMaps = (int)flameMaps_d.size() - 1;
    IFSPoint* ifsPoints = 0;
    ASSERT_CUDA_SUCCESS(hipGLMapBufferObject__((void**)&ifsPoints, points->id()));
    const int blockSize = 256;
    hipLaunchKernelGGL(( flameGenKernel), dim3(ceildiv(m_pimpl->nThreads, blockSize)), dim3(blockSize), 0, 0,
        ifsPoints, thrust::raw_pointer_cast(&m_pimpl->randState[0]),
        m_pimpl->nThreads, points->size(),
        thrust::raw_pointer_cast(&flameMaps_d[0]),
        nMaps
    );
    ASSERT_KERNEL_SUCCESS("flameGenKernel");
    ASSERT_CUDA_SUCCESS(hipGLUnmapBufferObject(points->id()));
    ASSERT_CUDA_SUCCESS(hipGLUnregisterBufferObject(points->id()));
}
|
8d071f94778385e12932938945efb558f13cacbc.cu
|
// Copyright (C) 2011, Chris Foster and the other authors and contributors.
// All rights reserved.
//
// Redistribution and use in source and binary forms, with or without
// modification, are permitted provided that the following conditions are met:
//
// * Redistributions of source code must retain the above copyright notice,
// this list of conditions and the following disclaimer.
// * Redistributions in binary form must reproduce the above copyright notice,
// this list of conditions and the following disclaimer in the documentation
// and/or other materials provided with the distribution.
// * Neither the name of the software's owners nor the names of its
// contributors may be used to endorse or promote products derived from this
// software without specific prior written permission.
//
// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
// AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
// IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
// ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE
// LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
// CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
// SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
// INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
// CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
// ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
// POSSIBILITY OF SUCH DAMAGE.
//
// (This is the New BSD license)
#define COMPILE_FOR_GPU
#include "compute_flames.h"
#include <iostream>
#include <assert.h>
#include <cuda.h>
#include <curand_kernel.h>
#include <cuda_gl_interop.h>
#include <thrust/device_vector.h>
#define ASSERT_CUDA_SUCCESS(expr) \
do { \
cudaError_t err = expr; \
if(err != cudaSuccess) \
{ \
std::cerr << "Cuda error: \"" << cudaGetErrorString(err) \
<< "\" in calling " #expr "\n"; \
abort(); \
} \
} while(false)
#define ASSERT_KERNEL_SUCCESS(str) \
do { \
cudaError_t err = cudaGetLastError(); \
if(err != cudaSuccess) \
{ \
std::cerr << "Cuda error: \"" << cudaGetErrorString(err) \
<< "\" in calling " str "\n"; \
abort(); \
} \
} while(false)
// TODO: Fix this awful hard coded maximum!
#define MAX_MAPS 20
// Select device 0 for CUDA/OpenGL interop before any other GPU work.
// NOTE(review): cudaGLSetGLDevice is deprecated in newer toolkits in favor of
// the cudaGraphicsGLRegister* API — confirm toolkit version.
void initCuda()
{
    ASSERT_CUDA_SUCCESS(cudaGLSetGLDevice(0));
}
// Seed one curand generator per thread (seed 42, per-thread sequence/offset
// so streams are independent).  Launch with at least n total threads; any
// surplus threads do nothing.
__global__ void rngInitKernel(curandState_t* generators, int n)
{
    const int tid = blockDim.x*blockIdx.x + threadIdx.x;
    if(tid < n)
        curand_init(42, tid, tid, &generators[tid]);
}
// Chaos-game iteration for a fractal flame.  Each thread owns one persistent
// RNG stream and writes a disjoint, grid-strided subset of the nPoints output
// points.  flameMaps holds nMaps "in loop" maps followed by one final
// ("out of loop") map at index nMaps, so nMaps+1 entries are staged into
// shared memory.  Requires nMaps+1 <= MAX_MAPS and nMaps+1 <= blockDim.x.
__global__ void flameGenKernel(IFSPoint* points, curandState_t* rngs,
                               int nThreads, long long nPoints,
                               FlameMapping* flameMaps, int nMaps)
{
    long long id = blockIdx.x*blockDim.x + threadIdx.x;
    __shared__ FlameMapping maps[MAX_MAPS];
    // Second condition guards the shared array: the original could write past
    // maps[MAX_MAPS-1] if a caller passed nMaps+1 > MAX_MAPS.
    if(threadIdx.x < nMaps+1 && threadIdx.x < MAX_MAPS)
        maps[threadIdx.x] = flameMaps[threadIdx.x];
    // Block-wide barrier so every thread sees the populated shared maps.
    // (Was the non-standard spelling "syncthreads()".)
    __syncthreads();
    // Clamped index of the final map; identical to nMaps for valid inputs.
    int finalIdx = nMaps < MAX_MAPS ? nMaps : MAX_MAPS-1;
    curandState_t gen = rngs[id];
    // Discard initial iterations so the orbit settles onto the attractor.
    const int discard = 20;
    V2f p(0);
    C3f col(0);
    for(int i = 0; i < discard; ++i)
    {
        int mapIdx = curand(&gen) % nMaps;
        const FlameMapping& m = maps[mapIdx];
        p = m.map(p);
        // Blend point color toward the chosen map's color.
        col = m.colorSpeed*m.col + (1-m.colorSpeed)*col;
    }
    for(long long i = id; i < nPoints; i += nThreads)
    {
        int mapIdx = curand(&gen) % nMaps;
        const FlameMapping& m = maps[mapIdx];
        p = m.map(p);
        col = m.colorSpeed*m.col + (1-m.colorSpeed)*col;
        // "out of loop" map is last one in the maps array.
        points[i].pos = maps[finalIdx].map(p);
        points[i].col = col;
    }
    // Persist the RNG state so the next launch continues the sequence.
    rngs[id] = gen;
}
// Private state for GPUFlameEngine: fixed launch width, device-resident flame
// maps (reused buffer), and one persistent RNG state per thread so repeated
// generate() calls continue the same random sequences.
struct GPUFlameEngine::Pimpl
{
    int nThreads;
    thrust::device_vector<FlameMapping> flameMaps;
    thrust::device_vector<curandState_t> randState;
    Pimpl(int nThreads)
        : nThreads(nThreads),
        flameMaps(),
        randState(nThreads)
    { }
};
// Allocate per-thread RNG state for a fixed launch width (largest multiple of
// 256 not exceeding 40000) and seed every generator once up front, so that
// generate() can be called repeatedly without re-initialization cost.
GPUFlameEngine::GPUFlameEngine()
    : m_pimpl(0)
{
    const int blockSize = 256;
    const int nThreads = blockSize*(40000/blockSize);
    m_pimpl = new Pimpl(nThreads);
    rngInitKernel<<<ceildiv(m_pimpl->nThreads,blockSize), blockSize>>>(
        thrust::raw_pointer_cast(&m_pimpl->randState[0]), m_pimpl->nThreads);
    ASSERT_KERNEL_SUCCESS("rngInitKernel");
}
// Releases the pimpl; the thrust device vectors inside free their GPU
// allocations in its destructor.
GPUFlameEngine::~GPUFlameEngine()
{
    delete m_pimpl;
}
// Fill the point VBO with fractal-flame samples computed on the GPU.
// Uploads the map set (plus the final map as the last entry), maps the GL
// buffer for CUDA access, runs flameGenKernel, and unmaps.
void GPUFlameEngine::generate(PointVBO* points, const FlameMaps& flameMaps)
{
    // Can't get the new API to work for some reason...
//    cudaGraphicsResource_t *cudaRes = 0;
//    ASSERT_CUDA_SUCCESS(cudaGraphicsGLRegisterBuffer(cudaRes, points->id(),
//                                     cudaGraphicsRegisterFlagsWriteDiscard));
//    ASSERT_CUDA_SUCCESS(cudaGraphicsMapResources(1, cudaRes));
//    size_t nBytes = 0;
//    ASSERT_CUDA_SUCCESS(cudaGraphicsResourceGetMappedPointer((void**)&ifsPoints,
//                                                    &nBytes, *cudaRes));
//    assert(nBytes/sizeof(IFSPoint) == points->size());
//    ASSERT_CUDA_SUCCESS(cudaGraphicsUnmapResources(1, cudaRes));
//    ASSERT_CUDA_SUCCESS(cudaGraphicsUnregisterResource(*cudaRes));
    ASSERT_CUDA_SUCCESS(cudaGLRegisterBufferObject(points->id()));
    thrust::device_vector<FlameMapping>& flameMaps_d = m_pimpl->flameMaps;
    flameMaps_d = flameMaps.maps;
    // Truncate BEFORE appending the final map.  The previous code appended
    // first and resized to MAX_MAPS afterwards, which could (a) silently drop
    // the final "out of loop" map and (b) still pass the untruncated map
    // count to the kernel, overflowing its MAX_MAPS shared array.
    if(flameMaps_d.size() > MAX_MAPS-1)
        flameMaps_d.resize(MAX_MAPS-1);
    // Kernel convention: the final map always lives at the last index.
    flameMaps_d.push_back(flameMaps.finalMap);
    int nMaps = (int)flameMaps_d.size() - 1;
    IFSPoint* ifsPoints = 0;
    ASSERT_CUDA_SUCCESS(cudaGLMapBufferObject((void**)&ifsPoints, points->id()));
    const int blockSize = 256;
    flameGenKernel<<<ceildiv(m_pimpl->nThreads, blockSize), blockSize>>>(
        ifsPoints, thrust::raw_pointer_cast(&m_pimpl->randState[0]),
        m_pimpl->nThreads, points->size(),
        thrust::raw_pointer_cast(&flameMaps_d[0]),
        nMaps
    );
    ASSERT_KERNEL_SUCCESS("flameGenKernel");
    ASSERT_CUDA_SUCCESS(cudaGLUnmapBufferObject(points->id()));
    ASSERT_CUDA_SUCCESS(cudaGLUnregisterBufferObject(points->id()));
}
|
iofuncs.hip
|
// !!! This is a file automatically generated by hipify!!!
#include <stdlib.h>
#include <stdio.h>
#include <complex.h>
#include <helper_functions.h>
#include <helper_cuda.h>
#include <hipfft.h>
#include <hip/hip_complex.h>
#include <string.h>
#include <sys/types.h>
#include <sys/stat.h>
#include <unistd.h>
#include "dnsparams.h"
#include "iofuncs.h"
#include "fftfuncs.h"
//============================================================================================
// Print to screen
//============================================================================================
// Print name, driver/runtime version, compute capability, global memory and
// core counts for each of the first numGPUs devices.  SPRINTF and
// _ConvertSMVer2Cores come from the CUDA sample helper headers.
void displayDeviceProps(int numGPUs){
	int i, driverVersion = 0, runtimeVersion = 0;
	for( i = 0; i<numGPUs; ++i)
	{
		hipSetDevice(i);
		hipDeviceProp_t deviceProp;
		hipGetDeviceProperties(&deviceProp, i);
		printf(" Device name: %s\n", deviceProp.name);
		hipDriverGetVersion(&driverVersion);
		hipRuntimeGetVersion(&runtimeVersion);
		printf(" CUDA Driver Version / Runtime Version %d.%d / %d.%d\n", driverVersion/1000, (driverVersion%100)/10, runtimeVersion/1000, (runtimeVersion%100)/10);
		printf(" CUDA Capability Major/Minor version number: %d.%d\n", deviceProp.major, deviceProp.minor);
		char msg[256];
		SPRINTF(msg, " Total amount of global memory: %.0f MBytes \n",
			(float)deviceProp.totalGlobalMem/1048576.0f);
		printf("%s", msg);
		printf(" (%2d) Multiprocessors, (%3d) CUDA Cores/MP: %d CUDA Cores\n",
			deviceProp.multiProcessorCount,
			_ConvertSMVer2Cores(deviceProp.major, deviceProp.minor),
			_ConvertSMVer2Cores(deviceProp.major, deviceProp.minor) * deviceProp.multiProcessorCount);
		printf("\n");
	}
	return;
}
// Print one row of turbulence statistics (rms velocity, kinetic energy,
// dissipation, length scales, scalar dissipation, vorticity) plus the
// timestep wall time; steptime is in milliseconds.
void printTurbStats(int c, double steptime, statistics stats)
{
	if(c==0)
		printf("\n Entering time-stepping loop...\n");
	// NOTE(review): the c%20 guard below is commented out, so the header row
	// is re-printed on every call — confirm this is intended.
	// if(c%20==0) // Print new header every few timesteps
	printf(" iter | u' | k | eps | l | eta | lambda | chi | |omega| | time \n"
		"-----------------------------------------------------------\n");
	// Print statistics to screen
	printf(" %d | %2.3f | %2.3f | %2.3f | %2.3f | %2.3f | %2.3f | %2.3f | % 2.3f | %2.3f \n",
		c*n_stats, stats.Vrms, stats.KE, stats.epsilon, stats.l, stats.eta, stats.lambda, stats.chi, stats.omega_z, steptime/1000);
	return;
}
// Print a statistics-free row for iteration c showing only the wall time
// (steptime in milliseconds), aligned with printTurbStats' columns.
void printIterTime(int c, double steptime)
{
	// Print iteration time to screen
	printf(" %d | | | | | | | | |% 2.3f \n",
		c,steptime/1000);
	return;
}
//============================================================================================
// Write to file
//============================================================================================
// Write one Y-profile (NY doubles) to "<rootdir><sim_name>Yprofs/<name>.<c>".
// c is the timestep index used in the filename; data is a host array of NY
// doubles.
void writeYprofs(const int c, const char* name, double *data)
{
	char title [256];
	FILE *out;
	char folder [256];
	// Pass sim_name as a "%s" argument, not as the format string: a '%' in
	// sim_name would otherwise be undefined behavior.
	snprintf(folder, sizeof(folder), "%s", sim_name);
	snprintf(title, sizeof(title), "%s%sYprofs/%s.%i", rootdir, folder, name, c);
	printf("Writing data to %s \n", title);
	out = fopen(title, "wb");
	if(out == NULL){	// Guard: fwrite/fclose on a NULL stream would crash
		fprintf(stderr, "Error: could not open %s for writing\n", title);
		return;
	}
	fwrite(data, sizeof(double), NY, out);
	fclose(out);
	return;
}
// Save all mean Y-profiles (u, v, w, scalar s, progress variable c) for
// timestep c.  On the first call (c==0) the Yprofs/ output directory is
// created if missing.
void saveYprofs(const int c, profile data)
{
	struct stat st = {0};
	char title[256];
	char folder [256];
	// "%s" guard: never use sim_name itself as the format string.
	snprintf(folder, sizeof(folder), "%s", sim_name);
	if(c==0){	// Create directory for statistics if one doesn't already exist
		snprintf(title, sizeof(title), "%s%s%s", rootdir, folder, "Yprofs/");
		if (stat(title, &st) == -1) {
			mkdir(title, 0700);
		}
	}
	writeYprofs(c, "u_mean", data.u[0]);
	writeYprofs(c, "v_mean", data.v[0]);
	writeYprofs(c, "w_mean", data.w[0]);
	writeYprofs(c, "s_mean", data.s[0]);
	writeYprofs(c, "c_mean", data.c[0]);
	return;
}
// Append a single double to stream f in native binary representation.
void writeDouble(double v, FILE *f) {
	fwrite((void*)(&v), sizeof(double), 1, f);
}
// Append a single double to the per-statistic binary file
// "<rootdir><sim_name>stats/<name>".  c==0 creates/truncates the file;
// c>0 appends.
void writeStats(const int c, const char* name, double in) {
	char title[256];
	FILE *out;
	char folder [256];
	// "%s" guard: never use sim_name itself as the format string.
	snprintf(folder, sizeof(folder), "%s", sim_name);
	snprintf(title, sizeof(title), "%s%sstats/%s", rootdir, folder, name);
	//printf("Writing data to %s \n", title);
	if(c==0){	// First timestep, create new file
		out = fopen(title, "wb");
	}
	else{	// append current timestep data to statistics file
		out = fopen(title, "ab");
	}
	if(out == NULL){	// Guard: writeDouble on a NULL stream would crash
		fprintf(stderr, "Error: could not open %s\n", title);
		return;
	}
	writeDouble(in, out);
	fclose(out);
}
// Append the full statistics record for timestep c to the per-quantity files
// under "<rootdir><sim_name>stats/".  On c==0 the directory is created if
// missing and each file is truncated (see writeStats).
void saveStatsData(const int c, statistics stats)
{
	struct stat st = {0};
	char title[256];
	char folder [256];
	// "%s" guard: never use sim_name itself as the format string.
	snprintf(folder, sizeof(folder), "%s", sim_name);
	if(c==0){	// Create directory for statistics if one doesn't already exist
		snprintf(title, sizeof(title), "%s%s%s", rootdir, folder, "stats/");
		if (stat(title, &st) == -1) {
			mkdir(title, 0700);
		}
	}
	// Save statistics data
	writeStats(c, "Vrms", stats.Vrms);
	writeStats(c, "epsilon", stats.epsilon);
	writeStats(c, "eta", stats.eta);
	writeStats(c, "KE", stats.KE);
	writeStats(c, "lambda", stats.lambda);
	writeStats(c, "l", stats.l);
	writeStats(c, "chi", stats.chi);
	writeStats(c, "omega" , stats.omega);
	writeStats(c, "omega_x", stats.omega_x);
	writeStats(c, "omega_y", stats.omega_y);
	writeStats(c, "omega_z", stats.omega_z);
	// Loop required to write statistics that depend on a second variable
	//for(i=0;i<64;++i){
	//	writeStats(1, "area_z", stats.area_scalar[i]);
	//	writeStats(1, "area_omega" , stats.area_omega[i]);
	//}
	snprintf(title, sizeof(title), "%s%s%s", rootdir, folder, "stats/");
	printf("Statistics data written to %s \n", title);
	return;
}
// Write an x-y slice (fixed z index zplane) of a padded real field to
// "<rootdir><sim_name>vis/<var>_xy.<iter>", preceded by a byte-count header
// (stored as a double, matching readDouble on import).  in holds per-GPU
// host arrays with in-place-FFT padding (z-pencil stride 2*NZ2).
void writexyfields( gpudata gpu, const int iter, const char var, double **in, const int zplane )
{
	int i, j, n, idx;
	char title[256];
	char folder [256];
	// "%s" guard: never use sim_name itself as the format string.
	snprintf(folder, sizeof(folder), "%s", sim_name);
	snprintf(title, sizeof(title), "%s%svis/%c%s.%i", rootdir, folder, var, "_xy", iter);
	printf("Saving data to %s \n", title);
	FILE *out = fopen(title, "wb");
	if(out == NULL){	// Guard: writing to a NULL stream would crash
		fprintf(stderr, "Error: could not open %s for writing\n", title);
		return;
	}
	writeDouble(sizeof(double) * NX*NY, out);	// byte-count header
	for (n = 0; n < gpu.nGPUs; ++n){
		for (i = 0; i < gpu.nx[n]; ++i){
			for (j = 0; j < NY; ++j){
				idx = zplane + 2*NZ2*j + 2*NZ2*NY*i;	// Using padded index for in-place FFT
				writeDouble(in[n][idx], out);
			}
		}
	}
	fclose(out);
	return;
}
// Write an x-z slice (fixed y index yplane) of a padded real field to
// "<rootdir><sim_name>vis/<var>_xz.<iter>", preceded by a byte-count header.
// Each x row is one fwrite of NZ contiguous doubles starting at k=0.
void writexzfields( gpudata gpu, const int iter, const char var, double **in, const int yplane )
{
	int i, k, n, idx;
	char title[256];
	char folder [256];
	// "%s" guard: never use sim_name itself as the format string.
	snprintf(folder, sizeof(folder), "%s", sim_name);
	snprintf(title, sizeof(title), "%s%svis/%c%s.%i", rootdir, folder, var, "_xz", iter);
	printf("Saving data to %s \n", title);
	FILE *out = fopen(title, "wb");
	if(out == NULL){	// Guard: writing to a NULL stream would crash
		fprintf(stderr, "Error: could not open %s for writing\n", title);
		return;
	}
	writeDouble(sizeof(double) * NX*NZ, out);	// byte-count header
	// writelu(sizeof(double) * NX*NY*NZ, out);
	k=0;	// z data is contiguous, so each row starts at k=0
	for (n = 0; n < gpu.nGPUs; ++n){
		for (i = 0; i < gpu.nx[n]; ++i){
			idx = k + 2*NZ2*yplane + 2*NZ2*NY*i;	// Using padded index for in-place FFT
			fwrite((void *)&in[n][idx], sizeof(double), NZ, out);	// Write each k vector at once
		}
	}
	fclose(out);
	return;
}
// Write a y-z slice (fixed x index xplane, local to the owning GPU's slab) to
// "<rootdir><sim_name>vis/<var>_yz.<iter>", preceded by a byte-count header.
// Each y row is one fwrite of NZ contiguous doubles starting at k=0.
void writeyzfields( gpudata gpu, const int iter, const char var, double **in, const int xplane )
{
	int j, k, n, idx;
	char title[256];
	char folder [256];
	// "%s" guard: never use sim_name itself as the format string.
	snprintf(folder, sizeof(folder), "%s", sim_name);
	snprintf(title, sizeof(title), "%s%svis/%c%s.%i", rootdir, folder, var, "_yz", iter);
	printf("Saving data to %s \n", title);
	FILE *out = fopen(title, "wb");
	if(out == NULL){	// Guard: writing to a NULL stream would crash
		fprintf(stderr, "Error: could not open %s for writing\n", title);
		return;
	}
	writeDouble(sizeof(double) * NY*NZ, out);	// byte-count header
	// writelu(sizeof(double) * NX*NY*NZ, out);
	k=0;	// z data is contiguous, so each row starts at k=0
	for (n = 0; n < gpu.nGPUs; ++n){
		for (j = 0; j < NY; ++j){
			idx = k + 2*NZ2*j + 2*NZ2*NY*xplane;	// Using padded index for in-place FFT
			fwrite((void *)&in[n][idx], sizeof(double), NZ, out);	// Write each k vector at once
		}
	}
	fclose(out);
	return;
}
// Save x-y slices (z = NZ/2) of all five fields at timestep c.
// c==0: create the vis/ directory and write the fields as-is (presumably
// already in physical space at startup — TODO confirm).  c>0: inverse-FFT
// the spectral fields, copy to host, write, then forward-FFT back so
// timestepping can continue.
void save2Dfields(int c, fftdata fft, gpudata gpu, fielddata h_vel, fielddata vel)
{
	int n;
	char title[256];
	struct stat st = {0};
	char folder [256];
	// NOTE(review): sim_name is used as the format string here; undefined
	// behavior if it ever contains '%' — prefer snprintf(..., "%s", sim_name).
	snprintf(folder, sizeof(folder), sim_name);
	if(c==0){	// Create new directory for visualization data if one doesn't already exist
		snprintf(title, sizeof(title), "%s%s%s", rootdir, folder, "vis/");
		if (stat(title, &st) == -1) {
			mkdir(title, 0700);
		}
		// Copy data to host
		for(n=0; n<gpu.nGPUs; ++n){
			hipSetDevice(n);
			checkCudaErrors( hipMemcpyAsync(h_vel.u[n], vel.u[n], sizeof(complex double)*gpu.nx[n]*NY*NZ2, hipMemcpyDefault) );
			checkCudaErrors( hipMemcpyAsync(h_vel.v[n], vel.v[n], sizeof(complex double)*gpu.nx[n]*NY*NZ2, hipMemcpyDefault) );
			checkCudaErrors( hipMemcpyAsync(h_vel.w[n], vel.w[n], sizeof(complex double)*gpu.nx[n]*NY*NZ2, hipMemcpyDefault) );
			checkCudaErrors( hipMemcpyAsync(h_vel.s[n], vel.s[n], sizeof(complex double)*gpu.nx[n]*NY*NZ2, hipMemcpyDefault) );
			checkCudaErrors( hipMemcpyAsync(h_vel.c[n], vel.c[n], sizeof(complex double)*gpu.nx[n]*NY*NZ2, hipMemcpyDefault) );
		}
		// NOTE(review): no synchronize between the async copies above and the
		// host-side writes below — confirm copy completion is guaranteed
		// (e.g. via pageable-memory fallback) before relying on this.
		writexyfields( gpu, c, 'u', h_vel.u, NZ/2);
		writexyfields( gpu, c, 'v', h_vel.v, NZ/2);
		writexyfields( gpu, c, 'w', h_vel.w, NZ/2);
		writexyfields( gpu, c, 'z', h_vel.s, NZ/2);
		writexyfields( gpu, c, 'c', h_vel.c, NZ/2);
	}
	else{
		// Inverse Fourier Transform the velocity back to physical space for saving to file.
		inverseTransform(fft, gpu, vel.uh);
		inverseTransform(fft, gpu, vel.vh);
		inverseTransform(fft, gpu, vel.wh);
		inverseTransform(fft, gpu, vel.sh);
		inverseTransform(fft, gpu, vel.ch);
		// Copy data to host
		for(n=0; n<gpu.nGPUs; ++n){
			hipSetDevice(n);
			checkCudaErrors( hipMemcpyAsync(h_vel.u[n], vel.u[n], sizeof(complex double)*gpu.nx[n]*NY*NZ2, hipMemcpyDefault) );
			checkCudaErrors( hipMemcpyAsync(h_vel.v[n], vel.v[n], sizeof(complex double)*gpu.nx[n]*NY*NZ2, hipMemcpyDefault) );
			checkCudaErrors( hipMemcpyAsync(h_vel.w[n], vel.w[n], sizeof(complex double)*gpu.nx[n]*NY*NZ2, hipMemcpyDefault) );
			checkCudaErrors( hipMemcpyAsync(h_vel.s[n], vel.s[n], sizeof(complex double)*gpu.nx[n]*NY*NZ2, hipMemcpyDefault) );
			checkCudaErrors( hipMemcpyAsync(h_vel.c[n], vel.c[n], sizeof(complex double)*gpu.nx[n]*NY*NZ2, hipMemcpyDefault) );
		}
		// Write data to file
		writexyfields(gpu, c, 'u', h_vel.u, NZ/2);
		writexyfields(gpu, c, 'v', h_vel.v, NZ/2);
		writexyfields(gpu, c, 'w', h_vel.w, NZ/2);
		writexyfields(gpu, c, 'z', h_vel.s, NZ/2);
		writexyfields(gpu, c, 'c', h_vel.c, NZ/2);
		// Transform fields back to fourier space for timestepping
		forwardTransform(fft, gpu, vel.u);
		forwardTransform(fft, gpu, vel.v);
		forwardTransform(fft, gpu, vel.w);
		forwardTransform(fft, gpu, vel.s);
		forwardTransform(fft, gpu, vel.c);
	}
	return;
}
// Write a full 3D field (all GPUs' x-slabs, NX*NY*NZ doubles) to
// "<rootdir><sim_name><var>.<iter>", preceded by a byte-count header that
// loadData() reads back.  One fwrite per z-vector skips the FFT padding.
void write3Dfields_mgpu(gpudata gpu, const int iter, const char var, double **in )
{
	int i, j, k, n, idx;
	char title[256];
	char folder [256];
	// "%s" guard: never use sim_name itself as the format string.
	snprintf(folder, sizeof(folder), "%s", sim_name);
	snprintf(title, sizeof(title), "%s%s%c.%i", rootdir, folder, var, iter);
	printf("Saving data to %s \n", title);
	FILE *out = fopen(title, "wb");
	if(out == NULL){	// Guard: writing to a NULL stream would crash
		fprintf(stderr, "Error: could not open %s for writing\n", title);
		return;
	}
	writeDouble(sizeof(double) * NX*NY*NZ, out);	// byte-count header
	// writelu(sizeof(double) * NX*NY*NZ, out);
	k=0;	// z data is contiguous, so each pencil starts at k=0
	for (n = 0; n < gpu.nGPUs; ++n){
		for (i = 0; i < gpu.nx[n]; ++i){
			for (j = 0; j < NY; ++j){
				idx = k + 2*NZ2*j + 2*NZ2*NY*i;	// Using padded index for in-place FFT
				fwrite((void *)&in[n][idx], sizeof(double), NZ, out);	// Write each k vector at once
			}
		}
	}
	fclose(out);
	return;
}
// Save full 3D fields at timestep c under "<rootdir><sim_name>".
// c==0: create the root output directory and write the initial fields
// (presumably already in physical space — TODO confirm).  c>0: inverse-FFT,
// copy to host, write, forward-FFT back for timestepping.
void save3Dfields(int c, fftdata fft, gpudata gpu, fielddata h_vel, fielddata vel){
	int n;
	struct stat st = {0};
	char title [256];
	char folder [256];
	// NOTE(review): sim_name is used as the format string; undefined behavior
	// if it contains '%' — prefer snprintf(..., "%s", sim_name).
	snprintf(folder, sizeof(folder), sim_name);
	snprintf(title, sizeof(title), "%s%s", rootdir, folder);
	//printf("Saving data to %s\n",title);
	if(c==0){
		if (stat(title, &st) == -1) {	// Create root directory for DNS data if one doesn't already exist
			mkdir(title, 0700);
		}
		printf("Saving initial data...\n");
		for(n=0; n<gpu.nGPUs; ++n){
			hipSetDevice(n);
			checkCudaErrors( hipMemcpyAsync(h_vel.u[n], vel.u[n], sizeof(complex double)*gpu.nx[n]*NY*NZ2, hipMemcpyDefault) );
			checkCudaErrors( hipMemcpyAsync(h_vel.v[n], vel.v[n], sizeof(complex double)*gpu.nx[n]*NY*NZ2, hipMemcpyDefault) );
			checkCudaErrors( hipMemcpyAsync(h_vel.w[n], vel.w[n], sizeof(complex double)*gpu.nx[n]*NY*NZ2, hipMemcpyDefault) );
			checkCudaErrors( hipMemcpyAsync(h_vel.s[n], vel.s[n], sizeof(complex double)*gpu.nx[n]*NY*NZ2, hipMemcpyDefault) );
			checkCudaErrors( hipMemcpyAsync(h_vel.c[n], vel.c[n], sizeof(complex double)*gpu.nx[n]*NY*NZ2, hipMemcpyDefault) );
		}
		// NOTE(review): no synchronize between the async copies above and the
		// host-side writes below — confirm completion before the writes.
		// Write data to file
		write3Dfields_mgpu(gpu, 0, 'u', h_vel.u);
		write3Dfields_mgpu(gpu, 0, 'v', h_vel.v);
		write3Dfields_mgpu(gpu, 0, 'w', h_vel.w);
		write3Dfields_mgpu(gpu, 0, 'z', h_vel.s);
		write3Dfields_mgpu(gpu, 0, 'c', h_vel.c);
		return;
	}
	else{
		// Inverse Fourier Transform the velocity back to physical space for saving to file.
		inverseTransform(fft, gpu, vel.uh);
		inverseTransform(fft, gpu, vel.vh);
		inverseTransform(fft, gpu, vel.wh);
		inverseTransform(fft, gpu, vel.sh);
		inverseTransform(fft, gpu, vel.ch);
		// Copy data to host
		printf( "Timestep %i Complete. . .\n", c );
		for(n=0; n<gpu.nGPUs; ++n){
			hipSetDevice(n);
			hipDeviceSynchronize();
			checkCudaErrors( hipMemcpyAsync(h_vel.u[n], vel.u[n], sizeof(complex double)*gpu.nx[n]*NY*NZ2, hipMemcpyDefault) );
			checkCudaErrors( hipMemcpyAsync(h_vel.v[n], vel.v[n], sizeof(complex double)*gpu.nx[n]*NY*NZ2, hipMemcpyDefault) );
			checkCudaErrors( hipMemcpyAsync(h_vel.w[n], vel.w[n], sizeof(complex double)*gpu.nx[n]*NY*NZ2, hipMemcpyDefault) );
			checkCudaErrors( hipMemcpyAsync(h_vel.s[n], vel.s[n], sizeof(complex double)*gpu.nx[n]*NY*NZ2, hipMemcpyDefault) );
			checkCudaErrors( hipMemcpyAsync(h_vel.c[n], vel.c[n], sizeof(complex double)*gpu.nx[n]*NY*NZ2, hipMemcpyDefault) );
		}
		// NOTE(review): synchronize happens before the copies, not after —
		// confirm copies complete before the host writes below.
		// Write data to file
		write3Dfields_mgpu(gpu, c, 'u', h_vel.u);
		write3Dfields_mgpu(gpu, c, 'v', h_vel.v);
		write3Dfields_mgpu(gpu, c, 'w', h_vel.w);
		write3Dfields_mgpu(gpu, c, 'z', h_vel.s);
		write3Dfields_mgpu(gpu, c, 'c', h_vel.c);
		// Transform fields back to fourier space for timestepping
		forwardTransform(fft, gpu, vel.u);
		forwardTransform(fft, gpu, vel.v);
		forwardTransform(fft, gpu, vel.w);
		forwardTransform(fft, gpu, vel.s);
		forwardTransform(fft, gpu, vel.c);
		return;
	}
}
//============================================================================================
// Import from file
//============================================================================================
// Read a 4-byte integer size header from f; returns 0 on read failure.
// Uses sizeof(bin) rather than the original sizeof(float), which only worked
// because int and float happen to be the same size on supported platforms.
int readDataSize(FILE *f){
	int bin;
	int flag = fread((void*)(&bin), sizeof(bin), 1, f);
	if(flag == 1)
		return bin;
	else{
		return 0;
	}
}
// Read one double from f in native binary representation.
// Returns 0 when the read fails (EOF or I/O error) — callers cannot
// distinguish that from a stored value of 0.
double readDouble(FILE *f){
	double value;
	if(fread((void*)(&value), sizeof(value), 1, f) == 1)
		return value;
	return 0;
}
// Read one field named <name> (NX*NY*NZ doubles preceded by a byte-count
// header, see write3Dfields_mgpu) into per-GPU host buffers laid out with
// in-place-FFT padding (z-pencil stride 2*NZ2).
void loadData(gpudata gpu, const char *name, double **var)
{
	int i, j, k, n, idx, N;
	char DataLocation[256];
	char title[256];
	// Default data location
	snprintf(DataLocation, sizeof(DataLocation), "/home/bblakeley/Documents/Research/DNS_Data/Flamelet_Data/R2/%s.0",name);
	// Copy the path using "%s"; the original passed DataLocation itself as
	// the format string, which is undefined behavior if it contains '%'.
	snprintf(title, sizeof(title), "%s", DataLocation);
	printf("Importing data from %s \n", title);
	FILE *file = fopen(title, "rb");
	if(file == NULL){	// Guard: readDouble on a NULL stream would crash
		printf("Error! Could not open %s \n", title);
		return;
	}
	// Header: payload byte count stored as a double (see writeDouble).
	N = readDouble(file)/sizeof(double);
	if(N!=NX*NY*NZ) {
		printf("Error! N!=NX*NY*NZ");
		fclose(file);	// was leaked on this early-return path
		return;
	}
	printf("Reading data from ");
	for (n = 0; n < gpu.nGPUs; ++n){
		printf("GPU %d",n);
		for (i = 0; i < gpu.nx[n]; ++i){
			for (j = 0; j < NY; ++j){
				for (k = 0; k < NZ; ++k){
					idx = k + 2*NZ2*j + 2*NZ2*NY*i;	// padded index for in-place FFT
					var[n][idx] = readDouble(file);
				}
			}
		}
		printf(" ... Done!\n");
	}
	fclose(file);
	return;
}
// Load the u, v, w velocity fields from disk into host buffers, then copy
// them asynchronously to each GPU's device buffers.
// NOTE(review): the copies are async and not synchronized here — callers
// presumably synchronize before using vel on the device; confirm.
void importVelocity(gpudata gpu, fielddata h_vel, fielddata vel)
{	// Import data from file
	int n;
	loadData(gpu, "u", h_vel.u);
	loadData(gpu, "v", h_vel.v);
	loadData(gpu, "w", h_vel.w);
	// Copy data from host to device
	// printf("Copy results to GPU memory...\n");
	for(n=0; n<gpu.nGPUs; ++n){
		hipSetDevice(n);
		hipDeviceSynchronize();
		checkCudaErrors( hipMemcpyAsync(vel.u[n], h_vel.u[n], sizeof(hipfftDoubleComplex)*gpu.nx[n]*NY*NZ2, hipMemcpyDefault) );
		checkCudaErrors( hipMemcpyAsync(vel.v[n], h_vel.v[n], sizeof(hipfftDoubleComplex)*gpu.nx[n]*NY*NZ2, hipMemcpyDefault) );
		checkCudaErrors( hipMemcpyAsync(vel.w[n], h_vel.w[n], sizeof(hipfftDoubleComplex)*gpu.nx[n]*NY*NZ2, hipMemcpyDefault) );
	}
	return;
}
// Load the scalar field "z" from disk into host buffers, then copy it
// asynchronously to each GPU's device buffer (no synchronize afterwards —
// see importVelocity).
void importScalar(gpudata gpu, fielddata h_vel, fielddata vel)
{	// Import data from file
	int n;
	loadData(gpu, "z", h_vel.s);
	// Copy data from host to device
	// printf("Copy results to GPU memory...\n");
	for(n=0; n<gpu.nGPUs; ++n){
		hipSetDevice(n);
		hipDeviceSynchronize();
		checkCudaErrors( hipMemcpyAsync(vel.s[n], h_vel.s[n], sizeof(hipfftDoubleComplex)*gpu.nx[n]*NY*NZ2, hipMemcpyDefault) );
	}
	return;
}
// Convenience wrapper: import velocity then scalar fields.  Marked deprecated
// in the original source; prefer calling the two functions directly.
void importData(gpudata gpu, fielddata h_vel, fielddata vel)	// Deprecated
{	// Import data
	importVelocity(gpu, h_vel, vel);
	importScalar(gpu, h_vel, vel);
	return;
}
|
iofuncs.cu
|
#include <stdlib.h>
#include <stdio.h>
#include <complex.h>
#include <helper_functions.h>
#include <helper_cuda.h>
#include <cufft.h>
#include <cuComplex.h>
#include <string.h>
#include <sys/types.h>
#include <sys/stat.h>
#include <unistd.h>
#include "dnsparams.h"
#include "iofuncs.h"
#include "fftfuncs.h"
//============================================================================================
// Print to screen
//============================================================================================
// Print name, driver/runtime version, compute capability, global memory and
// core counts for each of the first numGPUs devices.  SPRINTF and
// _ConvertSMVer2Cores come from the CUDA sample helper headers.
void displayDeviceProps(int numGPUs){
	int i, driverVersion = 0, runtimeVersion = 0;
	for( i = 0; i<numGPUs; ++i)
	{
		cudaSetDevice(i);
		cudaDeviceProp deviceProp;
		cudaGetDeviceProperties(&deviceProp, i);
		printf(" Device name: %s\n", deviceProp.name);
		cudaDriverGetVersion(&driverVersion);
		cudaRuntimeGetVersion(&runtimeVersion);
		printf(" CUDA Driver Version / Runtime Version %d.%d / %d.%d\n", driverVersion/1000, (driverVersion%100)/10, runtimeVersion/1000, (runtimeVersion%100)/10);
		printf(" CUDA Capability Major/Minor version number: %d.%d\n", deviceProp.major, deviceProp.minor);
		char msg[256];
		SPRINTF(msg, " Total amount of global memory: %.0f MBytes \n",
			(float)deviceProp.totalGlobalMem/1048576.0f);
		printf("%s", msg);
		printf(" (%2d) Multiprocessors, (%3d) CUDA Cores/MP: %d CUDA Cores\n",
			deviceProp.multiProcessorCount,
			_ConvertSMVer2Cores(deviceProp.major, deviceProp.minor),
			_ConvertSMVer2Cores(deviceProp.major, deviceProp.minor) * deviceProp.multiProcessorCount);
		printf("\n");
	}
	return;
}
// Print one row of turbulence statistics (rms velocity, kinetic energy,
// dissipation, length scales, scalar dissipation, vorticity) plus the
// timestep wall time; steptime is in milliseconds.
void printTurbStats(int c, double steptime, statistics stats)
{
	if(c==0)
		printf("\n Entering time-stepping loop...\n");
	// NOTE(review): the c%20 guard below is commented out, so the header row
	// is re-printed on every call — confirm this is intended.
	// if(c%20==0) // Print new header every few timesteps
	printf(" iter | u' | k | eps | l | eta | lambda | chi | |omega| | time \n"
		"-----------------------------------------------------------\n");
	// Print statistics to screen
	printf(" %d | %2.3f | %2.3f | %2.3f | %2.3f | %2.3f | %2.3f | %2.3f | % 2.3f | %2.3f \n",
		c*n_stats, stats.Vrms, stats.KE, stats.epsilon, stats.l, stats.eta, stats.lambda, stats.chi, stats.omega_z, steptime/1000);
	return;
}
// Print a statistics-free row for iteration c showing only the wall time
// (steptime in milliseconds), aligned with printTurbStats' columns.
void printIterTime(int c, double steptime)
{
	// Print iteration time to screen
	printf(" %d | | | | | | | | |% 2.3f \n",
		c,steptime/1000);
	return;
}
//============================================================================================
// Write to file
//============================================================================================
// Write one Y-profile (NY doubles) to "<rootdir><sim_name>Yprofs/<name>.<c>".
// c is the timestep index used in the filename; data is a host array of NY
// doubles.
void writeYprofs(const int c, const char* name, double *data)
{
	char title [256];
	FILE *out;
	char folder [256];
	// Pass sim_name as a "%s" argument, not as the format string: a '%' in
	// sim_name would otherwise be undefined behavior.
	snprintf(folder, sizeof(folder), "%s", sim_name);
	snprintf(title, sizeof(title), "%s%sYprofs/%s.%i", rootdir, folder, name, c);
	printf("Writing data to %s \n", title);
	out = fopen(title, "wb");
	if(out == NULL){	// Guard: fwrite/fclose on a NULL stream would crash
		fprintf(stderr, "Error: could not open %s for writing\n", title);
		return;
	}
	fwrite(data, sizeof(double), NY, out);
	fclose(out);
	return;
}
// Save all mean Y-profiles (u, v, w, scalar s, progress variable c) for
// timestep c.  On the first call (c==0) the Yprofs/ output directory is
// created if missing.
void saveYprofs(const int c, profile data)
{
	struct stat st = {0};
	char title[256];
	char folder [256];
	// "%s" guard: never use sim_name itself as the format string.
	snprintf(folder, sizeof(folder), "%s", sim_name);
	if(c==0){	// Create directory for statistics if one doesn't already exist
		snprintf(title, sizeof(title), "%s%s%s", rootdir, folder, "Yprofs/");
		if (stat(title, &st) == -1) {
			mkdir(title, 0700);
		}
	}
	writeYprofs(c, "u_mean", data.u[0]);
	writeYprofs(c, "v_mean", data.v[0]);
	writeYprofs(c, "w_mean", data.w[0]);
	writeYprofs(c, "s_mean", data.s[0]);
	writeYprofs(c, "c_mean", data.c[0]);
	return;
}
// Append a single double to stream f in native binary representation.
void writeDouble(double v, FILE *f) {
	fwrite((void*)(&v), sizeof(double), 1, f);
}
// Append a single double to the per-statistic binary file
// "<rootdir><sim_name>stats/<name>".  c==0 creates/truncates the file;
// c>0 appends.
void writeStats(const int c, const char* name, double in) {
	char title[256];
	FILE *out;
	char folder [256];
	// "%s" guard: never use sim_name itself as the format string.
	snprintf(folder, sizeof(folder), "%s", sim_name);
	snprintf(title, sizeof(title), "%s%sstats/%s", rootdir, folder, name);
	//printf("Writing data to %s \n", title);
	if(c==0){	// First timestep, create new file
		out = fopen(title, "wb");
	}
	else{	// append current timestep data to statistics file
		out = fopen(title, "ab");
	}
	if(out == NULL){	// Guard: writeDouble on a NULL stream would crash
		fprintf(stderr, "Error: could not open %s\n", title);
		return;
	}
	writeDouble(in, out);
	fclose(out);
}
// Append the full statistics record for timestep c to the per-quantity files
// under "<rootdir><sim_name>stats/".  On c==0 the directory is created if
// missing and each file is truncated (see writeStats).
void saveStatsData(const int c, statistics stats)
{
	struct stat st = {0};
	char title[256];
	char folder [256];
	// "%s" guard: never use sim_name itself as the format string.
	snprintf(folder, sizeof(folder), "%s", sim_name);
	if(c==0){	// Create directory for statistics if one doesn't already exist
		snprintf(title, sizeof(title), "%s%s%s", rootdir, folder, "stats/");
		if (stat(title, &st) == -1) {
			mkdir(title, 0700);
		}
	}
	// Save statistics data
	writeStats(c, "Vrms", stats.Vrms);
	writeStats(c, "epsilon", stats.epsilon);
	writeStats(c, "eta", stats.eta);
	writeStats(c, "KE", stats.KE);
	writeStats(c, "lambda", stats.lambda);
	writeStats(c, "l", stats.l);
	writeStats(c, "chi", stats.chi);
	writeStats(c, "omega" , stats.omega);
	writeStats(c, "omega_x", stats.omega_x);
	writeStats(c, "omega_y", stats.omega_y);
	writeStats(c, "omega_z", stats.omega_z);
	// Loop required to write statistics that depend on a second variable
	//for(i=0;i<64;++i){
	//	writeStats(1, "area_z", stats.area_scalar[i]);
	//	writeStats(1, "area_omega" , stats.area_omega[i]);
	//}
	snprintf(title, sizeof(title), "%s%s%s", rootdir, folder, "stats/");
	printf("Statistics data written to %s \n", title);
	return;
}
// Write an x-y slice (fixed z index zplane) of a padded real field to
// "<rootdir><sim_name>vis/<var>_xy.<iter>", preceded by a byte-count header
// (stored as a double, matching readDouble on import).  in holds per-GPU
// host arrays with in-place-FFT padding (z-pencil stride 2*NZ2).
void writexyfields( gpudata gpu, const int iter, const char var, double **in, const int zplane )
{
	int i, j, n, idx;
	char title[256];
	char folder [256];
	// "%s" guard: never use sim_name itself as the format string.
	snprintf(folder, sizeof(folder), "%s", sim_name);
	snprintf(title, sizeof(title), "%s%svis/%c%s.%i", rootdir, folder, var, "_xy", iter);
	printf("Saving data to %s \n", title);
	FILE *out = fopen(title, "wb");
	if(out == NULL){	// Guard: writing to a NULL stream would crash
		fprintf(stderr, "Error: could not open %s for writing\n", title);
		return;
	}
	writeDouble(sizeof(double) * NX*NY, out);	// byte-count header
	for (n = 0; n < gpu.nGPUs; ++n){
		for (i = 0; i < gpu.nx[n]; ++i){
			for (j = 0; j < NY; ++j){
				idx = zplane + 2*NZ2*j + 2*NZ2*NY*i;	// Using padded index for in-place FFT
				writeDouble(in[n][idx], out);
			}
		}
	}
	fclose(out);
	return;
}
// Write an x-z slice (fixed y index yplane) of a padded real field to
// "<rootdir><sim_name>vis/<var>_xz.<iter>", preceded by a byte-count header.
// Each x row is one fwrite of NZ contiguous doubles starting at k=0.
void writexzfields( gpudata gpu, const int iter, const char var, double **in, const int yplane )
{
	int i, k, n, idx;
	char title[256];
	char folder [256];
	// "%s" guard: never use sim_name itself as the format string.
	snprintf(folder, sizeof(folder), "%s", sim_name);
	snprintf(title, sizeof(title), "%s%svis/%c%s.%i", rootdir, folder, var, "_xz", iter);
	printf("Saving data to %s \n", title);
	FILE *out = fopen(title, "wb");
	if(out == NULL){	// Guard: writing to a NULL stream would crash
		fprintf(stderr, "Error: could not open %s for writing\n", title);
		return;
	}
	writeDouble(sizeof(double) * NX*NZ, out);	// byte-count header
	// writelu(sizeof(double) * NX*NY*NZ, out);
	k=0;	// z data is contiguous, so each row starts at k=0
	for (n = 0; n < gpu.nGPUs; ++n){
		for (i = 0; i < gpu.nx[n]; ++i){
			idx = k + 2*NZ2*yplane + 2*NZ2*NY*i;	// Using padded index for in-place FFT
			fwrite((void *)&in[n][idx], sizeof(double), NZ, out);	// Write each k vector at once
		}
	}
	fclose(out);
	return;
}
void writeyzfields( gpudata gpu, const int iter, const char var, double **in, const int xplane )
{
	// Dump one y-z slice (x index = xplane, local to each GPU's slab) of field
	// 'var' to "<rootdir><sim_name>vis/<var>_yz.<iter>" as raw doubles.
	int j, n, idx;
	char title[256];
	char folder[256];
	snprintf(folder, sizeof(folder), "%s", sim_name);	// copy literally; never use sim_name as a format string
	snprintf(title, sizeof(title), "%s%svis/%c%s.%i", rootdir, folder, var, "_yz", iter);
	printf("Saving data to %s \n", title);
	FILE *out = fopen(title, "wb");
	if (out == NULL) {	// guard against missing directory / permission failure
		fprintf(stderr, "Error: could not open %s for writing\n", title);
		return;
	}
	writeDouble(sizeof(double) * NY*NZ, out);	// header: payload size in bytes (stored as a double)
	// NOTE(review): the loop runs over all GPUs with the same local xplane, so
	// nGPUs slices are written rather than one global plane — confirm intent.
	for (n = 0; n < gpu.nGPUs; ++n){
		for (j = 0; j < NY; ++j){
			idx = 2*NZ2*j + 2*NZ2*NY*xplane;	// Using padded index for in-place FFT (k starts at 0)
			fwrite((void *)&in[n][idx], sizeof(double), NZ, out);	// Write each k vector at once
		}
	}
	fclose(out);
	return;
}
void save2Dfields(int c, fftdata fft, gpudata gpu, fielddata h_vel, fielddata vel)
{
	// Save x-y midplane slices of u, v, w, the scalar s (file tag 'z') and c at
	// timestep index c. For c==0 the fields are copied as-is; for later steps
	// they are inverse-transformed to physical space, written, and transformed
	// back so timestepping can continue in Fourier space.
	int n;
	char title[256];
	struct stat st = {0};
	char folder[256];
	// Copy the simulation name literally; the original used sim_name itself as
	// the printf format string, which breaks if the name contains '%'.
	snprintf(folder, sizeof(folder), "%s", sim_name);
	if(c==0){ // Create new directory for visualization data if one doesn't already exist
		snprintf(title, sizeof(title), "%s%s%s", rootdir, folder, "vis/");
		if (stat(title, &st) == -1) {
			mkdir(title, 0700);
		}
		// Copy data to host
		for(n=0; n<gpu.nGPUs; ++n){
			cudaSetDevice(n);
			checkCudaErrors( cudaMemcpyAsync(h_vel.u[n], vel.u[n], sizeof(complex double)*gpu.nx[n]*NY*NZ2, cudaMemcpyDefault) );
			checkCudaErrors( cudaMemcpyAsync(h_vel.v[n], vel.v[n], sizeof(complex double)*gpu.nx[n]*NY*NZ2, cudaMemcpyDefault) );
			checkCudaErrors( cudaMemcpyAsync(h_vel.w[n], vel.w[n], sizeof(complex double)*gpu.nx[n]*NY*NZ2, cudaMemcpyDefault) );
			checkCudaErrors( cudaMemcpyAsync(h_vel.s[n], vel.s[n], sizeof(complex double)*gpu.nx[n]*NY*NZ2, cudaMemcpyDefault) );
			checkCudaErrors( cudaMemcpyAsync(h_vel.c[n], vel.c[n], sizeof(complex double)*gpu.nx[n]*NY*NZ2, cudaMemcpyDefault) );
		}
		writexyfields( gpu, c, 'u', h_vel.u, NZ/2);
		writexyfields( gpu, c, 'v', h_vel.v, NZ/2);
		writexyfields( gpu, c, 'w', h_vel.w, NZ/2);
		writexyfields( gpu, c, 'z', h_vel.s, NZ/2);
		writexyfields( gpu, c, 'c', h_vel.c, NZ/2);
	}
	else{
		// Inverse Fourier Transform the velocity back to physical space for saving to file.
		inverseTransform(fft, gpu, vel.uh);
		inverseTransform(fft, gpu, vel.vh);
		inverseTransform(fft, gpu, vel.wh);
		inverseTransform(fft, gpu, vel.sh);
		inverseTransform(fft, gpu, vel.ch);
		// Copy data to host
		for(n=0; n<gpu.nGPUs; ++n){
			cudaSetDevice(n);
			checkCudaErrors( cudaMemcpyAsync(h_vel.u[n], vel.u[n], sizeof(complex double)*gpu.nx[n]*NY*NZ2, cudaMemcpyDefault) );
			checkCudaErrors( cudaMemcpyAsync(h_vel.v[n], vel.v[n], sizeof(complex double)*gpu.nx[n]*NY*NZ2, cudaMemcpyDefault) );
			checkCudaErrors( cudaMemcpyAsync(h_vel.w[n], vel.w[n], sizeof(complex double)*gpu.nx[n]*NY*NZ2, cudaMemcpyDefault) );
			checkCudaErrors( cudaMemcpyAsync(h_vel.s[n], vel.s[n], sizeof(complex double)*gpu.nx[n]*NY*NZ2, cudaMemcpyDefault) );
			checkCudaErrors( cudaMemcpyAsync(h_vel.c[n], vel.c[n], sizeof(complex double)*gpu.nx[n]*NY*NZ2, cudaMemcpyDefault) );
		}
		// Write data to file
		writexyfields(gpu, c, 'u', h_vel.u, NZ/2);
		writexyfields(gpu, c, 'v', h_vel.v, NZ/2);
		writexyfields(gpu, c, 'w', h_vel.w, NZ/2);
		writexyfields(gpu, c, 'z', h_vel.s, NZ/2);
		writexyfields(gpu, c, 'c', h_vel.c, NZ/2);
		// Transform fields back to fourier space for timestepping
		forwardTransform(fft, gpu, vel.u);
		forwardTransform(fft, gpu, vel.v);
		forwardTransform(fft, gpu, vel.w);
		forwardTransform(fft, gpu, vel.s);
		forwardTransform(fft, gpu, vel.c);
	}
	return;
}
void write3Dfields_mgpu(gpudata gpu, const int iter, const char var, double **in )
{
	// Dump the full 3-D field 'var' (NX*NY*NZ doubles, stored with padding for
	// in-place FFTs) to "<rootdir><sim_name><var>.<iter>".
	int i, j, n, idx;
	char title[256];
	char folder[256];
	snprintf(folder, sizeof(folder), "%s", sim_name);	// copy literally; never use sim_name as a format string
	snprintf(title, sizeof(title), "%s%s%c.%i", rootdir, folder, var, iter);
	printf("Saving data to %s \n", title);
	FILE *out = fopen(title, "wb");
	if (out == NULL) {	// guard against missing directory / permission failure
		fprintf(stderr, "Error: could not open %s for writing\n", title);
		return;
	}
	writeDouble(sizeof(double) * NX*NY*NZ, out);	// header: payload size in bytes (stored as a double)
	for (n = 0; n < gpu.nGPUs; ++n){
		for (i = 0; i < gpu.nx[n]; ++i){
			for (j = 0; j < NY; ++j){
				idx = 2*NZ2*j + 2*NZ2*NY*i;	// Using padded index for in-place FFT (k starts at 0)
				fwrite((void *)&in[n][idx], sizeof(double), NZ, out);	// Write each k vector at once
			}
		}
	}
	fclose(out);
	return;
}
void save3Dfields(int c, fftdata fft, gpudata gpu, fielddata h_vel, fielddata vel){
	// Save the full 3-D u, v, w, scalar s (file tag 'z') and c fields at
	// timestep index c. For c==0 the fields are copied as-is; otherwise they
	// are inverse-transformed, written, and transformed back to Fourier space.
	int n;
	struct stat st = {0};
	char title [256];
	char folder [256];
	// Copy the simulation name literally; the original passed sim_name as the
	// printf format string, which breaks if the name contains '%'.
	snprintf(folder, sizeof(folder), "%s", sim_name);
	snprintf(title, sizeof(title), "%s%s", rootdir, folder);
	//printf("Saving data to %s\n",title);
	if(c==0){
		if (stat(title, &st) == -1) { // Create root directory for DNS data if one doesn't already exist
			mkdir(title, 0700);
		}
		printf("Saving initial data...\n");
		for(n=0; n<gpu.nGPUs; ++n){
			cudaSetDevice(n);
			checkCudaErrors( cudaMemcpyAsync(h_vel.u[n], vel.u[n], sizeof(complex double)*gpu.nx[n]*NY*NZ2, cudaMemcpyDefault) );
			checkCudaErrors( cudaMemcpyAsync(h_vel.v[n], vel.v[n], sizeof(complex double)*gpu.nx[n]*NY*NZ2, cudaMemcpyDefault) );
			checkCudaErrors( cudaMemcpyAsync(h_vel.w[n], vel.w[n], sizeof(complex double)*gpu.nx[n]*NY*NZ2, cudaMemcpyDefault) );
			checkCudaErrors( cudaMemcpyAsync(h_vel.s[n], vel.s[n], sizeof(complex double)*gpu.nx[n]*NY*NZ2, cudaMemcpyDefault) );
			checkCudaErrors( cudaMemcpyAsync(h_vel.c[n], vel.c[n], sizeof(complex double)*gpu.nx[n]*NY*NZ2, cudaMemcpyDefault) );
		}
		// Write data to file
		write3Dfields_mgpu(gpu, 0, 'u', h_vel.u);
		write3Dfields_mgpu(gpu, 0, 'v', h_vel.v);
		write3Dfields_mgpu(gpu, 0, 'w', h_vel.w);
		write3Dfields_mgpu(gpu, 0, 'z', h_vel.s);
		write3Dfields_mgpu(gpu, 0, 'c', h_vel.c);
		return;
	}
	else{
		// Inverse Fourier Transform the velocity back to physical space for saving to file.
		inverseTransform(fft, gpu, vel.uh);
		inverseTransform(fft, gpu, vel.vh);
		inverseTransform(fft, gpu, vel.wh);
		inverseTransform(fft, gpu, vel.sh);
		inverseTransform(fft, gpu, vel.ch);
		// Copy data to host
		printf( "Timestep %i Complete. . .\n", c );
		for(n=0; n<gpu.nGPUs; ++n){
			cudaSetDevice(n);
			cudaDeviceSynchronize();	// make sure the transforms have finished before copying
			checkCudaErrors( cudaMemcpyAsync(h_vel.u[n], vel.u[n], sizeof(complex double)*gpu.nx[n]*NY*NZ2, cudaMemcpyDefault) );
			checkCudaErrors( cudaMemcpyAsync(h_vel.v[n], vel.v[n], sizeof(complex double)*gpu.nx[n]*NY*NZ2, cudaMemcpyDefault) );
			checkCudaErrors( cudaMemcpyAsync(h_vel.w[n], vel.w[n], sizeof(complex double)*gpu.nx[n]*NY*NZ2, cudaMemcpyDefault) );
			checkCudaErrors( cudaMemcpyAsync(h_vel.s[n], vel.s[n], sizeof(complex double)*gpu.nx[n]*NY*NZ2, cudaMemcpyDefault) );
			checkCudaErrors( cudaMemcpyAsync(h_vel.c[n], vel.c[n], sizeof(complex double)*gpu.nx[n]*NY*NZ2, cudaMemcpyDefault) );
		}
		// Write data to file
		write3Dfields_mgpu(gpu, c, 'u', h_vel.u);
		write3Dfields_mgpu(gpu, c, 'v', h_vel.v);
		write3Dfields_mgpu(gpu, c, 'w', h_vel.w);
		write3Dfields_mgpu(gpu, c, 'z', h_vel.s);
		write3Dfields_mgpu(gpu, c, 'c', h_vel.c);
		// Transform fields back to fourier space for timestepping
		forwardTransform(fft, gpu, vel.u);
		forwardTransform(fft, gpu, vel.v);
		forwardTransform(fft, gpu, vel.w);
		forwardTransform(fft, gpu, vel.s);
		forwardTransform(fft, gpu, vel.c);
		return;
	}
}
//============================================================================================
// Import from file
//============================================================================================
int readDataSize(FILE *f){
	// Read a single binary int record from f; returns 0 on short read / EOF.
	int bin;
	// Read sizeof(bin) bytes, not sizeof(float): the two happen to coincide on
	// common platforms, but tying the read size to the destination type is
	// correct by construction.
	size_t flag = fread((void*)(&bin), sizeof(bin), 1, f);
	if(flag == 1)
		return bin;
	else{
		return 0;
	}
}
double readDouble(FILE *f){
	// Read one binary double from f; a short read (EOF or error) yields 0.
	double value;
	if (fread((void *)&value, sizeof(double), 1, f) == 1) {
		return value;
	}
	return 0;
}
void loadData(gpudata gpu, const char *name, double **var)
{ // Function to read in velocity data into multiple GPUs
	// Reads the binary field "<name>.0" from the (hard-coded) dataset location
	// into the padded host arrays, split across GPU slabs.
	int i, j, k, n, idx, N;
	char title[256];
	// Build the path directly; the original formatted the name twice and used
	// the already-expanded path as a printf format string, which would break on
	// any '%' in the name.
	snprintf(title, sizeof(title), "/home/bblakeley/Documents/Research/DNS_Data/Flamelet_Data/R2/%s.0", name);
	printf("Importing data from %s \n", title);
	FILE *file = fopen(title, "rb");
	if (file == NULL) {	// guard: the original dereferenced a NULL FILE* on a missing file
		fprintf(stderr, "Error: unable to open %s\n", title);
		return;
	}
	// First record is the payload size in bytes, stored as a double.
	N = readDouble(file)/sizeof(double);
	if(N!=NX*NY*NZ) {
		printf("Error! N!=NX*NY*NZ");
		fclose(file);	// don't leak the handle on the error path
		return;
	}
	printf("Reading data from ");
	for (n = 0; n < gpu.nGPUs; ++n){
		printf("GPU %d",n);
		for (i = 0; i < gpu.nx[n]; ++i){
			for (j = 0; j < NY; ++j){
				for (k = 0; k < NZ; ++k){
					idx = k + 2*NZ2*j + 2*NZ2*NY*i;	// padded index for in-place FFT
					var[n][idx] = readDouble(file);
				}
			}
		}
		printf(" ... Done!\n");
	}
	fclose(file);
	return;
}
void importVelocity(gpudata gpu, fielddata h_vel, fielddata vel)
{ // Import data from file
	// Load the three velocity components from disk into the padded host arrays,
	// then stage them onto each GPU with asynchronous copies.
	int n;
	loadData(gpu, "u", h_vel.u);
	loadData(gpu, "v", h_vel.v);
	loadData(gpu, "w", h_vel.w);
	// Copy data from host to device
	// printf("Copy results to GPU memory...\n");
	for(n=0; n<gpu.nGPUs; ++n){
		cudaSetDevice(n);
		cudaDeviceSynchronize();
		// NOTE(review): these copies are asynchronous and no sync follows here;
		// presumably a later synchronization happens before vel is used on the
		// device — confirm against the callers.
		checkCudaErrors( cudaMemcpyAsync(vel.u[n], h_vel.u[n], sizeof(cufftDoubleComplex)*gpu.nx[n]*NY*NZ2, cudaMemcpyDefault) );
		checkCudaErrors( cudaMemcpyAsync(vel.v[n], h_vel.v[n], sizeof(cufftDoubleComplex)*gpu.nx[n]*NY*NZ2, cudaMemcpyDefault) );
		checkCudaErrors( cudaMemcpyAsync(vel.w[n], h_vel.w[n], sizeof(cufftDoubleComplex)*gpu.nx[n]*NY*NZ2, cudaMemcpyDefault) );
	}
	return;
}
void importScalar(gpudata gpu, fielddata h_vel, fielddata vel)
{
	// Read the scalar field (file tag "z") from disk into the host arrays and
	// push it to every device with asynchronous copies.
	int n;
	loadData(gpu, "z", h_vel.s);
	// Stage the freshly loaded host data onto each GPU in turn.
	for (n = 0; n < gpu.nGPUs; ++n) {
		cudaSetDevice(n);
		cudaDeviceSynchronize();
		checkCudaErrors( cudaMemcpyAsync(vel.s[n], h_vel.s[n], sizeof(cufftDoubleComplex)*gpu.nx[n]*NY*NZ2, cudaMemcpyDefault) );
	}
	return;
}
void importData(gpudata gpu, fielddata h_vel, fielddata vel) // Deprecated
{ // Import data
	// Convenience wrapper: load both the velocity components and the scalar
	// field from disk and stage them on the GPUs.
	importVelocity(gpu, h_vel, vel);
	importScalar(gpu, h_vel, vel);
	return;
}
|
308cc5fca4873bc36055c284767f69c00ea0d730.hip
|
// !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
/*
Copyright 2018 - The OPRECOMP Project Consortium, Alma Mater Studiorum
Universit di Bologna. All rights reserved.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
#include <stdio.h>
#include <stdlib.h>
#include <math.h>
#include <hip/hip_fp16.h>
#include "fp16_conversion.h"
#ifndef N
#define N 4096
#endif
#define sqrt_of_array_cell(x,j) ((half)sqrt(x[j]))
#define FLOAT_N (3214212.01f)
#define EPS 0.005f
/* Thread block dimensions for kernel 1*/
#define DIM_THREAD_BLOCK_KERNEL_1_X 256
#define DIM_THREAD_BLOCK_KERNEL_1_Y 1
/* Thread block dimensions for kernel 2*/
#define DIM_THREAD_BLOCK_KERNEL_2_X 256
#define DIM_THREAD_BLOCK_KERNEL_2_Y 1
/* Thread block dimensions for kernel 3*/
#define DIM_THREAD_BLOCK_KERNEL_3_X 32
#define DIM_THREAD_BLOCK_KERNEL_3_Y 8
/* Thread block dimensions for kernel 4*/
#define DIM_THREAD_BLOCK_KERNEL_4_X 256
#define DIM_THREAD_BLOCK_KERNEL_4_Y 1
__global__ void mean_kernel(half2 *mean, half2 *data)
{
	// Column sums of the data matrix scaled by 1/FLOAT_N, processing two
	// columns at a time via half2 packing; thread j owns one column pair.
	int j = blockIdx.x * blockDim.x + threadIdx.x;
	if (j < N/2)
	{
		mean[j] = __floats2half2_rn(0.0f, 0.0f);
		int i;
		for(i=0; i < N; i++)
		{
			// NOTE(review): the row stride is N half2 elements (= 2N halves) while
			// the buffer was allocated as N*N halves; this looks out of bounds for
			// i >= N/2 — confirm whether the intended half2 stride is N/2.
			mean[j] = __hadd2(mean[j], data[i*N + j]);
		}
		mean[j] = __hmul2(mean[j], __floats2half2_rn(1.0f/FLOAT_N, 1.0f/FLOAT_N));
	}
}
__global__ void std_kernel(half2 *mean, half2 *std, half2 *data)
{
	// Per-column deviation sqrt(sum((x - mean)^2) / FLOAT_N), two columns per
	// thread via half2. Columns whose deviation is <= EPS are clamped to 1 so
	// the subsequent normalization cannot divide by (near-)zero.
	int j = blockIdx.x * blockDim.x + threadIdx.x;
	if (j < N/2)
	{
		std[j] = __floats2half2_rn(0.0f, 0.0f);
		int i;
		for(i = 0; i < N; i++)
		{
			// NOTE(review): row stride N is in half2 units (2N halves) over an
			// N*N-half buffer — possibly out of bounds; confirm intended stride.
			half2 val = __hsub2(data[i*N + j], mean[j]);
			std[j] = __hfma2(val, val, std[j]);
		}
		std[j] = __hmul2(std[j], __floats2half2_rn(1.0f/FLOAT_N, 1.0f/FLOAT_N));
		std[j] = h2sqrt(std[j]);
		// Clamp each lane of the half2 pair independently.
		if(__hle(__low2half(std[j]), __float2half_rz(EPS)))
		{
			std[j] = __halves2half2(__float2half_rz(1.0f), __high2half(std[j]));
		}
		if(__hle(__high2half(std[j]), __float2half_rz(EPS)))
		{
			std[j] = __halves2half2(__low2half(std[j]), __float2half_rz(1.0f));
		}
	}
}
__global__ void reduce_kernel(half2 *mean, half2 *std, half2 *data)
{
	// Normalize the data in place: x <- (x - mean) / sqrt(FLOAT_N * std).
	// 2-D launch: i indexes rows, j indexes half2 column pairs.
	int j = blockIdx.x * blockDim.x + threadIdx.x;
	int i = blockIdx.y * blockDim.y + threadIdx.y;
	if ((i < N) && (j < N/2))
	{
		// NOTE(review): row stride N is in half2 units (2N halves) over an
		// N*N-half buffer — possibly out of bounds; confirm intended stride.
		data[i*N + j] = __hsub2(data[i*N + j], mean[j]);
		data[i*N + j] = __hmul2(data[i*N + j], h2rcp(h2sqrt(__hmul2(__floats2half2_rn(FLOAT_N, FLOAT_N), std[j]))));
	}
}
__global__ void corr_kernel(half *symmat, half *data)
{
	// Correlation matrix of the normalized data: symmat[j1][j2] =
	// sum_i data[i][j1] * data[i][j2], computed for the upper triangle and
	// mirrored into the lower. One thread per row j1; note the last diagonal
	// entry symmat[N-1][N-1] is never written by this kernel.
	int j1 = blockIdx.x * blockDim.x + threadIdx.x;
	int i, j2;
	if (j1 < (N-1))
	{
		symmat[j1*N + j1] = __float2half_rz(1.0f);
		for (j2 = (j1 + 1); j2 < N; j2++)
		{
			symmat[j1*N + j2] = __float2half_rz(0.0f);
			for(i = 0; i < N; i++)
			{
				// half accumulation over N terms — precision loss is expected;
				// this is a reduced-precision benchmark by design.
				symmat[j1*N + j2] = __hfma(data[i*N + j1], data[i*N + j2], symmat[j1*N + j2]);
			}
			symmat[j2*N + j1] = symmat[j1*N + j2];
		}
	}
}
int main()
{
	// Host driver: build a random half-precision N x N matrix, run the
	// mean/std/reduce/corr kernels in sequence, and print the result matrix.
	int i;
	half * data = (half *) malloc(N*N*sizeof(half));
	half * symmat = (half *) malloc(N*N*sizeof(half));
	half * mean = (half *) malloc(N*sizeof(half));
	half * stddev = (half *) malloc(N*sizeof(half));
	srand(5497);	// fixed seed for reproducible output
	for (i = 0; i < N*N; i++)
		data[i] = approx_float_to_half((float)rand() / (float)RAND_MAX);
	half *data_gpu;
	half *stddev_gpu;
	half *mean_gpu;
	half *symmat_gpu;
	hipMalloc((void **)&data_gpu, sizeof(half) * N * N);
	hipMalloc((void **)&symmat_gpu, sizeof(half) * N * N);
	hipMalloc((void **)&stddev_gpu, sizeof(half) * N);
	hipMalloc((void **)&mean_gpu, sizeof(half) * N);
	// Only the input matrix needs uploading; symmat/stddev/mean are produced on
	// the device (the original also uploaded the uninitialized host buffers).
	hipMemcpy(data_gpu, data, sizeof(half) * N * N, hipMemcpyHostToDevice);
	dim3 block1(DIM_THREAD_BLOCK_KERNEL_1_X, DIM_THREAD_BLOCK_KERNEL_1_Y);
	// ceil must wrap the whole quotient N/blockDim: the original computed
	// ceil((float)N) / blockDim, which truncates via the (size_t) cast and
	// under-provisions blocks whenever N is not a multiple of the block size.
	dim3 grid1((size_t)ceil((float)N / (float)DIM_THREAD_BLOCK_KERNEL_1_X), 1);
	dim3 block2(DIM_THREAD_BLOCK_KERNEL_2_X, DIM_THREAD_BLOCK_KERNEL_2_Y);
	dim3 grid2((size_t)ceil((float)N / (float)DIM_THREAD_BLOCK_KERNEL_2_X), 1);
	dim3 block3(DIM_THREAD_BLOCK_KERNEL_3_X, DIM_THREAD_BLOCK_KERNEL_3_Y);
	dim3 grid3((size_t)ceil((float)N / (float)DIM_THREAD_BLOCK_KERNEL_3_X), (size_t)ceil((float)N / (float)DIM_THREAD_BLOCK_KERNEL_3_Y));
	dim3 block4(DIM_THREAD_BLOCK_KERNEL_4_X, DIM_THREAD_BLOCK_KERNEL_4_Y);
	dim3 grid4((size_t)ceil((float)N / (float)DIM_THREAD_BLOCK_KERNEL_4_X), 1);
	hipLaunchKernelGGL(( mean_kernel), dim3(grid1), dim3(block1) , 0, 0, (half2*)mean_gpu,(half2*)data_gpu);
	hipDeviceSynchronize();
	hipLaunchKernelGGL(( std_kernel), dim3(grid2), dim3(block2) , 0, 0, (half2*)mean_gpu,(half2*)stddev_gpu,(half2*)data_gpu);
	hipDeviceSynchronize();
	hipLaunchKernelGGL(( reduce_kernel), dim3(grid3), dim3(block3) , 0, 0, (half2*)mean_gpu,(half2*)stddev_gpu,(half2*)data_gpu);
	hipDeviceSynchronize();
	hipLaunchKernelGGL(( corr_kernel), dim3(grid4), dim3(block4) , 0, 0, symmat_gpu,data_gpu);
	hipDeviceSynchronize();
	hipMemcpy(symmat, symmat_gpu, sizeof(half) * N * N, hipMemcpyDeviceToHost);
	for (i = 0; i < N*N; i++)
		printf("%.15f,", half_to_float(symmat[i]));
	// Release device and host buffers (the original leaked all of them).
	hipFree(data_gpu);
	hipFree(symmat_gpu);
	hipFree(stddev_gpu);
	hipFree(mean_gpu);
	free(data);
	free(symmat);
	free(mean);
	free(stddev);
	return 0;
}
|
308cc5fca4873bc36055c284767f69c00ea0d730.cu
|
/*
Copyright 2018 - The OPRECOMP Project Consortium, Alma Mater Studiorum
Università di Bologna. All rights reserved.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
#include <stdio.h>
#include <stdlib.h>
#include <math.h>
#include <cuda_fp16.h>
#include "fp16_conversion.h"
#ifndef N
#define N 4096
#endif
#define sqrt_of_array_cell(x,j) ((half)sqrt(x[j]))
#define FLOAT_N (3214212.01f)
#define EPS 0.005f
/* Thread block dimensions for kernel 1*/
#define DIM_THREAD_BLOCK_KERNEL_1_X 256
#define DIM_THREAD_BLOCK_KERNEL_1_Y 1
/* Thread block dimensions for kernel 2*/
#define DIM_THREAD_BLOCK_KERNEL_2_X 256
#define DIM_THREAD_BLOCK_KERNEL_2_Y 1
/* Thread block dimensions for kernel 3*/
#define DIM_THREAD_BLOCK_KERNEL_3_X 32
#define DIM_THREAD_BLOCK_KERNEL_3_Y 8
/* Thread block dimensions for kernel 4*/
#define DIM_THREAD_BLOCK_KERNEL_4_X 256
#define DIM_THREAD_BLOCK_KERNEL_4_Y 1
__global__ void mean_kernel(half2 *mean, half2 *data)
{
	// Column sums of the data matrix scaled by 1/FLOAT_N, processing two
	// columns at a time via half2 packing; thread j owns one column pair.
	int j = blockIdx.x * blockDim.x + threadIdx.x;
	if (j < N/2)
	{
		mean[j] = __floats2half2_rn(0.0f, 0.0f);
		int i;
		for(i=0; i < N; i++)
		{
			// NOTE(review): the row stride is N half2 elements (= 2N halves) while
			// the buffer was allocated as N*N halves; this looks out of bounds for
			// i >= N/2 — confirm whether the intended half2 stride is N/2.
			mean[j] = __hadd2(mean[j], data[i*N + j]);
		}
		mean[j] = __hmul2(mean[j], __floats2half2_rn(1.0f/FLOAT_N, 1.0f/FLOAT_N));
	}
}
__global__ void std_kernel(half2 *mean, half2 *std, half2 *data)
{
	// Per-column deviation sqrt(sum((x - mean)^2) / FLOAT_N), two columns per
	// thread via half2. Columns whose deviation is <= EPS are clamped to 1 so
	// the subsequent normalization cannot divide by (near-)zero.
	int j = blockIdx.x * blockDim.x + threadIdx.x;
	if (j < N/2)
	{
		std[j] = __floats2half2_rn(0.0f, 0.0f);
		int i;
		for(i = 0; i < N; i++)
		{
			// NOTE(review): row stride N is in half2 units (2N halves) over an
			// N*N-half buffer — possibly out of bounds; confirm intended stride.
			half2 val = __hsub2(data[i*N + j], mean[j]);
			std[j] = __hfma2(val, val, std[j]);
		}
		std[j] = __hmul2(std[j], __floats2half2_rn(1.0f/FLOAT_N, 1.0f/FLOAT_N));
		std[j] = h2sqrt(std[j]);
		// Clamp each lane of the half2 pair independently.
		if(__hle(__low2half(std[j]), __float2half_rz(EPS)))
		{
			std[j] = __halves2half2(__float2half_rz(1.0f), __high2half(std[j]));
		}
		if(__hle(__high2half(std[j]), __float2half_rz(EPS)))
		{
			std[j] = __halves2half2(__low2half(std[j]), __float2half_rz(1.0f));
		}
	}
}
__global__ void reduce_kernel(half2 *mean, half2 *std, half2 *data)
{
	// Normalize the data in place: x <- (x - mean) / sqrt(FLOAT_N * std).
	// 2-D launch: i indexes rows, j indexes half2 column pairs.
	int j = blockIdx.x * blockDim.x + threadIdx.x;
	int i = blockIdx.y * blockDim.y + threadIdx.y;
	if ((i < N) && (j < N/2))
	{
		// NOTE(review): row stride N is in half2 units (2N halves) over an
		// N*N-half buffer — possibly out of bounds; confirm intended stride.
		data[i*N + j] = __hsub2(data[i*N + j], mean[j]);
		data[i*N + j] = __hmul2(data[i*N + j], h2rcp(h2sqrt(__hmul2(__floats2half2_rn(FLOAT_N, FLOAT_N), std[j]))));
	}
}
__global__ void corr_kernel(half *symmat, half *data)
{
	// Correlation matrix of the normalized data: symmat[j1][j2] =
	// sum_i data[i][j1] * data[i][j2], computed for the upper triangle and
	// mirrored into the lower. One thread per row j1; note the last diagonal
	// entry symmat[N-1][N-1] is never written by this kernel.
	int j1 = blockIdx.x * blockDim.x + threadIdx.x;
	int i, j2;
	if (j1 < (N-1))
	{
		symmat[j1*N + j1] = __float2half_rz(1.0f);
		for (j2 = (j1 + 1); j2 < N; j2++)
		{
			symmat[j1*N + j2] = __float2half_rz(0.0f);
			for(i = 0; i < N; i++)
			{
				// half accumulation over N terms — precision loss is expected;
				// this is a reduced-precision benchmark by design.
				symmat[j1*N + j2] = __hfma(data[i*N + j1], data[i*N + j2], symmat[j1*N + j2]);
			}
			symmat[j2*N + j1] = symmat[j1*N + j2];
		}
	}
}
int main()
{
	// Host driver: build a random half-precision N x N matrix, run the
	// mean/std/reduce/corr kernels in sequence, and print the result matrix.
	int i;
	half * data = (half *) malloc(N*N*sizeof(half));
	half * symmat = (half *) malloc(N*N*sizeof(half));
	half * mean = (half *) malloc(N*sizeof(half));
	half * stddev = (half *) malloc(N*sizeof(half));
	srand(5497);	// fixed seed for reproducible output
	for (i = 0; i < N*N; i++)
		data[i] = approx_float_to_half((float)rand() / (float)RAND_MAX);
	half *data_gpu;
	half *stddev_gpu;
	half *mean_gpu;
	half *symmat_gpu;
	cudaMalloc((void **)&data_gpu, sizeof(half) * N * N);
	cudaMalloc((void **)&symmat_gpu, sizeof(half) * N * N);
	cudaMalloc((void **)&stddev_gpu, sizeof(half) * N);
	cudaMalloc((void **)&mean_gpu, sizeof(half) * N);
	// Only the input matrix needs uploading; symmat/stddev/mean are produced on
	// the device (the original also uploaded the uninitialized host buffers).
	cudaMemcpy(data_gpu, data, sizeof(half) * N * N, cudaMemcpyHostToDevice);
	dim3 block1(DIM_THREAD_BLOCK_KERNEL_1_X, DIM_THREAD_BLOCK_KERNEL_1_Y);
	// ceil must wrap the whole quotient N/blockDim: the original computed
	// ceil((float)N) / blockDim, which truncates via the (size_t) cast and
	// under-provisions blocks whenever N is not a multiple of the block size.
	dim3 grid1((size_t)ceil((float)N / (float)DIM_THREAD_BLOCK_KERNEL_1_X), 1);
	dim3 block2(DIM_THREAD_BLOCK_KERNEL_2_X, DIM_THREAD_BLOCK_KERNEL_2_Y);
	dim3 grid2((size_t)ceil((float)N / (float)DIM_THREAD_BLOCK_KERNEL_2_X), 1);
	dim3 block3(DIM_THREAD_BLOCK_KERNEL_3_X, DIM_THREAD_BLOCK_KERNEL_3_Y);
	dim3 grid3((size_t)ceil((float)N / (float)DIM_THREAD_BLOCK_KERNEL_3_X), (size_t)ceil((float)N / (float)DIM_THREAD_BLOCK_KERNEL_3_Y));
	dim3 block4(DIM_THREAD_BLOCK_KERNEL_4_X, DIM_THREAD_BLOCK_KERNEL_4_Y);
	dim3 grid4((size_t)ceil((float)N / (float)DIM_THREAD_BLOCK_KERNEL_4_X), 1);
	mean_kernel<<< grid1, block1 >>>((half2*)mean_gpu,(half2*)data_gpu);
	cudaDeviceSynchronize();	// cudaThreadSynchronize() is deprecated
	std_kernel<<< grid2, block2 >>>((half2*)mean_gpu,(half2*)stddev_gpu,(half2*)data_gpu);
	cudaDeviceSynchronize();
	reduce_kernel<<< grid3, block3 >>>((half2*)mean_gpu,(half2*)stddev_gpu,(half2*)data_gpu);
	cudaDeviceSynchronize();
	corr_kernel<<< grid4, block4 >>>(symmat_gpu,data_gpu);
	cudaDeviceSynchronize();
	cudaMemcpy(symmat, symmat_gpu, sizeof(half) * N * N, cudaMemcpyDeviceToHost);
	for (i = 0; i < N*N; i++)
		printf("%.15f,", half_to_float(symmat[i]));
	// Release device and host buffers (the original leaked all of them).
	cudaFree(data_gpu);
	cudaFree(symmat_gpu);
	cudaFree(stddev_gpu);
	cudaFree(mean_gpu);
	free(data);
	free(symmat);
	free(mean);
	free(stddev);
	return 0;
}
|
bf7ef91a2b537fe967c5b94bf0622f3da3409a07.hip
|
// !!! This is a file automatically generated by hipify!!!
#include <stdio.h>
#include <hip/hip_runtime.h>
#include <iostream>
using namespace std;
__device__ extern double access_const_mem(int index);
extern void init_const_mem(double initValue);
__global__
void const_kernel() {
	// Print the first ten entries of the constant-memory buffer that lives in a
	// separate translation unit, accessed through access_const_mem().
	for(int i=0;i<10;i++) {
		printf("%d: %.3f\n", i, access_const_mem(i));
	}
}
int main() {
	// Seed the externally defined constant buffer with 23, launch a
	// single-thread kernel that prints its contents, then synchronize so the
	// device-side printf output is flushed before exit.
	init_const_mem(23);
	hipLaunchKernelGGL(( const_kernel), dim3(1), dim3(1), 0, 0, );
	hipDeviceSynchronize();
	return 0;
}
|
bf7ef91a2b537fe967c5b94bf0622f3da3409a07.cu
|
#include <stdio.h>
#include <cuda_runtime.h>
#include <iostream>
using namespace std;
__device__ extern double access_const_mem(int index);
extern void init_const_mem(double initValue);
__global__
void const_kernel() {
	// Dump the first ten entries of the externally-defined constant buffer,
	// one "index: value" line each, via device-side printf.
	int idx = 0;
	while (idx < 10) {
		printf("%d: %.3f\n", idx, access_const_mem(idx));
		++idx;
	}
}
int main() {
	// Seed the externally defined constant buffer with 23, launch a
	// single-thread kernel that prints its contents, then synchronize so the
	// device-side printf output is flushed before exit.
	init_const_mem(23);
	const_kernel<<<1, 1>>>();
	cudaDeviceSynchronize();
	return 0;
}
|
2e08c5f0c345d8ef4350a02d13ef169b2cf07687.hip
|
// !!! This is a file automatically generated by hipify!!!
/*
* Copyright (c) 2020, NVIDIA CORPORATION.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
#include <gtest/gtest.h>
#include <raft/cudart_utils.h>
#include <raft/linalg/transpose.h>
#include <test_utils.h>
#include <cuml/linear_model/glm.hpp>
#include <glm/qn/glm_linear.cuh>
#include <glm/qn/glm_logistic.cuh>
#include <glm/qn/glm_softmax.cuh>
#include <glm/qn/qn.cuh>
#include <vector>
namespace ML {
namespace GLM {
using namespace MLCommon;
struct QuasiNewtonTest : ::testing::Test {
  // Shared gtest fixture: uploads a fixed 10x2 row-major design matrix to the
  // device once per test and provides an N-length device vector for the
  // per-test labels/targets.
  static constexpr int N = 10;  // number of samples
  static constexpr int D = 2;   // number of features
  const static double *nobptr;  // null bias pointer used for no-intercept checks
  const static double tol;      // tolerance for comparisons against references
  const static double X[N][D];  // fixed input data, row-major
  raft::handle_t cuml_handle;
  const raft::handle_t &handle;
  hipStream_t stream;
  std::shared_ptr<SimpleMatOwning<double>> Xdev;  // device copy of X
  std::shared_ptr<SimpleVecOwning<double>> ydev;  // device labels/targets
  std::shared_ptr<deviceAllocator> allocator;
  QuasiNewtonTest() : handle(cuml_handle) {}
  void SetUp() {
    stream = cuml_handle.get_stream();
    Xdev.reset(new SimpleMatOwning<double>(handle.get_device_allocator(), N, D,
                                           stream, ROW_MAJOR));
    raft::update_device(Xdev->data, &X[0][0], Xdev->len, stream);
    ydev.reset(
      new SimpleVecOwning<double>(handle.get_device_allocator(), N, stream));
    CUDA_CHECK(hipStreamSynchronize(stream));
    allocator = handle.get_device_allocator();
  }
  void TearDown() {}
};
// Static member definitions: nobptr is the deliberately-null bias pointer used
// when checking no-intercept fits; tol is the comparison tolerance; X is the
// fixed 10x2 input dataset shared by every test case.
const double *QuasiNewtonTest::nobptr = 0;
const double QuasiNewtonTest::tol = 5e-6;
const double QuasiNewtonTest::X[QuasiNewtonTest::N][QuasiNewtonTest::D] = {
  {-0.2047076594847130, 0.4789433380575482},
  {-0.5194387150567381, -0.5557303043474900},
  {1.9657805725027142, 1.3934058329729904},
  {0.0929078767437177, 0.2817461528302025},
  {0.7690225676118387, 1.2464347363862822},
  {1.0071893575830049, -1.2962211091122635},
  {0.2749916334321240, 0.2289128789353159},
  {1.3529168351654497, 0.8864293405915888},
  {-2.0016373096603974, -0.3718425371402544},
  {1.6690253095248706, -0.4385697358355719}};
template <typename T, class Comp>
::testing::AssertionResult checkParamsEqual(const raft::handle_t &handle,
                                            const T *host_weights,
                                            const T *host_bias, const T *w,
                                            const GLMDims &dims, Comp &comp,
                                            hipStream_t stream) {
  // Compare a fitted device parameter vector w against host reference weights
  // and bias. The references are given row-major [C x D]; they are repacked
  // column-major to match the device layout, uploaded, and compared
  // element-wise using comp.
  int C = dims.C;
  int D = dims.D;
  bool fit_intercept = dims.fit_intercept;
  std::vector<T> w_ref_cm(C * D);
  int idx = 0;
  for (int d = 0; d < D; d++)
    for (int c = 0; c < C; c++) {
      w_ref_cm[idx++] = host_weights[c * D + d];
    }
  SimpleVecOwning<T> w_ref(handle.get_device_allocator(), dims.n_param, stream);
  raft::update_device(w_ref.data, &w_ref_cm[0], C * D, stream);
  if (fit_intercept) {
    // The C bias terms are stored after the C*D weight entries.
    raft::update_device(&w_ref.data[C * D], host_bias, C, stream);
  }
  CUDA_CHECK(hipStreamSynchronize(stream));
  return raft::devArrMatch(w_ref.data, w, w_ref.len, comp);
}
template <typename T, class LossFunction>
T run(const raft::handle_t &handle, LossFunction &loss, const SimpleMat<T> &X,
      const SimpleVec<T> &y, T l1, T l2, T *w, SimpleMat<T> &z, int verbosity,
      hipStream_t stream) {
  // Fit the given GLM loss with the internal qn_fit solver using fixed solver
  // settings; w holds the initial guess and receives the solution. Returns the
  // final objective value fx.
  int max_iter = 100;
  T grad_tol = 1e-16;
  int linesearch_max_iter = 50;
  int lbfgs_memory = 5;
  int num_iters = 0;
  T fx;
  SimpleVec<T> w0(w, loss.n_param);
  qn_fit<T, LossFunction>(handle, loss, X.data, y.data, z.data, X.m, l1, l2,
                          max_iter, grad_tol, linesearch_max_iter, lbfgs_memory,
                          verbosity, w0.data, &fx, &num_iters, X.ord, stream);
  return fx;
}
template <typename T>
T run_api(const raft::handle_t &cuml_handle, int loss_type, int C,
          bool fit_intercept, const SimpleMat<T> &X, const SimpleVec<T> &y,
          T l1, T l2, T *w, SimpleMat<T> &z, int verbosity,
          hipStream_t stream) {
  // Fit via the public qnFit entry point (exercising the external API rather
  // than the internal solver). The parameter vector is zero-filled before the
  // fit. Returns the final objective value fx.
  int max_iter = 100;
  T grad_tol = 1e-8;
  int linesearch_max_iter = 50;
  int lbfgs_memory = 5;
  int num_iters = 0;
  SimpleVec<T> w0(w, X.n + fit_intercept);
  w0.fill(T(0), stream);
  T fx;
  qnFit(cuml_handle, X.data, y.data, X.m, X.n, C, fit_intercept, l1, l2,
        max_iter, grad_tol, linesearch_max_iter, lbfgs_memory, verbosity, w,
        &fx, &num_iters, false, loss_type);
  return fx;
}
TEST_F(QuasiNewtonTest, binary_logistic_vs_sklearn) {
  // Binary logistic regression (loss_type 0) with L1/L2 penalties, with and
  // without an intercept. Objective values and parameters are references
  // generated with sklearn on the fixture's 10x2 dataset; each case is also
  // re-run through the public qnFit API and must reach the same objective.
  raft::CompareApprox<double> compApprox(tol);
  // Test case generated in python and solved with sklearn
  double y[N] = {1, 1, 1, 0, 1, 0, 1, 0, 1, 0};
  raft::update_device(ydev->data, &y[0], ydev->len, stream);
  CUDA_CHECK(hipStreamSynchronize(stream));
  double alpha = 0.01 * N;
  LogisticLoss<double> loss_b(handle, D, true);
  LogisticLoss<double> loss_no_b(handle, D, false);
  SimpleVecOwning<double> w0(allocator, D + 1, stream);
  SimpleVecOwning<double> z(allocator, N, stream);
  double l1, l2, fx;
  // Case 1: L1 penalty, with intercept.
  double w_l1_b[2] = {-1.6899370396155091, 1.9021577534928300};
  double b_l1_b = 0.8057670813749118;
  double obj_l1_b = 0.44295941481024703;
  l1 = alpha;
  l2 = 0.0;
  fx = run(handle, loss_b, *Xdev, *ydev, l1, l2, w0.data, z, 0, stream);
  ASSERT_TRUE(compApprox(obj_l1_b, fx));
  ASSERT_TRUE(checkParamsEqual(handle, &w_l1_b[0], &b_l1_b, w0.data, loss_b,
                               compApprox, stream));
  fx = run_api(cuml_handle, 0, 2, loss_b.fit_intercept, *Xdev, *ydev, l1, l2,
               w0.data, z, 0, stream);
  ASSERT_TRUE(compApprox(obj_l1_b, fx));
  // Case 2: L2 penalty, with intercept.
  double w_l2_b[2] = {-1.5339880402781370, 1.6788639581350926};
  double b_l2_b = 0.806087868102401;
  double obj_l2_b = 0.4378085369889721;
  l1 = 0;
  l2 = alpha;
  fx = run(handle, loss_b, *Xdev, *ydev, l1, l2, w0.data, z, 0, stream);
  ASSERT_TRUE(compApprox(obj_l2_b, fx));
  ASSERT_TRUE(checkParamsEqual(handle, &w_l2_b[0], &b_l2_b, w0.data, loss_b,
                               compApprox, stream));
  fx = run_api(cuml_handle, 0, 2, loss_b.fit_intercept, *Xdev, *ydev, l1, l2,
               w0.data, z, 0, stream);
  ASSERT_TRUE(compApprox(obj_l2_b, fx));
  // Case 3: L1 penalty, no intercept (bias checked against nobptr == NULL).
  double w_l1_no_b[2] = {-1.6215035298864591, 2.3650868394981086};
  double obj_l1_no_b = 0.4769896009200278;
  l1 = alpha;
  l2 = 0.0;
  fx = run(handle, loss_no_b, *Xdev, *ydev, l1, l2, w0.data, z, 0, stream);
  ASSERT_TRUE(compApprox(obj_l1_no_b, fx));
  ASSERT_TRUE(checkParamsEqual(handle, &w_l1_no_b[0], nobptr, w0.data,
                               loss_no_b, compApprox, stream));
  fx = run_api(cuml_handle, 0, 2, loss_no_b.fit_intercept, *Xdev, *ydev, l1, l2,
               w0.data, z, 0, stream);
  ASSERT_TRUE(compApprox(obj_l1_no_b, fx));
  // Case 4: L2 penalty, no intercept.
  double w_l2_no_b[2] = {-1.3931049893764620, 2.0140103094119621};
  double obj_l2_no_b = 0.47502098062114273;
  l1 = 0;
  l2 = alpha;
  fx = run(handle, loss_no_b, *Xdev, *ydev, l1, l2, w0.data, z, 0, stream);
  ASSERT_TRUE(compApprox(obj_l2_no_b, fx));
  ASSERT_TRUE(checkParamsEqual(handle, &w_l2_no_b[0], nobptr, w0.data,
                               loss_no_b, compApprox, stream));
  fx = run_api(cuml_handle, 0, 2, loss_no_b.fit_intercept, *Xdev, *ydev, l1, l2,
               w0.data, z, 0, stream);
  ASSERT_TRUE(compApprox(obj_l2_no_b, fx));
}
TEST_F(QuasiNewtonTest, multiclass_logistic_vs_sklearn) {
  // Multinomial (softmax) logistic regression with C = 4 classes
  // (loss_type 2); only objective values are compared against sklearn.
  // The data seems to small for the objective to be strongly convex
  // leaving out exact param checks
  raft::CompareApprox<double> compApprox(tol);
  double y[N] = {2, 2, 0, 3, 3, 0, 0, 0, 1, 0};
  raft::update_device(ydev->data, &y[0], ydev->len, stream);
  CUDA_CHECK(hipStreamSynchronize(stream));
  double fx, l1, l2;
  int C = 4;
  double alpha = 0.016 * N;
  SimpleMatOwning<double> z(allocator, C, N, stream);
  SimpleVecOwning<double> w0(allocator, C * (D + 1), stream);
  Softmax<double> loss_b(handle, D, C, true);
  Softmax<double> loss_no_b(handle, D, C, false);
  // Case 1: L1 penalty, with intercept.
  l1 = alpha;
  l2 = 0.0;
  double obj_l1_b = 0.5407911382311313;
  fx = run(handle, loss_b, *Xdev, *ydev, l1, l2, w0.data, z, 0, stream);
  ASSERT_TRUE(compApprox(obj_l1_b, fx));
  fx = run_api(cuml_handle, 2, C, loss_b.fit_intercept, *Xdev, *ydev, l1, l2,
               w0.data, z, 0, stream);
  ASSERT_TRUE(compApprox(obj_l1_b, fx));
  // Case 2: L2 penalty, with intercept.
  l1 = 0.0;
  l2 = alpha;
  double obj_l2_b = 0.5721784062720949;
  fx = run(handle, loss_b, *Xdev, *ydev, l1, l2, w0.data, z, 0, stream);
  ASSERT_TRUE(compApprox(obj_l2_b, fx));
  fx = run_api(cuml_handle, 2, C, loss_b.fit_intercept, *Xdev, *ydev, l1, l2,
               w0.data, z, 0, stream);
  ASSERT_TRUE(compApprox(obj_l2_b, fx));
  // Case 3: L1 penalty, no intercept.
  l1 = alpha;
  l2 = 0.0;
  double obj_l1_no_b = 0.6606929813245878;
  fx = run(handle, loss_no_b, *Xdev, *ydev, l1, l2, w0.data, z, 0, stream);
  ASSERT_TRUE(compApprox(obj_l1_no_b, fx));
  fx = run_api(cuml_handle, 2, C, loss_no_b.fit_intercept, *Xdev, *ydev, l1, l2,
               w0.data, z, 0, stream);
  ASSERT_TRUE(compApprox(obj_l1_no_b, fx));
  // Case 4: L2 penalty, no intercept.
  l1 = 0.0;
  l2 = alpha;
  double obj_l2_no_b = 0.6597171282106854;
  fx = run(handle, loss_no_b, *Xdev, *ydev, l1, l2, w0.data, z, 0, stream);
  ASSERT_TRUE(compApprox(obj_l2_no_b, fx));
  fx = run_api(cuml_handle, 2, C, loss_no_b.fit_intercept, *Xdev, *ydev, l1, l2,
               w0.data, z, 0, stream);
  ASSERT_TRUE(compApprox(obj_l2_no_b, fx));
}
TEST_F(QuasiNewtonTest, linear_regression_vs_sklearn) {
  // Linear regression (squared loss, loss_type 1) with L1/L2 penalties, with
  // and without an intercept; references generated with sklearn.
  raft::CompareApprox<double> compApprox(tol);
  double y[N] = {0.2675836026202781, -0.0678277759663704, -0.6334027174275105,
                 -0.1018336189077367, 0.0933815935886932, -1.1058853496996381,
                 -0.1658298189619160, -0.2954290675648911, 0.7966520536712608,
                 -1.0767450516284769};
  raft::update_device(ydev->data, &y[0], ydev->len, stream);
  CUDA_CHECK(hipStreamSynchronize(stream));
  double fx, l1, l2;
  double alpha = 0.01 * N;
  SimpleVecOwning<double> w0(allocator, D + 1, stream);
  SimpleVecOwning<double> z(allocator, N, stream);
  SquaredLoss<double> loss_b(handle, D, true);
  SquaredLoss<double> loss_no_b(handle, D, false);
  // Case 1: L1 penalty, with intercept.
  l1 = alpha;
  l2 = 0.0;
  double w_l1_b[2] = {-0.4952397281519840, 0.3813315300180231};
  double b_l1_b = -0.08140861819001188;
  double obj_l1_b = 0.011136986298775138;
  fx = run(handle, loss_b, *Xdev, *ydev, l1, l2, w0.data, z, 0, stream);
  ASSERT_TRUE(compApprox(obj_l1_b, fx));
  ASSERT_TRUE(checkParamsEqual(handle, &w_l1_b[0], &b_l1_b, w0.data, loss_b,
                               compApprox, stream));
  fx = run_api(cuml_handle, 1, 1, loss_b.fit_intercept, *Xdev, *ydev, l1, l2,
               w0.data, z, 0, stream);
  ASSERT_TRUE(compApprox(obj_l1_b, fx));
  // Case 2: L2 penalty, with intercept.
  l1 = 0.0;
  l2 = alpha;
  double w_l2_b[2] = {-0.5022384743587150, 0.3937352417485087};
  double b_l2_b = -0.08062397391797513;
  double obj_l2_b = 0.004268621967866347;
  fx = run(handle, loss_b, *Xdev, *ydev, l1, l2, w0.data, z, 0, stream);
  ASSERT_TRUE(compApprox(obj_l2_b, fx));
  ASSERT_TRUE(checkParamsEqual(handle, &w_l2_b[0], &b_l2_b, w0.data, loss_b,
                               compApprox, stream));
  fx = run_api(cuml_handle, 1, 1, loss_b.fit_intercept, *Xdev, *ydev, l1, l2,
               w0.data, z, 0, stream);
  ASSERT_TRUE(compApprox(obj_l2_b, fx));
  // Case 3: L1 penalty, no intercept (bias checked against nobptr == NULL).
  l1 = alpha;
  l2 = 0.0;
  double w_l1_no_b[2] = {-0.5175178128147135, 0.3720844589831813};
  double obj_l1_no_b = 0.013981355746112447;
  fx = run(handle, loss_no_b, *Xdev, *ydev, l1, l2, w0.data, z, 0, stream);
  ASSERT_TRUE(compApprox(obj_l1_no_b, fx));
  ASSERT_TRUE(checkParamsEqual(handle, &w_l1_no_b[0], nobptr, w0.data,
                               loss_no_b, compApprox, stream));
  fx = run_api(cuml_handle, 1, 1, loss_no_b.fit_intercept, *Xdev, *ydev, l1, l2,
               w0.data, z, 0, stream);
  ASSERT_TRUE(compApprox(obj_l1_no_b, fx));
  // Case 4: L2 penalty, no intercept.
  l1 = 0.0;
  l2 = alpha;
  double w_l2_no_b[2] = {-0.5241651041233270, 0.3846317886627560};
  double obj_l2_no_b = 0.007061261366969662;
  fx = run(handle, loss_no_b, *Xdev, *ydev, l1, l2, w0.data, z, 0, stream);
  ASSERT_TRUE(compApprox(obj_l2_no_b, fx));
  ASSERT_TRUE(checkParamsEqual(handle, &w_l2_no_b[0], nobptr, w0.data,
                               loss_no_b, compApprox, stream));
  fx = run_api(cuml_handle, 1, 1, loss_no_b.fit_intercept, *Xdev, *ydev, l1, l2,
               w0.data, z, 0, stream);
  ASSERT_TRUE(compApprox(obj_l2_no_b, fx));
}
// qnPredict smoke test with w = (1, 0) and no intercept. The final integer
// argument selects the loss type (0/1/2, matching the loss_type codes used
// with run_api in this file): with type 0 the prediction thresholds on
// X[:,0]'s sign; with type 1 it reproduces X[:,0] directly.
TEST_F(QuasiNewtonTest, predict) {
  raft::CompareApprox<double> compApprox(1e-8);
  std::vector<double> w_host(D);
  w_host[0] = 1;  // only the first feature carries weight
  std::vector<double> preds_host(N);
  SimpleVecOwning<double> w(allocator, D, stream);
  SimpleVecOwning<double> preds(allocator, N, stream);
  raft::update_device(w.data, &w_host[0], w.len, stream);
  qnPredict(handle, Xdev->data, N, D, 2, false, w.data, false, 0, preds.data,
            stream);
  raft::update_host(&preds_host[0], preds.data, preds.len, stream);
  CUDA_CHECK(hipStreamSynchronize(stream));
  // Classification: predicted label is 1 exactly when X[:,0] > 0.
  for (int it = 0; it < N; it++) {
    ASSERT_TRUE(X[it][0] > 0 ? compApprox(preds_host[it], 1)
                             : compApprox(preds_host[it], 0));
  }
  qnPredict(handle, Xdev->data, N, D, 1, false, w.data, false, 1, preds.data,
            stream);
  raft::update_host(&preds_host[0], preds.data, preds.len, stream);
  CUDA_CHECK(hipStreamSynchronize(stream));
  // Regression: prediction equals the weighted feature, i.e. X[:,0].
  for (int it = 0; it < N; it++) {
    ASSERT_TRUE(compApprox(X[it][0], preds_host[it]));
  }
}
// Softmax (loss type 2) prediction with C = 4 classes. Two weight entries
// (first and last of the C*D weight vector) are set to 1 so that the argmax
// partitions the plane into three regions, as asserted below — the exact
// class-to-weight mapping follows the solver's weight layout (see
// checkParamsEqual for the column-major convention).
TEST_F(QuasiNewtonTest, predict_softmax) {
  raft::CompareApprox<double> compApprox(1e-8);
  int C = 4;
  std::vector<double> w_host(C * D);
  w_host[0] = 1;
  w_host[D * C - 1] = 1;
  std::vector<double> preds_host(N);
  SimpleVecOwning<double> w(allocator, w_host.size(), stream);
  SimpleVecOwning<double> preds(allocator, N, stream);
  raft::update_device(w.data, &w_host[0], w.len, stream);
  qnPredict(handle, Xdev->data, N, D, C, false, w.data, false, 2, preds.data,
            stream);
  raft::update_host(&preds_host[0], preds.data, preds.len, stream);
  CUDA_CHECK(hipStreamSynchronize(stream));
  for (int it = 0; it < N; it++) {
    if (X[it][0] < 0 && X[it][1] < 0) {
      ASSERT_TRUE(compApprox(1, preds_host[it]));
    } else if (X[it][0] > X[it][1]) {
      ASSERT_TRUE(compApprox(0, preds_host[it]));
    } else {
      ASSERT_TRUE(compApprox(C - 1, preds_host[it]));
    }
  }
}
} // namespace GLM
} // end namespace ML
|
2e08c5f0c345d8ef4350a02d13ef169b2cf07687.cu
|
/*
* Copyright (c) 2020, NVIDIA CORPORATION.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
#include <gtest/gtest.h>
#include <raft/cudart_utils.h>
#include <raft/linalg/transpose.h>
#include <test_utils.h>
#include <cuml/linear_model/glm.hpp>
#include <glm/qn/glm_linear.cuh>
#include <glm/qn/glm_logistic.cuh>
#include <glm/qn/glm_softmax.cuh>
#include <glm/qn/qn.cuh>
#include <vector>
namespace ML {
namespace GLM {
using namespace MLCommon;
// Shared gtest fixture: holds a fixed N x D row-major design matrix X
// (uploaded once per test into Xdev) and a device target vector ydev that
// each test fills with its own labels/targets.
struct QuasiNewtonTest : ::testing::Test {
  static constexpr int N = 10;  // number of samples
  static constexpr int D = 2;   // number of features
  const static double *nobptr;  // null bias pointer for no-intercept checks
  const static double tol;      // tolerance vs the sklearn reference values
  const static double X[N][D];
  raft::handle_t cuml_handle;
  const raft::handle_t &handle;
  cudaStream_t stream;
  std::shared_ptr<SimpleMatOwning<double>> Xdev;
  std::shared_ptr<SimpleVecOwning<double>> ydev;
  std::shared_ptr<deviceAllocator> allocator;
  QuasiNewtonTest() : handle(cuml_handle) {}
  void SetUp() {
    stream = cuml_handle.get_stream();
    // Upload the fixed design matrix and allocate the target vector.
    Xdev.reset(new SimpleMatOwning<double>(handle.get_device_allocator(), N, D,
                                           stream, ROW_MAJOR));
    raft::update_device(Xdev->data, &X[0][0], Xdev->len, stream);
    ydev.reset(
      new SimpleVecOwning<double>(handle.get_device_allocator(), N, stream));
    CUDA_CHECK(cudaStreamSynchronize(stream));
    allocator = handle.get_device_allocator();
  }
  void TearDown() {}
};
// Fixture statics: the null bias pointer used by no-intercept parameter
// checks, the comparison tolerance, and the fixed 10x2 design matrix shared
// by every test in this file.
const double *QuasiNewtonTest::nobptr = 0;
const double QuasiNewtonTest::tol = 5e-6;
const double QuasiNewtonTest::X[QuasiNewtonTest::N][QuasiNewtonTest::D] = {
  {-0.2047076594847130, 0.4789433380575482},
  {-0.5194387150567381, -0.5557303043474900},
  {1.9657805725027142, 1.3934058329729904},
  {0.0929078767437177, 0.2817461528302025},
  {0.7690225676118387, 1.2464347363862822},
  {1.0071893575830049, -1.2962211091122635},
  {0.2749916334321240, 0.2289128789353159},
  {1.3529168351654497, 0.8864293405915888},
  {-2.0016373096603974, -0.3718425371402544},
  {1.6690253095248706, -0.4385697358355719}};
// Compares the device-resident solver parameters `w` against host reference
// weights (row-major [C x D]) and optional per-class biases, returning a
// gtest assertion result. The references are transposed to the solver's
// column-major layout, uploaded, and matched element-wise with `comp`.
template <typename T, class Comp>
::testing::AssertionResult checkParamsEqual(const raft::handle_t &handle,
                                            const T *host_weights,
                                            const T *host_bias, const T *w,
                                            const GLMDims &dims, Comp &comp,
                                            cudaStream_t stream) {
  const int n_classes = dims.C;
  const int n_feats = dims.D;
  // Transpose the row-major reference into column-major staging storage.
  std::vector<T> colmajor(n_classes * n_feats);
  for (int c = 0; c < n_classes; c++) {
    for (int d = 0; d < n_feats; d++) {
      colmajor[d * n_classes + c] = host_weights[c * n_feats + d];
    }
  }
  // Device copy of the expected parameter vector (weights, then biases).
  SimpleVecOwning<T> expected(handle.get_device_allocator(), dims.n_param,
                              stream);
  raft::update_device(expected.data, colmajor.data(), n_classes * n_feats,
                      stream);
  if (dims.fit_intercept) {
    raft::update_device(&expected.data[n_classes * n_feats], host_bias,
                        n_classes, stream);
  }
  CUDA_CHECK(cudaStreamSynchronize(stream));
  return raft::devArrMatch(expected.data, w, expected.len, comp);
}
// Fits `loss` on (X, y) with elastic-net penalties (l1, l2) by calling
// qn_fit directly; the solution is written through `w` and the final
// objective value is returned.
template <typename T, class LossFunction>
T run(const raft::handle_t &handle, LossFunction &loss, const SimpleMat<T> &X,
      const SimpleVec<T> &y, T l1, T l2, T *w, SimpleMat<T> &z, int verbosity,
      cudaStream_t stream) {
  // Optimizer settings; the very tight gradient tolerance effectively runs
  // the solver to convergence so results are comparable across paths.
  const int opt_max_iter = 100;
  const T opt_grad_tol = 1e-16;
  const int opt_ls_max_iter = 50;
  const int opt_lbfgs_memory = 5;
  int iters_taken = 0;
  T objective;
  // Non-owning view over the caller's parameter buffer.
  SimpleVec<T> coef_view(w, loss.n_param);
  qn_fit<T, LossFunction>(handle, loss, X.data, y.data, z.data, X.m, l1, l2,
                          opt_max_iter, opt_grad_tol, opt_ls_max_iter,
                          opt_lbfgs_memory, verbosity, coef_view.data,
                          &objective, &iters_taken, X.ord, stream);
  return objective;
}
// Same fit as run(), but through the public qnFit entry point selected by
// `loss_type`. Note two intentional differences from run(): the initial
// guess is zeroed here, and the gradient tolerance is looser (1e-8 vs
// 1e-16), so only objective values — not exact parameters — are compared.
template <typename T>
T run_api(const raft::handle_t &cuml_handle, int loss_type, int C,
          bool fit_intercept, const SimpleMat<T> &X, const SimpleVec<T> &y,
          T l1, T l2, T *w, SimpleMat<T> &z, int verbosity,
          cudaStream_t stream) {
  int max_iter = 100;
  T grad_tol = 1e-8;
  int linesearch_max_iter = 50;
  int lbfgs_memory = 5;
  int num_iters = 0;
  // View over w sized for the (optionally intercept-augmented) coefficients,
  // used only to zero-initialize the starting point.
  SimpleVec<T> w0(w, X.n + fit_intercept);
  w0.fill(T(0), stream);
  T fx;
  qnFit(cuml_handle, X.data, y.data, X.m, X.n, C, fit_intercept, l1, l2,
        max_iter, grad_tol, linesearch_max_iter, lbfgs_memory, verbosity, w,
        &fx, &num_iters, false, loss_type);
  return fx;
}
// Binary logistic regression vs sklearn references: objective values and
// (where checked) coefficients for L1/L2 penalties, with and without an
// intercept, via both the internal run() path and the public run_api() path
// (loss_type 0 = logistic).
TEST_F(QuasiNewtonTest, binary_logistic_vs_sklearn) {
  raft::CompareApprox<double> compApprox(tol);
  // Test case generated in python and solved with sklearn
  double y[N] = {1, 1, 1, 0, 1, 0, 1, 0, 1, 0};
  raft::update_device(ydev->data, &y[0], ydev->len, stream);
  CUDA_CHECK(cudaStreamSynchronize(stream));
  // Regularization strength scaled by the number of samples.
  double alpha = 0.01 * N;
  LogisticLoss<double> loss_b(handle, D, true);
  LogisticLoss<double> loss_no_b(handle, D, false);
  SimpleVecOwning<double> w0(allocator, D + 1, stream);
  SimpleVecOwning<double> z(allocator, N, stream);
  double l1, l2, fx;
  // L1 penalty, with intercept.
  double w_l1_b[2] = {-1.6899370396155091, 1.9021577534928300};
  double b_l1_b = 0.8057670813749118;
  double obj_l1_b = 0.44295941481024703;
  l1 = alpha;
  l2 = 0.0;
  fx = run(handle, loss_b, *Xdev, *ydev, l1, l2, w0.data, z, 0, stream);
  ASSERT_TRUE(compApprox(obj_l1_b, fx));
  ASSERT_TRUE(checkParamsEqual(handle, &w_l1_b[0], &b_l1_b, w0.data, loss_b,
                               compApprox, stream));
  fx = run_api(cuml_handle, 0, 2, loss_b.fit_intercept, *Xdev, *ydev, l1, l2,
               w0.data, z, 0, stream);
  ASSERT_TRUE(compApprox(obj_l1_b, fx));
  // L2 penalty, with intercept.
  double w_l2_b[2] = {-1.5339880402781370, 1.6788639581350926};
  double b_l2_b = 0.806087868102401;
  double obj_l2_b = 0.4378085369889721;
  l1 = 0;
  l2 = alpha;
  fx = run(handle, loss_b, *Xdev, *ydev, l1, l2, w0.data, z, 0, stream);
  ASSERT_TRUE(compApprox(obj_l2_b, fx));
  ASSERT_TRUE(checkParamsEqual(handle, &w_l2_b[0], &b_l2_b, w0.data, loss_b,
                               compApprox, stream));
  fx = run_api(cuml_handle, 0, 2, loss_b.fit_intercept, *Xdev, *ydev, l1, l2,
               w0.data, z, 0, stream);
  ASSERT_TRUE(compApprox(obj_l2_b, fx));
  // L1 penalty, no intercept (nobptr = null bias reference).
  double w_l1_no_b[2] = {-1.6215035298864591, 2.3650868394981086};
  double obj_l1_no_b = 0.4769896009200278;
  l1 = alpha;
  l2 = 0.0;
  fx = run(handle, loss_no_b, *Xdev, *ydev, l1, l2, w0.data, z, 0, stream);
  ASSERT_TRUE(compApprox(obj_l1_no_b, fx));
  ASSERT_TRUE(checkParamsEqual(handle, &w_l1_no_b[0], nobptr, w0.data,
                               loss_no_b, compApprox, stream));
  fx = run_api(cuml_handle, 0, 2, loss_no_b.fit_intercept, *Xdev, *ydev, l1, l2,
               w0.data, z, 0, stream);
  ASSERT_TRUE(compApprox(obj_l1_no_b, fx));
  // L2 penalty, no intercept.
  double w_l2_no_b[2] = {-1.3931049893764620, 2.0140103094119621};
  double obj_l2_no_b = 0.47502098062114273;
  l1 = 0;
  l2 = alpha;
  fx = run(handle, loss_no_b, *Xdev, *ydev, l1, l2, w0.data, z, 0, stream);
  ASSERT_TRUE(compApprox(obj_l2_no_b, fx));
  ASSERT_TRUE(checkParamsEqual(handle, &w_l2_no_b[0], nobptr, w0.data,
                               loss_no_b, compApprox, stream));
  fx = run_api(cuml_handle, 0, 2, loss_no_b.fit_intercept, *Xdev, *ydev, l1, l2,
               w0.data, z, 0, stream);
  ASSERT_TRUE(compApprox(obj_l2_no_b, fx));
}
// Multiclass (softmax, loss_type 2) logistic regression vs sklearn
// reference objective values, L1/L2, with and without intercept.
TEST_F(QuasiNewtonTest, multiclass_logistic_vs_sklearn) {
  // The data seems too small for the objective to be strongly convex,
  // so exact parameter checks are left out; only objectives are compared.
  raft::CompareApprox<double> compApprox(tol);
  double y[N] = {2, 2, 0, 3, 3, 0, 0, 0, 1, 0};
  raft::update_device(ydev->data, &y[0], ydev->len, stream);
  CUDA_CHECK(cudaStreamSynchronize(stream));
  double fx, l1, l2;
  int C = 4;
  double alpha = 0.016 * N;
  // Per-class score matrix z is C x N; weights hold C * (D + 1) entries.
  SimpleMatOwning<double> z(allocator, C, N, stream);
  SimpleVecOwning<double> w0(allocator, C * (D + 1), stream);
  Softmax<double> loss_b(handle, D, C, true);
  Softmax<double> loss_no_b(handle, D, C, false);
  // L1, with intercept.
  l1 = alpha;
  l2 = 0.0;
  double obj_l1_b = 0.5407911382311313;
  fx = run(handle, loss_b, *Xdev, *ydev, l1, l2, w0.data, z, 0, stream);
  ASSERT_TRUE(compApprox(obj_l1_b, fx));
  fx = run_api(cuml_handle, 2, C, loss_b.fit_intercept, *Xdev, *ydev, l1, l2,
               w0.data, z, 0, stream);
  ASSERT_TRUE(compApprox(obj_l1_b, fx));
  // L2, with intercept.
  l1 = 0.0;
  l2 = alpha;
  double obj_l2_b = 0.5721784062720949;
  fx = run(handle, loss_b, *Xdev, *ydev, l1, l2, w0.data, z, 0, stream);
  ASSERT_TRUE(compApprox(obj_l2_b, fx));
  fx = run_api(cuml_handle, 2, C, loss_b.fit_intercept, *Xdev, *ydev, l1, l2,
               w0.data, z, 0, stream);
  ASSERT_TRUE(compApprox(obj_l2_b, fx));
  // L1, no intercept.
  l1 = alpha;
  l2 = 0.0;
  double obj_l1_no_b = 0.6606929813245878;
  fx = run(handle, loss_no_b, *Xdev, *ydev, l1, l2, w0.data, z, 0, stream);
  ASSERT_TRUE(compApprox(obj_l1_no_b, fx));
  fx = run_api(cuml_handle, 2, C, loss_no_b.fit_intercept, *Xdev, *ydev, l1, l2,
               w0.data, z, 0, stream);
  ASSERT_TRUE(compApprox(obj_l1_no_b, fx));
  // L2, no intercept.
  l1 = 0.0;
  l2 = alpha;
  double obj_l2_no_b = 0.6597171282106854;
  fx = run(handle, loss_no_b, *Xdev, *ydev, l1, l2, w0.data, z, 0, stream);
  ASSERT_TRUE(compApprox(obj_l2_no_b, fx));
  fx = run_api(cuml_handle, 2, C, loss_no_b.fit_intercept, *Xdev, *ydev, l1, l2,
               w0.data, z, 0, stream);
  ASSERT_TRUE(compApprox(obj_l2_no_b, fx));
}
// Linear (squared-loss, loss_type 1) regression vs sklearn reference
// objectives and coefficients: L1 and L2 penalties, each with and without
// an intercept, through both run() and the public run_api() path.
TEST_F(QuasiNewtonTest, linear_regression_vs_sklearn) {
  raft::CompareApprox<double> compApprox(tol);
  // Regression targets matching the fixture's fixed X matrix.
  double y[N] = {0.2675836026202781, -0.0678277759663704, -0.6334027174275105,
                 -0.1018336189077367, 0.0933815935886932, -1.1058853496996381,
                 -0.1658298189619160, -0.2954290675648911, 0.7966520536712608,
                 -1.0767450516284769};
  raft::update_device(ydev->data, &y[0], ydev->len, stream);
  CUDA_CHECK(cudaStreamSynchronize(stream));
  double fx, l1, l2;
  // Regularization strength scaled by the number of samples.
  double alpha = 0.01 * N;
  SimpleVecOwning<double> w0(allocator, D + 1, stream);
  SimpleVecOwning<double> z(allocator, N, stream);
  SquaredLoss<double> loss_b(handle, D, true);
  SquaredLoss<double> loss_no_b(handle, D, false);
  // L1 penalty, with intercept.
  l1 = alpha;
  l2 = 0.0;
  double w_l1_b[2] = {-0.4952397281519840, 0.3813315300180231};
  double b_l1_b = -0.08140861819001188;
  double obj_l1_b = 0.011136986298775138;
  fx = run(handle, loss_b, *Xdev, *ydev, l1, l2, w0.data, z, 0, stream);
  ASSERT_TRUE(compApprox(obj_l1_b, fx));
  ASSERT_TRUE(checkParamsEqual(handle, &w_l1_b[0], &b_l1_b, w0.data, loss_b,
                               compApprox, stream));
  fx = run_api(cuml_handle, 1, 1, loss_b.fit_intercept, *Xdev, *ydev, l1, l2,
               w0.data, z, 0, stream);
  ASSERT_TRUE(compApprox(obj_l1_b, fx));
  // L2 penalty, with intercept.
  l1 = 0.0;
  l2 = alpha;
  double w_l2_b[2] = {-0.5022384743587150, 0.3937352417485087};
  double b_l2_b = -0.08062397391797513;
  double obj_l2_b = 0.004268621967866347;
  fx = run(handle, loss_b, *Xdev, *ydev, l1, l2, w0.data, z, 0, stream);
  ASSERT_TRUE(compApprox(obj_l2_b, fx));
  ASSERT_TRUE(checkParamsEqual(handle, &w_l2_b[0], &b_l2_b, w0.data, loss_b,
                               compApprox, stream));
  fx = run_api(cuml_handle, 1, 1, loss_b.fit_intercept, *Xdev, *ydev, l1, l2,
               w0.data, z, 0, stream);
  ASSERT_TRUE(compApprox(obj_l2_b, fx));
  // L1 penalty, no intercept (nobptr = null bias reference).
  l1 = alpha;
  l2 = 0.0;
  double w_l1_no_b[2] = {-0.5175178128147135, 0.3720844589831813};
  double obj_l1_no_b = 0.013981355746112447;
  fx = run(handle, loss_no_b, *Xdev, *ydev, l1, l2, w0.data, z, 0, stream);
  ASSERT_TRUE(compApprox(obj_l1_no_b, fx));
  ASSERT_TRUE(checkParamsEqual(handle, &w_l1_no_b[0], nobptr, w0.data,
                               loss_no_b, compApprox, stream));
  fx = run_api(cuml_handle, 1, 1, loss_no_b.fit_intercept, *Xdev, *ydev, l1, l2,
               w0.data, z, 0, stream);
  ASSERT_TRUE(compApprox(obj_l1_no_b, fx));
  // L2 penalty, no intercept.
  l1 = 0.0;
  l2 = alpha;
  double w_l2_no_b[2] = {-0.5241651041233270, 0.3846317886627560};
  double obj_l2_no_b = 0.007061261366969662;
  fx = run(handle, loss_no_b, *Xdev, *ydev, l1, l2, w0.data, z, 0, stream);
  ASSERT_TRUE(compApprox(obj_l2_no_b, fx));
  ASSERT_TRUE(checkParamsEqual(handle, &w_l2_no_b[0], nobptr, w0.data,
                               loss_no_b, compApprox, stream));
  fx = run_api(cuml_handle, 1, 1, loss_no_b.fit_intercept, *Xdev, *ydev, l1, l2,
               w0.data, z, 0, stream);
  ASSERT_TRUE(compApprox(obj_l2_no_b, fx));
}
// qnPredict smoke test with w = (1, 0) and no intercept. The final integer
// argument selects the loss type (0/1/2, matching the loss_type codes used
// with run_api in this file): with type 0 the prediction thresholds on
// X[:,0]'s sign; with type 1 it reproduces X[:,0] directly.
TEST_F(QuasiNewtonTest, predict) {
  raft::CompareApprox<double> compApprox(1e-8);
  std::vector<double> w_host(D);
  w_host[0] = 1;  // only the first feature carries weight
  std::vector<double> preds_host(N);
  SimpleVecOwning<double> w(allocator, D, stream);
  SimpleVecOwning<double> preds(allocator, N, stream);
  raft::update_device(w.data, &w_host[0], w.len, stream);
  qnPredict(handle, Xdev->data, N, D, 2, false, w.data, false, 0, preds.data,
            stream);
  raft::update_host(&preds_host[0], preds.data, preds.len, stream);
  CUDA_CHECK(cudaStreamSynchronize(stream));
  // Classification: predicted label is 1 exactly when X[:,0] > 0.
  for (int it = 0; it < N; it++) {
    ASSERT_TRUE(X[it][0] > 0 ? compApprox(preds_host[it], 1)
                             : compApprox(preds_host[it], 0));
  }
  qnPredict(handle, Xdev->data, N, D, 1, false, w.data, false, 1, preds.data,
            stream);
  raft::update_host(&preds_host[0], preds.data, preds.len, stream);
  CUDA_CHECK(cudaStreamSynchronize(stream));
  // Regression: prediction equals the weighted feature, i.e. X[:,0].
  for (int it = 0; it < N; it++) {
    ASSERT_TRUE(compApprox(X[it][0], preds_host[it]));
  }
}
// Softmax (loss type 2) prediction with C = 4 classes. Two weight entries
// (first and last of the C*D weight vector) are set to 1 so that the argmax
// partitions the plane into three regions, as asserted below — the exact
// class-to-weight mapping follows the solver's weight layout (see
// checkParamsEqual for the column-major convention).
TEST_F(QuasiNewtonTest, predict_softmax) {
  raft::CompareApprox<double> compApprox(1e-8);
  int C = 4;
  std::vector<double> w_host(C * D);
  w_host[0] = 1;
  w_host[D * C - 1] = 1;
  std::vector<double> preds_host(N);
  SimpleVecOwning<double> w(allocator, w_host.size(), stream);
  SimpleVecOwning<double> preds(allocator, N, stream);
  raft::update_device(w.data, &w_host[0], w.len, stream);
  qnPredict(handle, Xdev->data, N, D, C, false, w.data, false, 2, preds.data,
            stream);
  raft::update_host(&preds_host[0], preds.data, preds.len, stream);
  CUDA_CHECK(cudaStreamSynchronize(stream));
  for (int it = 0; it < N; it++) {
    if (X[it][0] < 0 && X[it][1] < 0) {
      ASSERT_TRUE(compApprox(1, preds_host[it]));
    } else if (X[it][0] > X[it][1]) {
      ASSERT_TRUE(compApprox(0, preds_host[it]));
    } else {
      ASSERT_TRUE(compApprox(C - 1, preds_host[it]));
    }
  }
}
} // namespace GLM
} // end namespace ML
|
6ba40486e6b8584bcb552add234698bbb9f1c869.hip
|
// !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
#if defined(GRAVITY) && defined(GRAVITY_GPU)
#include <cmath>
#include "../gravity/grav3D.h"
#include "../grid/grid3D.h"
#include "../io/io.h"
#if defined(GRAV_ISOLATED_BOUNDARY_X) || defined(GRAV_ISOLATED_BOUNDARY_Y) || defined(GRAV_ISOLATED_BOUNDARY_Z)
// Writes one face of the precomputed isolated-boundary potential buffer into
// the ghost cells of the ghost-padded potential array. One thread per
// boundary cell: the flat thread id decomposes into (tid_i, tid_j) spanning
// the face plane and tid_k running across the n_ghost-deep boundary layer.
void __global__ Set_Potential_Boundaries_Isolated_kernel(int direction, int side, int size_buffer, int n_i, int n_j,
                                                         int nx, int ny, int nz, int n_ghost, Real *potential_d,
                                                         Real *pot_boundary_d)
{
  // get a global thread ID
  int tid, tid_i, tid_j, tid_k, tid_buffer, tid_pot;
  tid = threadIdx.x + blockIdx.x * blockDim.x;
  tid_k = tid / (n_i * n_j);
  tid_j = (tid - tid_k * n_i * n_j) / n_i;
  tid_i = tid - tid_k * n_i * n_j - tid_j * n_i;
  // Guard: the launch grid is rounded up and may overshoot the slab.
  if (tid_i < 0 || tid_i >= n_i || tid_j < 0 || tid_j >= n_j || tid_k < 0 || tid_k >= n_ghost) {
    return;
  }
  tid_buffer = tid_i + tid_j * n_i + tid_k * n_i * n_j;
  // Map the slab coordinate to a linear index in the padded (nx, ny, nz)
  // array: in-plane coordinates are shifted by n_ghost into the interior,
  // while tid_k addresses the ghost layer on the requested side.
  if (direction == 0) {
    if (side == 0) {
      tid_pot = (tid_k) + (tid_i + n_ghost) * nx + (tid_j + n_ghost) * nx * ny;
    }
    if (side == 1) {
      tid_pot = (nx - n_ghost + tid_k) + (tid_i + n_ghost) * nx + (tid_j + n_ghost) * nx * ny;
    }
  }
  if (direction == 1) {
    if (side == 0) {
      tid_pot = (tid_i + n_ghost) + (tid_k)*nx + (tid_j + n_ghost) * nx * ny;
    }
    if (side == 1) {
      tid_pot = (tid_i + n_ghost) + (ny - n_ghost + tid_k) * nx + (tid_j + n_ghost) * nx * ny;
    }
  }
  if (direction == 2) {
    if (side == 0) {
      tid_pot = (tid_i + n_ghost) + (tid_j + n_ghost) * nx + (tid_k)*nx * ny;
    }
    if (side == 1) {
      tid_pot = (tid_i + n_ghost) + (tid_j + n_ghost) * nx + (nz - n_ghost + tid_k) * nx * ny;
    }
  }
  potential_d[tid_pot] = pot_boundary_d[tid_buffer];
}
// Copies the precomputed isolated-boundary potential for the given face from
// host to device and writes it into the ghost cells of the ghost-padded
// potential array via Set_Potential_Boundaries_Isolated_kernel.
void Grid3D::Set_Potential_Boundaries_Isolated_GPU(int direction, int side, int *flags)
{
  int n_i, n_j, n_ghost, size_buffer;
  int nx_g, ny_g, nz_g;
  n_ghost = N_GHOST_POTENTIAL;
  // Ghost-padded potential grid dimensions.
  nx_g = Grav.nx_local + 2 * n_ghost;
  ny_g = Grav.ny_local + 2 * n_ghost;
  nz_g = Grav.nz_local + 2 * n_ghost;
  // Host/device staging buffers for the selected face; assigned only when
  // the matching isolated-boundary macro is enabled below.
  Real *pot_boundary_h, *pot_boundary_d;
#ifdef GRAV_ISOLATED_BOUNDARY_X
  if (direction == 0) {
    n_i = Grav.ny_local;
    n_j = Grav.nz_local;
    if (side == 0) {
      pot_boundary_h = Grav.F.pot_boundary_x0;
      pot_boundary_d = Grav.F.pot_boundary_x0_d;
    }
    if (side == 1) {
      pot_boundary_h = Grav.F.pot_boundary_x1;
      pot_boundary_d = Grav.F.pot_boundary_x1_d;
    }
  }
#endif
#ifdef GRAV_ISOLATED_BOUNDARY_Y
  if (direction == 1) {
    n_i = Grav.nx_local;
    n_j = Grav.nz_local;
    if (side == 0) {
      pot_boundary_h = Grav.F.pot_boundary_y0;
      pot_boundary_d = Grav.F.pot_boundary_y0_d;
    }
    if (side == 1) {
      pot_boundary_h = Grav.F.pot_boundary_y1;
      pot_boundary_d = Grav.F.pot_boundary_y1_d;
    }
  }
#endif
#ifdef GRAV_ISOLATED_BOUNDARY_Z
  if (direction == 2) {
    n_i = Grav.nx_local;
    n_j = Grav.ny_local;
    if (side == 0) {
      pot_boundary_h = Grav.F.pot_boundary_z0;
      pot_boundary_d = Grav.F.pot_boundary_z0_d;
    }
    if (side == 1) {
      pot_boundary_h = Grav.F.pot_boundary_z1;
      pot_boundary_d = Grav.F.pot_boundary_z1_d;
    }
  }
#endif
  size_buffer = N_GHOST_POTENTIAL * n_i * n_j;
  // set values for GPU kernels: one thread per boundary cell, ceil-divided
  // into blocks of TPB_GRAV threads.
  int ngrid = (size_buffer - 1) / TPB_GRAV + 1;
  // number of blocks per 1D grid
  dim3 dim1dGrid(ngrid, 1, 1);
  // number of threads per 1D block
  dim3 dim1dBlock(TPB_GRAV, 1, 1);
  // Copy the boundary array from host to device; error-checked to match the
  // CHECK(...) usage elsewhere in this file.
  CHECK(hipMemcpy(pot_boundary_d, pot_boundary_h, size_buffer * sizeof(Real), hipMemcpyHostToDevice));
  CHECK(hipDeviceSynchronize());
  // Copy the potential boundary from buffer to potential array
  hipLaunchKernelGGL(Set_Potential_Boundaries_Isolated_kernel, dim1dGrid, dim1dBlock, 0, 0, direction, side,
                     size_buffer, n_i, n_j, nx_g, ny_g, nz_g, n_ghost, Grav.F.potential_d, pot_boundary_d);
}
#endif // GRAV_ISOLATED_BOUNDARY
// Fills the ghost cells on one face of the padded potential array with the
// periodic image taken from the interior region near the opposite face — a
// wrap-around copy within the same array. One thread per ghost cell.
void __global__ Set_Potential_Boundaries_Periodic_kernel(int direction, int side, int n_i, int n_j, int nx, int ny,
                                                         int nz, int n_ghost, Real *potential_d)
{
  // get a global thread ID
  int tid, tid_i, tid_j, tid_k, tid_src, tid_dst;
  tid = threadIdx.x + blockIdx.x * blockDim.x;
  tid_k = tid / (n_i * n_j);
  tid_j = (tid - tid_k * n_i * n_j) / n_i;
  tid_i = tid - tid_k * n_i * n_j - tid_j * n_i;
  // Guard: the launch grid is rounded up and may overshoot the slab.
  if (tid_i < 0 || tid_i >= n_i || tid_j < 0 || tid_j >= n_j || tid_k < 0 || tid_k >= n_ghost) {
    return;
  }
  // For each direction: src indexes interior cells offset from the opposite
  // edge; dst indexes the ghost layer on the requested side.
  if (direction == 0) {
    if (side == 0) {
      tid_src = (nx - 2 * n_ghost + tid_k) + (tid_i)*nx + (tid_j)*nx * ny;
    }
    if (side == 0) {
      tid_dst = (tid_k) + (tid_i)*nx + (tid_j)*nx * ny;
    }
    if (side == 1) {
      tid_src = (n_ghost + tid_k) + (tid_i)*nx + (tid_j)*nx * ny;
    }
    if (side == 1) {
      tid_dst = (nx - n_ghost + tid_k) + (tid_i)*nx + (tid_j)*nx * ny;
    }
  }
  if (direction == 1) {
    if (side == 0) {
      tid_src = (tid_i) + (ny - 2 * n_ghost + tid_k) * nx + (tid_j)*nx * ny;
    }
    if (side == 0) {
      tid_dst = (tid_i) + (tid_k)*nx + (tid_j)*nx * ny;
    }
    if (side == 1) {
      tid_src = (tid_i) + (n_ghost + tid_k) * nx + (tid_j)*nx * ny;
    }
    if (side == 1) {
      tid_dst = (tid_i) + (ny - n_ghost + tid_k) * nx + (tid_j)*nx * ny;
    }
  }
  if (direction == 2) {
    if (side == 0) {
      tid_src = (tid_i) + (tid_j)*nx + (nz - 2 * n_ghost + tid_k) * nx * ny;
    }
    if (side == 0) {
      tid_dst = (tid_i) + (tid_j)*nx + (tid_k)*nx * ny;
    }
    if (side == 1) {
      tid_src = (tid_i) + (tid_j)*nx + (n_ghost + tid_k) * nx * ny;
    }
    if (side == 1) {
      tid_dst = (tid_i) + (tid_j)*nx + (nz - n_ghost + tid_k) * nx * ny;
    }
  }
  potential_d[tid_dst] = potential_d[tid_src];
}
// Applies periodic boundary conditions to one face of the padded potential
// array by launching Set_Potential_Boundaries_Periodic_kernel (in-place
// wrap-around copy on the device).
void Grid3D::Set_Potential_Boundaries_Periodic_GPU(int direction, int side, int *flags)
{
  // Ghost-padded potential grid dimensions.
  const int n_ghost = N_GHOST_POTENTIAL;
  const int nx_g = Grav.nx_local + 2 * n_ghost;
  const int ny_g = Grav.ny_local + 2 * n_ghost;
  const int nz_g = Grav.nz_local + 2 * n_ghost;
  // Extent of the face plane perpendicular to `direction` (the third
  // dimension of the slab is n_ghost deep).
  int n_i, n_j;
  if (direction == 0) {
    n_i = ny_g;
    n_j = nz_g;
  } else if (direction == 1) {
    n_i = nx_g;
    n_j = nz_g;
  } else if (direction == 2) {
    n_i = nx_g;
    n_j = ny_g;
  }
  const int size = N_GHOST_POTENTIAL * n_i * n_j;
  // One thread per ghost cell, TPB_GRAV threads per block (ceil division).
  const int ngrid = (size - 1) / TPB_GRAV + 1;
  dim3 dim1dGrid(ngrid, 1, 1);
  dim3 dim1dBlock(TPB_GRAV, 1, 1);
  // Copy the potential boundary from the periodic image into the ghost cells.
  hipLaunchKernelGGL(Set_Potential_Boundaries_Periodic_kernel, dim1dGrid, dim1dBlock, 0, 0, direction, side, n_i, n_j,
                     nx_g, ny_g, nz_g, n_ghost, Grav.F.potential_d);
}
// Packs a transfer (send) buffer with the outermost n_ghost_transfer layers
// of *real* (non-ghost) potential cells adjacent to the requested face.
// One thread per buffer element.
__global__ void Load_Transfer_Buffer_GPU_kernel(int direction, int side, int size_buffer, int n_i, int n_j, int nx,
                                                int ny, int nz, int n_ghost_transfer, int n_ghost_potential,
                                                Real *potential_d, Real *transfer_buffer_d)
{
  // get a global thread ID
  int tid, tid_i, tid_j, tid_k, tid_buffer, tid_pot;
  tid = threadIdx.x + blockIdx.x * blockDim.x;
  tid_k = tid / (n_i * n_j);
  tid_j = (tid - tid_k * n_i * n_j) / n_i;
  tid_i = tid - tid_k * n_i * n_j - tid_j * n_i;
  // Guard: the launch grid is rounded up and may overshoot the slab.
  if (tid_i < 0 || tid_i >= n_i || tid_j < 0 || tid_j >= n_j || tid_k < 0 || tid_k >= n_ghost_transfer) {
    return;
  }
  tid_buffer = tid_i + tid_j * n_i + tid_k * n_i * n_j;
  // Source index: the first n_ghost_transfer real layers just inside the
  // ghost zone on the requested side.
  if (direction == 0) {
    if (side == 0) {
      tid_pot = (n_ghost_potential + tid_k) + (tid_i)*nx + (tid_j)*nx * ny;
    }
    if (side == 1) {
      tid_pot = (nx - n_ghost_potential - n_ghost_transfer + tid_k) + (tid_i)*nx + (tid_j)*nx * ny;
    }
  }
  if (direction == 1) {
    if (side == 0) {
      tid_pot = (tid_i) + (n_ghost_potential + tid_k) * nx + (tid_j)*nx * ny;
    }
    if (side == 1) {
      tid_pot = (tid_i) + (ny - n_ghost_potential - n_ghost_transfer + tid_k) * nx + (tid_j)*nx * ny;
    }
  }
  if (direction == 2) {
    if (side == 0) {
      tid_pot = (tid_i) + (tid_j)*nx + (n_ghost_potential + tid_k) * nx * ny;
    }
    if (side == 1) {
      tid_pot = (tid_i) + (tid_j)*nx + (nz - n_ghost_potential - n_ghost_transfer + tid_k) * nx * ny;
    }
  }
  transfer_buffer_d[tid_buffer] = potential_d[tid_pot];
}
// Packs the potential's boundary slab for (direction, side) into `buffer`
// (a device transfer buffer) and returns the number of Reals written. The
// device is synchronized before returning so the buffer is safe to hand off.
int Grid3D::Load_Gravity_Potential_To_Buffer_GPU(int direction, int side, Real *buffer, int buffer_start)
{
  // printf( "Loading Gravity Buffer: Dir %d side: %d \n", direction, side );
  int nx_pot, ny_pot, nz_pot, size_buffer, n_ghost_potential, n_ghost_transfer, n_i, n_j, ngrid;
  n_ghost_potential = N_GHOST_POTENTIAL;
  n_ghost_transfer = N_GHOST_POTENTIAL;
  // Ghost-padded potential grid dimensions.
  nx_pot = Grav.nx_local + 2 * n_ghost_potential;
  ny_pot = Grav.ny_local + 2 * n_ghost_potential;
  nz_pot = Grav.nz_local + 2 * n_ghost_potential;
  // Extent of the face plane perpendicular to `direction`.
  if (direction == 0) {
    n_i = ny_pot;
    n_j = nz_pot;
  }
  if (direction == 1) {
    n_i = nx_pot;
    n_j = nz_pot;
  }
  if (direction == 2) {
    n_i = nx_pot;
    n_j = ny_pot;
  }
  size_buffer = n_ghost_transfer * n_i * n_j;
  // set values for GPU kernels: one thread per buffer element (ceil-div).
  ngrid = (size_buffer - 1) / TPB_GRAV + 1;
  // number of blocks per 1D grid
  dim3 dim1dGrid(ngrid, 1, 1);
  // number of threads per 1D block
  dim3 dim1dBlock(TPB_GRAV, 1, 1);
  Real *potential_d = (Real *)Grav.F.potential_d;
  Real *send_buffer_d = buffer;
  hipLaunchKernelGGL(Load_Transfer_Buffer_GPU_kernel, dim1dGrid, dim1dBlock, 0, 0, direction, side, size_buffer, n_i,
                     n_j, nx_pot, ny_pot, nz_pot, n_ghost_transfer, n_ghost_potential, potential_d, send_buffer_d);
  // Block until the pack kernel finishes so the buffer is ready for the caller.
  CHECK(hipDeviceSynchronize());
  return size_buffer;
}
// Unpacks a received transfer buffer into the n_ghost_transfer ghost layers
// of the requested face of the padded potential array (indices run from the
// array edge inward). One thread per buffer element.
__global__ void Unload_Transfer_Buffer_GPU_kernel(int direction, int side, int size_buffer, int n_i, int n_j, int nx,
                                                  int ny, int nz, int n_ghost_transfer, int n_ghost_potential,
                                                  Real *potential_d, Real *transfer_buffer_d)
{
  // get a global thread ID
  int tid, tid_i, tid_j, tid_k, tid_buffer, tid_pot;
  tid = threadIdx.x + blockIdx.x * blockDim.x;
  tid_k = tid / (n_i * n_j);
  tid_j = (tid - tid_k * n_i * n_j) / n_i;
  tid_i = tid - tid_k * n_i * n_j - tid_j * n_i;
  // Guard: the launch grid is rounded up and may overshoot the slab.
  if (tid_i < 0 || tid_i >= n_i || tid_j < 0 || tid_j >= n_j || tid_k < 0 || tid_k >= n_ghost_transfer) {
    return;
  }
  tid_buffer = tid_i + tid_j * n_i + tid_k * n_i * n_j;
  // Destination index: the ghost layers on the requested side.
  if (direction == 0) {
    if (side == 0) {
      tid_pot = (n_ghost_potential - n_ghost_transfer + tid_k) + (tid_i)*nx + (tid_j)*nx * ny;
    }
    if (side == 1) {
      tid_pot = (nx - n_ghost_potential + tid_k) + (tid_i)*nx + (tid_j)*nx * ny;
    }
  }
  if (direction == 1) {
    if (side == 0) {
      tid_pot = (tid_i) + (n_ghost_potential - n_ghost_transfer + tid_k) * nx + (tid_j)*nx * ny;
    }
    if (side == 1) {
      tid_pot = (tid_i) + (ny - n_ghost_potential + tid_k) * nx + (tid_j)*nx * ny;
    }
  }
  if (direction == 2) {
    if (side == 0) {
      tid_pot = (tid_i) + (tid_j)*nx + (n_ghost_potential - n_ghost_transfer + tid_k) * nx * ny;
    }
    if (side == 1) {
      tid_pot = (tid_i) + (tid_j)*nx + (nz - n_ghost_potential + tid_k) * nx * ny;
    }
  }
  potential_d[tid_pot] = transfer_buffer_d[tid_buffer];
}
// Unpacks a received transfer buffer into the ghost cells of the potential
// array for the given face via Unload_Transfer_Buffer_GPU_kernel.
void Grid3D::Unload_Gravity_Potential_from_Buffer_GPU(int direction, int side, Real *buffer, int buffer_start)
{
  // printf( "Loading Gravity Buffer: Dir %d side: %d \n", direction, side );
  int nx_pot, ny_pot, nz_pot, size_buffer, n_ghost_potential, n_ghost_transfer, n_i, n_j, ngrid;
  n_ghost_potential = N_GHOST_POTENTIAL;
  n_ghost_transfer = N_GHOST_POTENTIAL;
  // Ghost-padded potential grid dimensions.
  nx_pot = Grav.nx_local + 2 * n_ghost_potential;
  ny_pot = Grav.ny_local + 2 * n_ghost_potential;
  nz_pot = Grav.nz_local + 2 * n_ghost_potential;
  // Extent of the face plane perpendicular to `direction`.
  if (direction == 0) {
    n_i = ny_pot;
    n_j = nz_pot;
  }
  if (direction == 1) {
    n_i = nx_pot;
    n_j = nz_pot;
  }
  if (direction == 2) {
    n_i = nx_pot;
    n_j = ny_pot;
  }
  size_buffer = n_ghost_transfer * n_i * n_j;
  // set values for GPU kernels: one thread per buffer element (ceil-div).
  ngrid = (size_buffer - 1) / TPB_GRAV + 1;
  // number of blocks per 1D grid
  dim3 dim1dGrid(ngrid, 1, 1);
  // number of threads per 1D block
  dim3 dim1dBlock(TPB_GRAV, 1, 1);
  Real *potential_d = (Real *)Grav.F.potential_d;
  Real *recv_buffer_d = buffer;
  hipLaunchKernelGGL(Unload_Transfer_Buffer_GPU_kernel, dim1dGrid, dim1dBlock, 0, 0, direction, side, size_buffer, n_i,
                     n_j, nx_pot, ny_pot, nz_pot, n_ghost_transfer, n_ghost_potential, potential_d, recv_buffer_d);
  // NOTE(review): unlike the load path, there is no device synchronize here —
  // presumably a later synchronization orders this kernel before `buffer` is
  // reused; confirm against the caller.
}
#endif // GRAVITY
|
6ba40486e6b8584bcb552add234698bbb9f1c869.cu
|
#if defined(GRAVITY) && defined(GRAVITY_GPU)
#include <cmath>
#include "../gravity/grav3D.h"
#include "../grid/grid3D.h"
#include "../io/io.h"
#if defined(GRAV_ISOLATED_BOUNDARY_X) || defined(GRAV_ISOLATED_BOUNDARY_Y) || defined(GRAV_ISOLATED_BOUNDARY_Z)
// Writes one face of the precomputed isolated-boundary potential buffer into
// the ghost cells of the ghost-padded potential array. One thread per
// boundary cell: the flat id decomposes into (tid_i, tid_j) over the face
// plane and tid_k across the n_ghost-deep boundary layer.
void __global__ Set_Potential_Boundaries_Isolated_kernel(int direction, int side, int size_buffer, int n_i, int n_j,
                                                         int nx, int ny, int nz, int n_ghost, Real *potential_d,
                                                         Real *pot_boundary_d)
{
  // get a global thread ID
  int tid, tid_i, tid_j, tid_k, tid_buffer, tid_pot;
  tid = threadIdx.x + blockIdx.x * blockDim.x;
  tid_k = tid / (n_i * n_j);
  tid_j = (tid - tid_k * n_i * n_j) / n_i;
  tid_i = tid - tid_k * n_i * n_j - tid_j * n_i;
  // Guard: the launch grid is rounded up and may overshoot the slab.
  if (tid_i < 0 || tid_i >= n_i || tid_j < 0 || tid_j >= n_j || tid_k < 0 || tid_k >= n_ghost) {
    return;
  }
  tid_buffer = tid_i + tid_j * n_i + tid_k * n_i * n_j;
  // Map the slab coordinate into the padded (nx, ny, nz) array: in-plane
  // coordinates shift by n_ghost into the interior; tid_k addresses the
  // ghost layer on the requested side.
  if (direction == 0) {
    if (side == 0) {
      tid_pot = (tid_k) + (tid_i + n_ghost) * nx + (tid_j + n_ghost) * nx * ny;
    }
    if (side == 1) {
      tid_pot = (nx - n_ghost + tid_k) + (tid_i + n_ghost) * nx + (tid_j + n_ghost) * nx * ny;
    }
  }
  if (direction == 1) {
    if (side == 0) {
      tid_pot = (tid_i + n_ghost) + (tid_k)*nx + (tid_j + n_ghost) * nx * ny;
    }
    if (side == 1) {
      tid_pot = (tid_i + n_ghost) + (ny - n_ghost + tid_k) * nx + (tid_j + n_ghost) * nx * ny;
    }
  }
  if (direction == 2) {
    if (side == 0) {
      tid_pot = (tid_i + n_ghost) + (tid_j + n_ghost) * nx + (tid_k)*nx * ny;
    }
    if (side == 1) {
      tid_pot = (tid_i + n_ghost) + (tid_j + n_ghost) * nx + (nz - n_ghost + tid_k) * nx * ny;
    }
  }
  potential_d[tid_pot] = pot_boundary_d[tid_buffer];
}
// Copies the precomputed isolated-boundary potential for the given face from
// host to device and writes it into the ghost cells of the padded potential
// array via Set_Potential_Boundaries_Isolated_kernel.
void Grid3D::Set_Potential_Boundaries_Isolated_GPU(int direction, int side, int *flags)
{
  int n_i, n_j, n_ghost, size_buffer;
  int nx_g, ny_g, nz_g;
  n_ghost = N_GHOST_POTENTIAL;
  // Ghost-padded potential grid dimensions.
  nx_g = Grav.nx_local + 2 * n_ghost;
  ny_g = Grav.ny_local + 2 * n_ghost;
  nz_g = Grav.nz_local + 2 * n_ghost;
  // Host/device staging buffers for the selected face; assigned only when
  // the matching isolated-boundary macro is enabled below.
  Real *pot_boundary_h, *pot_boundary_d;
#ifdef GRAV_ISOLATED_BOUNDARY_X
  if (direction == 0) {
    n_i = Grav.ny_local;
    n_j = Grav.nz_local;
    if (side == 0) {
      pot_boundary_h = Grav.F.pot_boundary_x0;
    }
    if (side == 1) {
      pot_boundary_h = Grav.F.pot_boundary_x1;
    }
    if (side == 0) {
      pot_boundary_d = Grav.F.pot_boundary_x0_d;
    }
    if (side == 1) {
      pot_boundary_d = Grav.F.pot_boundary_x1_d;
    }
  }
#endif
#ifdef GRAV_ISOLATED_BOUNDARY_Y
  if (direction == 1) {
    n_i = Grav.nx_local;
    n_j = Grav.nz_local;
    if (side == 0) {
      pot_boundary_h = Grav.F.pot_boundary_y0;
    }
    if (side == 1) {
      pot_boundary_h = Grav.F.pot_boundary_y1;
    }
    if (side == 0) {
      pot_boundary_d = Grav.F.pot_boundary_y0_d;
    }
    if (side == 1) {
      pot_boundary_d = Grav.F.pot_boundary_y1_d;
    }
  }
#endif
#ifdef GRAV_ISOLATED_BOUNDARY_Z
  if (direction == 2) {
    n_i = Grav.nx_local;
    n_j = Grav.ny_local;
    if (side == 0) {
      pot_boundary_h = Grav.F.pot_boundary_z0;
    }
    if (side == 1) {
      pot_boundary_h = Grav.F.pot_boundary_z1;
    }
    if (side == 0) {
      pot_boundary_d = Grav.F.pot_boundary_z0_d;
    }
    if (side == 1) {
      pot_boundary_d = Grav.F.pot_boundary_z1_d;
    }
  }
#endif
  size_buffer = N_GHOST_POTENTIAL * n_i * n_j;
  // set values for GPU kernels: one thread per boundary cell (ceil-div).
  int ngrid = (size_buffer - 1) / TPB_GRAV + 1;
  // number of blocks per 1D grid
  dim3 dim1dGrid(ngrid, 1, 1);
  // number of threads per 1D block
  dim3 dim1dBlock(TPB_GRAV, 1, 1);
  // Copy the boundary array from host to device
  // NOTE(review): the return codes of the memcpy and synchronize below are
  // ignored — consider wrapping them in the project's error-check macro.
  cudaMemcpy(pot_boundary_d, pot_boundary_h, size_buffer * sizeof(Real), cudaMemcpyHostToDevice);
  cudaDeviceSynchronize();
  // Copy the potential boundary from buffer to potential array
  hipLaunchKernelGGL(Set_Potential_Boundaries_Isolated_kernel, dim1dGrid, dim1dBlock, 0, 0, direction, side,
                     size_buffer, n_i, n_j, nx_g, ny_g, nz_g, n_ghost, Grav.F.potential_d, pot_boundary_d);
}
#endif // GRAV_ISOLATED_BOUNDARY
// Fills the ghost cells on one face of the padded potential array with the
// periodic image taken from the interior region near the opposite face — a
// wrap-around copy within the same array. One thread per ghost cell.
void __global__ Set_Potential_Boundaries_Periodic_kernel(int direction, int side, int n_i, int n_j, int nx, int ny,
                                                         int nz, int n_ghost, Real *potential_d)
{
  // get a global thread ID
  int tid, tid_i, tid_j, tid_k, tid_src, tid_dst;
  tid = threadIdx.x + blockIdx.x * blockDim.x;
  tid_k = tid / (n_i * n_j);
  tid_j = (tid - tid_k * n_i * n_j) / n_i;
  tid_i = tid - tid_k * n_i * n_j - tid_j * n_i;
  // Guard: the launch grid is rounded up and may overshoot the slab.
  if (tid_i < 0 || tid_i >= n_i || tid_j < 0 || tid_j >= n_j || tid_k < 0 || tid_k >= n_ghost) {
    return;
  }
  // For each direction: src indexes interior cells offset from the opposite
  // edge; dst indexes the ghost layer on the requested side.
  if (direction == 0) {
    if (side == 0) {
      tid_src = (nx - 2 * n_ghost + tid_k) + (tid_i)*nx + (tid_j)*nx * ny;
    }
    if (side == 0) {
      tid_dst = (tid_k) + (tid_i)*nx + (tid_j)*nx * ny;
    }
    if (side == 1) {
      tid_src = (n_ghost + tid_k) + (tid_i)*nx + (tid_j)*nx * ny;
    }
    if (side == 1) {
      tid_dst = (nx - n_ghost + tid_k) + (tid_i)*nx + (tid_j)*nx * ny;
    }
  }
  if (direction == 1) {
    if (side == 0) {
      tid_src = (tid_i) + (ny - 2 * n_ghost + tid_k) * nx + (tid_j)*nx * ny;
    }
    if (side == 0) {
      tid_dst = (tid_i) + (tid_k)*nx + (tid_j)*nx * ny;
    }
    if (side == 1) {
      tid_src = (tid_i) + (n_ghost + tid_k) * nx + (tid_j)*nx * ny;
    }
    if (side == 1) {
      tid_dst = (tid_i) + (ny - n_ghost + tid_k) * nx + (tid_j)*nx * ny;
    }
  }
  if (direction == 2) {
    if (side == 0) {
      tid_src = (tid_i) + (tid_j)*nx + (nz - 2 * n_ghost + tid_k) * nx * ny;
    }
    if (side == 0) {
      tid_dst = (tid_i) + (tid_j)*nx + (tid_k)*nx * ny;
    }
    if (side == 1) {
      tid_src = (tid_i) + (tid_j)*nx + (n_ghost + tid_k) * nx * ny;
    }
    if (side == 1) {
      tid_dst = (tid_i) + (tid_j)*nx + (nz - n_ghost + tid_k) * nx * ny;
    }
  }
  potential_d[tid_dst] = potential_d[tid_src];
}
// Fill the ghost cells of the gravitational potential on one face by copying
// from the opposite face (periodic boundary), entirely on the device.
// direction: 0=x, 1=y, 2=z; side: 0=low face, 1=high face. flags is unused here.
void Grid3D::Set_Potential_Boundaries_Periodic_GPU(int direction, int side, int *flags)
{
  const int n_ghost = N_GHOST_POTENTIAL;
  // Ghost-padded dimensions of the local potential grid
  const int nx_g = Grav.nx_local + 2 * n_ghost;
  const int ny_g = Grav.ny_local + 2 * n_ghost;
  const int nz_g = Grav.nz_local + 2 * n_ghost;

  // The two in-plane extents of the boundary slab depend on the transfer axis
  int n_i = 0, n_j = 0;
  switch (direction) {
    case 0:
      n_i = ny_g;
      n_j = nz_g;
      break;
    case 1:
      n_i = nx_g;
      n_j = nz_g;
      break;
    case 2:
      n_i = nx_g;
      n_j = ny_g;
      break;
  }

  // One thread per ghost cell in the slab; 1D launch configuration
  const int n_cells  = N_GHOST_POTENTIAL * n_i * n_j;
  const int n_blocks = (n_cells - 1) / TPB_GRAV + 1;
  dim3 grid_1d(n_blocks, 1, 1);
  dim3 block_1d(TPB_GRAV, 1, 1);

  // Copy the opposite-face planes into this face's ghost zone
  hipLaunchKernelGGL(Set_Potential_Boundaries_Periodic_kernel, grid_1d, block_1d, 0, 0, direction, side, n_i, n_j,
                     nx_g, ny_g, nz_g, n_ghost, Grav.F.potential_d);
}
// Pack n_ghost_transfer planes of the potential, adjacent to one face, into a
// linear transfer buffer (one thread per buffer element).
// direction: 0=x, 1=y, 2=z; side: 0=low face, 1=high face.
// n_i, n_j: in-plane extents; nx, ny, nz: ghost-padded grid dimensions.
__global__ void Load_Transfer_Buffer_GPU_kernel(int direction, int side, int size_buffer, int n_i, int n_j, int nx,
int ny, int nz, int n_ghost_transfer, int n_ghost_potential,
Real *potential_d, Real *transfer_buffer_d)
{
// get a global thread ID and unpack it into (i, j, k) within the slab
int tid, tid_i, tid_j, tid_k, tid_buffer, tid_pot;
tid = threadIdx.x + blockIdx.x * blockDim.x;
tid_k = tid / (n_i * n_j);
tid_j = (tid - tid_k * n_i * n_j) / n_i;
tid_i = tid - tid_k * n_i * n_j - tid_j * n_i;
// discard threads beyond the slab
if (tid_i < 0 || tid_i >= n_i || tid_j < 0 || tid_j >= n_j || tid_k < 0 || tid_k >= n_ghost_transfer) {
return;
}
// linear index into the transfer buffer
tid_buffer = tid_i + tid_j * n_i + tid_k * n_i * n_j;
// index into the potential array: the first/last real (non-ghost) planes of the face
if (direction == 0) {
if (side == 0) {
tid_pot = (n_ghost_potential + tid_k) + (tid_i)*nx + (tid_j)*nx * ny;
}
if (side == 1) {
tid_pot = (nx - n_ghost_potential - n_ghost_transfer + tid_k) + (tid_i)*nx + (tid_j)*nx * ny;
}
}
if (direction == 1) {
if (side == 0) {
tid_pot = (tid_i) + (n_ghost_potential + tid_k) * nx + (tid_j)*nx * ny;
}
if (side == 1) {
tid_pot = (tid_i) + (ny - n_ghost_potential - n_ghost_transfer + tid_k) * nx + (tid_j)*nx * ny;
}
}
if (direction == 2) {
if (side == 0) {
tid_pot = (tid_i) + (tid_j)*nx + (n_ghost_potential + tid_k) * nx * ny;
}
if (side == 1) {
tid_pot = (tid_i) + (tid_j)*nx + (nz - n_ghost_potential - n_ghost_transfer + tid_k) * nx * ny;
}
}
// NOTE(review): tid_pot is only assigned for direction in {0,1,2} and side in {0,1}
transfer_buffer_d[tid_buffer] = potential_d[tid_pot];
}
// Pack the n_ghost_transfer potential planes adjacent to one face into the
// linear device buffer used for the domain exchange.
// direction: 0=x, 1=y, 2=z; side: 0=low face, 1=high face.
// buffer must be a device pointer; buffer_start is unused (kept for interface parity).
// Returns the number of Real values written to the buffer.
int Grid3D::Load_Gravity_Potential_To_Buffer_GPU(int direction, int side, Real *buffer, int buffer_start)
{
  // printf( "Loading Gravity Buffer: Dir %d side: %d \n", direction, side );
  int nx_pot, ny_pot, nz_pot, size_buffer, n_ghost_potential, n_ghost_transfer, n_i, n_j, ngrid;
  n_ghost_potential = N_GHOST_POTENTIAL;
  n_ghost_transfer  = N_GHOST_POTENTIAL;
  // Ghost-padded dimensions of the potential array
  nx_pot = Grav.nx_local + 2 * n_ghost_potential;
  ny_pot = Grav.ny_local + 2 * n_ghost_potential;
  nz_pot = Grav.nz_local + 2 * n_ghost_potential;
  // In-plane extents of the face being transferred
  if (direction == 0) {
    n_i = ny_pot;
    n_j = nz_pot;
  }
  if (direction == 1) {
    n_i = nx_pot;
    n_j = nz_pot;
  }
  if (direction == 2) {
    n_i = nx_pot;
    n_j = ny_pot;
  }
  size_buffer = n_ghost_transfer * n_i * n_j;
  // set values for GPU kernels: one thread per buffer element
  ngrid = (size_buffer - 1) / TPB_GRAV + 1;
  // number of blocks per 1D grid
  dim3 dim1dGrid(ngrid, 1, 1);
  // number of threads per 1D block
  dim3 dim1dBlock(TPB_GRAV, 1, 1);
  Real *potential_d   = (Real *)Grav.F.potential_d;
  Real *send_buffer_d = buffer;
  hipLaunchKernelGGL(Load_Transfer_Buffer_GPU_kernel, dim1dGrid, dim1dBlock, 0, 0, direction, side, size_buffer, n_i,
                     n_j, nx_pot, ny_pot, nz_pot, n_ghost_transfer, n_ghost_potential, potential_d, send_buffer_d);
  // Fix: use the HIP sync call (a cudaDeviceSynchronize survived hipify).
  // The sync guarantees the buffer is fully populated before the caller uses it.
  CHECK(hipDeviceSynchronize());
  return size_buffer;
}
// Unpack a linear transfer buffer into the n_ghost_transfer ghost planes of
// one face of the potential array (one thread per buffer element).
// Inverse of Load_Transfer_Buffer_GPU_kernel.
// direction: 0=x, 1=y, 2=z; side: 0=low face, 1=high face.
__global__ void Unload_Transfer_Buffer_GPU_kernel(int direction, int side, int size_buffer, int n_i, int n_j, int nx,
int ny, int nz, int n_ghost_transfer, int n_ghost_potential,
Real *potential_d, Real *transfer_buffer_d)
{
// get a global thread ID and unpack it into (i, j, k) within the slab
int tid, tid_i, tid_j, tid_k, tid_buffer, tid_pot;
tid = threadIdx.x + blockIdx.x * blockDim.x;
tid_k = tid / (n_i * n_j);
tid_j = (tid - tid_k * n_i * n_j) / n_i;
tid_i = tid - tid_k * n_i * n_j - tid_j * n_i;
// discard threads beyond the slab
if (tid_i < 0 || tid_i >= n_i || tid_j < 0 || tid_j >= n_j || tid_k < 0 || tid_k >= n_ghost_transfer) {
return;
}
// linear index into the transfer buffer
tid_buffer = tid_i + tid_j * n_i + tid_k * n_i * n_j;
// index into the potential array: the ghost planes outside the real domain
if (direction == 0) {
if (side == 0) {
tid_pot = (n_ghost_potential - n_ghost_transfer + tid_k) + (tid_i)*nx + (tid_j)*nx * ny;
}
if (side == 1) {
tid_pot = (nx - n_ghost_potential + tid_k) + (tid_i)*nx + (tid_j)*nx * ny;
}
}
if (direction == 1) {
if (side == 0) {
tid_pot = (tid_i) + (n_ghost_potential - n_ghost_transfer + tid_k) * nx + (tid_j)*nx * ny;
}
if (side == 1) {
tid_pot = (tid_i) + (ny - n_ghost_potential + tid_k) * nx + (tid_j)*nx * ny;
}
}
if (direction == 2) {
if (side == 0) {
tid_pot = (tid_i) + (tid_j)*nx + (n_ghost_potential - n_ghost_transfer + tid_k) * nx * ny;
}
if (side == 1) {
tid_pot = (tid_i) + (tid_j)*nx + (nz - n_ghost_potential + tid_k) * nx * ny;
}
}
// NOTE(review): tid_pot is only assigned for direction in {0,1,2} and side in {0,1}
potential_d[tid_pot] = transfer_buffer_d[tid_buffer];
}
// Unpack a received transfer buffer of potential values into the ghost planes
// of this domain's potential array. Mirrors Load_Gravity_Potential_To_Buffer_GPU.
// direction: 0=x, 1=y, 2=z; side: 0=low face, 1=high face.
// buffer must be a device pointer; buffer_start is unused (kept for interface parity).
void Grid3D::Unload_Gravity_Potential_from_Buffer_GPU(int direction, int side, Real *buffer, int buffer_start)
{
  // printf( "Loading Gravity Buffer: Dir %d side: %d \n", direction, side );
  int nx_pot, ny_pot, nz_pot, size_buffer, n_ghost_potential, n_ghost_transfer, n_i, n_j, ngrid;
  n_ghost_potential = N_GHOST_POTENTIAL;
  n_ghost_transfer  = N_GHOST_POTENTIAL;
  // Ghost-padded dimensions of the potential array
  nx_pot = Grav.nx_local + 2 * n_ghost_potential;
  ny_pot = Grav.ny_local + 2 * n_ghost_potential;
  nz_pot = Grav.nz_local + 2 * n_ghost_potential;
  // In-plane extents of the face being transferred
  if (direction == 0) {
    n_i = ny_pot;
    n_j = nz_pot;
  }
  if (direction == 1) {
    n_i = nx_pot;
    n_j = nz_pot;
  }
  if (direction == 2) {
    n_i = nx_pot;
    n_j = ny_pot;
  }
  size_buffer = n_ghost_transfer * n_i * n_j;
  // set values for GPU kernels: one thread per buffer element
  ngrid = (size_buffer - 1) / TPB_GRAV + 1;
  // number of blocks per 1D grid
  dim3 dim1dGrid(ngrid, 1, 1);
  // number of threads per 1D block
  dim3 dim1dBlock(TPB_GRAV, 1, 1);
  Real *potential_d   = (Real *)Grav.F.potential_d;
  Real *recv_buffer_d = buffer;
  hipLaunchKernelGGL(Unload_Transfer_Buffer_GPU_kernel, dim1dGrid, dim1dBlock, 0, 0, direction, side, size_buffer, n_i,
                     n_j, nx_pot, ny_pot, nz_pot, n_ghost_transfer, n_ghost_potential, potential_d, recv_buffer_d);
}
#endif // GRAVITY
|
b7ca841c8320af46dd1039cff49b26cf9806efe7.hip
|
// !!! This is a file automatically generated by hipify!!!
/*******************************************************
* Copyright (c) 2014, ArrayFire
* All rights reserved.
*
* This file is distributed under 3-clause BSD license.
* The complete license agreement can be obtained at:
* http://arrayfire.com/licenses/BSD-3-Clause
********************************************************/
#include <Array.hpp>
#include <copy.hpp>
#include <debug_cuda.hpp>
#include <set.hpp>
#include <sort.hpp>
#include <af/dim4.hpp>
#include <algorithm>
#include <thrust/device_ptr.h>
#include <thrust/set_operations.h>
#include <thrust/sort.h>
#include <thrust/unique.h>
// Set operations (unique / union / intersect) implemented with Thrust on the
// device. All routines return a sorted result and shrink the output array to
// the number of elements actually produced.
namespace cuda {
using af::dim4;

// Return the sorted, duplicate-free elements of `in`.
// If `is_sorted` is false the working copy is sorted first.
template<typename T>
Array<T> setUnique(const Array<T> &in, const bool is_sorted) {
    Array<T> out = copyArray<T>(in);
    thrust::device_ptr<T> out_ptr = thrust::device_pointer_cast<T>(out.get());
    thrust::device_ptr<T> out_ptr_end = out_ptr + out.elements();
    if (!is_sorted) THRUST_SELECT(thrust::sort, out_ptr, out_ptr_end);
    thrust::device_ptr<T> out_ptr_last;
    THRUST_SELECT_OUT(out_ptr_last, thrust::unique, out_ptr, out_ptr_end);
    // Shrink to the number of unique elements kept by thrust::unique
    out.resetDims(dim4(thrust::distance(out_ptr, out_ptr_last)));
    return out;
}

// Return the sorted union of `first` and `second`.
// When `is_unique` is true the inputs are assumed already sorted and deduplicated.
template<typename T>
Array<T> setUnion(const Array<T> &first, const Array<T> &second,
                  const bool is_unique) {
    Array<T> unique_first  = first;
    Array<T> unique_second = second;
    if (!is_unique) {
        unique_first  = setUnique(first, false);
        unique_second = setUnique(second, false);
    }
    // Upper bound on the union size; trimmed after the merge
    dim_t out_size = unique_first.elements() + unique_second.elements();
    Array<T> out = createEmptyArray<T>(dim4(out_size));
    thrust::device_ptr<T> first_ptr =
        thrust::device_pointer_cast<T>(unique_first.get());
    thrust::device_ptr<T> first_ptr_end = first_ptr + unique_first.elements();
    thrust::device_ptr<T> second_ptr =
        thrust::device_pointer_cast<T>(unique_second.get());
    thrust::device_ptr<T> second_ptr_end =
        second_ptr + unique_second.elements();
    thrust::device_ptr<T> out_ptr = thrust::device_pointer_cast<T>(out.get());
    thrust::device_ptr<T> out_ptr_last;
    THRUST_SELECT_OUT(out_ptr_last, thrust::set_union, first_ptr, first_ptr_end,
                      second_ptr, second_ptr_end, out_ptr);
    out.resetDims(dim4(thrust::distance(out_ptr, out_ptr_last)));
    return out;
}

// Return the sorted intersection of `first` and `second`.
// When `is_unique` is true the inputs are assumed already sorted and deduplicated.
template<typename T>
Array<T> setIntersect(const Array<T> &first, const Array<T> &second,
                      const bool is_unique) {
    Array<T> unique_first  = first;
    Array<T> unique_second = second;
    if (!is_unique) {
        unique_first  = setUnique(first, false);
        unique_second = setUnique(second, false);
    }
    // Upper bound on the intersection size; trimmed after the merge.
    // Fix: use std::max — hipify had mangled the qualifier into the
    // undeclared `::max` (the original CUDA source uses std::max).
    dim_t out_size =
        std::max(unique_first.elements(), unique_second.elements());
    Array<T> out = createEmptyArray<T>(dim4(out_size));
    thrust::device_ptr<T> first_ptr =
        thrust::device_pointer_cast<T>(unique_first.get());
    thrust::device_ptr<T> first_ptr_end = first_ptr + unique_first.elements();
    thrust::device_ptr<T> second_ptr =
        thrust::device_pointer_cast<T>(unique_second.get());
    thrust::device_ptr<T> second_ptr_end =
        second_ptr + unique_second.elements();
    thrust::device_ptr<T> out_ptr = thrust::device_pointer_cast<T>(out.get());
    thrust::device_ptr<T> out_ptr_last;
    THRUST_SELECT_OUT(out_ptr_last, thrust::set_intersection, first_ptr,
                      first_ptr_end, second_ptr, second_ptr_end, out_ptr);
    out.resetDims(dim4(thrust::distance(out_ptr, out_ptr_last)));
    return out;
}

// Explicit instantiations for every type ArrayFire supports here
#define INSTANTIATE(T)                                                        \
    template Array<T> setUnique<T>(const Array<T> &in, const bool is_sorted); \
    template Array<T> setUnion<T>(                                            \
        const Array<T> &first, const Array<T> &second, const bool is_unique); \
    template Array<T> setIntersect<T>(                                        \
        const Array<T> &first, const Array<T> &second, const bool is_unique);
INSTANTIATE(float)
INSTANTIATE(double)
INSTANTIATE(int)
INSTANTIATE(uint)
INSTANTIATE(char)
INSTANTIATE(uchar)
INSTANTIATE(short)
INSTANTIATE(ushort)
INSTANTIATE(intl)
INSTANTIATE(uintl)
}  // namespace cuda
|
b7ca841c8320af46dd1039cff49b26cf9806efe7.cu
|
/*******************************************************
* Copyright (c) 2014, ArrayFire
* All rights reserved.
*
* This file is distributed under 3-clause BSD license.
* The complete license agreement can be obtained at:
* http://arrayfire.com/licenses/BSD-3-Clause
********************************************************/
#include <Array.hpp>
#include <copy.hpp>
#include <debug_cuda.hpp>
#include <set.hpp>
#include <sort.hpp>
#include <af/dim4.hpp>
#include <algorithm>
#include <thrust/device_ptr.h>
#include <thrust/set_operations.h>
#include <thrust/sort.h>
#include <thrust/unique.h>
// Set operations (unique / union / intersect) implemented with Thrust on the
// device. All routines return a sorted result and shrink the output array to
// the number of elements actually produced.
// NOTE(review): THRUST_SELECT / THRUST_SELECT_OUT are project macros
// (presumably choosing the execution policy); semantics assumed to be a plain
// call of the named thrust algorithm — confirm against debug_cuda.hpp.
namespace cuda {
using af::dim4;
// Return the sorted, duplicate-free elements of `in`.
// If `is_sorted` is false the working copy is sorted first.
template<typename T>
Array<T> setUnique(const Array<T> &in, const bool is_sorted) {
Array<T> out = copyArray<T>(in);
thrust::device_ptr<T> out_ptr = thrust::device_pointer_cast<T>(out.get());
thrust::device_ptr<T> out_ptr_end = out_ptr + out.elements();
if (!is_sorted) THRUST_SELECT(thrust::sort, out_ptr, out_ptr_end);
thrust::device_ptr<T> out_ptr_last;
THRUST_SELECT_OUT(out_ptr_last, thrust::unique, out_ptr, out_ptr_end);
// Shrink to the number of unique elements kept by thrust::unique
out.resetDims(dim4(thrust::distance(out_ptr, out_ptr_last)));
return out;
}
// Return the sorted union of `first` and `second`.
// When `is_unique` is true the inputs are assumed already sorted and deduplicated.
template<typename T>
Array<T> setUnion(const Array<T> &first, const Array<T> &second,
                  const bool is_unique) {
Array<T> unique_first = first;
Array<T> unique_second = second;
if (!is_unique) {
unique_first = setUnique(first, false);
unique_second = setUnique(second, false);
}
// Upper bound on the union size; trimmed after the merge
dim_t out_size = unique_first.elements() + unique_second.elements();
Array<T> out = createEmptyArray<T>(dim4(out_size));
thrust::device_ptr<T> first_ptr =
thrust::device_pointer_cast<T>(unique_first.get());
thrust::device_ptr<T> first_ptr_end = first_ptr + unique_first.elements();
thrust::device_ptr<T> second_ptr =
thrust::device_pointer_cast<T>(unique_second.get());
thrust::device_ptr<T> second_ptr_end =
second_ptr + unique_second.elements();
thrust::device_ptr<T> out_ptr = thrust::device_pointer_cast<T>(out.get());
thrust::device_ptr<T> out_ptr_last;
THRUST_SELECT_OUT(out_ptr_last, thrust::set_union, first_ptr, first_ptr_end,
                  second_ptr, second_ptr_end, out_ptr);
out.resetDims(dim4(thrust::distance(out_ptr, out_ptr_last)));
return out;
}
// Return the sorted intersection of `first` and `second`.
// When `is_unique` is true the inputs are assumed already sorted and deduplicated.
template<typename T>
Array<T> setIntersect(const Array<T> &first, const Array<T> &second,
                      const bool is_unique) {
Array<T> unique_first = first;
Array<T> unique_second = second;
if (!is_unique) {
unique_first = setUnique(first, false);
unique_second = setUnique(second, false);
}
// Upper bound on the intersection size; trimmed after the merge
dim_t out_size =
std::max(unique_first.elements(), unique_second.elements());
Array<T> out = createEmptyArray<T>(dim4(out_size));
thrust::device_ptr<T> first_ptr =
thrust::device_pointer_cast<T>(unique_first.get());
thrust::device_ptr<T> first_ptr_end = first_ptr + unique_first.elements();
thrust::device_ptr<T> second_ptr =
thrust::device_pointer_cast<T>(unique_second.get());
thrust::device_ptr<T> second_ptr_end =
second_ptr + unique_second.elements();
thrust::device_ptr<T> out_ptr = thrust::device_pointer_cast<T>(out.get());
thrust::device_ptr<T> out_ptr_last;
THRUST_SELECT_OUT(out_ptr_last, thrust::set_intersection, first_ptr,
                  first_ptr_end, second_ptr, second_ptr_end, out_ptr);
out.resetDims(dim4(thrust::distance(out_ptr, out_ptr_last)));
return out;
}
// Explicit instantiations for every type ArrayFire supports here
#define INSTANTIATE(T)                                                        \
    template Array<T> setUnique<T>(const Array<T> &in, const bool is_sorted); \
    template Array<T> setUnion<T>(                                            \
        const Array<T> &first, const Array<T> &second, const bool is_unique); \
    template Array<T> setIntersect<T>(                                        \
        const Array<T> &first, const Array<T> &second, const bool is_unique);
INSTANTIATE(float)
INSTANTIATE(double)
INSTANTIATE(int)
INSTANTIATE(uint)
INSTANTIATE(char)
INSTANTIATE(uchar)
INSTANTIATE(short)
INSTANTIATE(ushort)
INSTANTIATE(intl)
INSTANTIATE(uintl)
}  // namespace cuda
|
fed5705eeb4ad04a271b5b839c90dd945a8f3c4c.hip
|
// !!! This is a file automatically generated by hipify!!!
#include "image-process.h"
//#if defined(_MSC_VER) && (_MSC_VER >= 1600)
//#pragma execution_character_set("utf-8")
//#endif
//#include <iostream>
//#include <cmath>
//#include <limits.h>
//#include <hip/hip_runtime.h>
//#include <opencv2/opencv.hpp>
//using namespace cv;
//using namespace std;
//// cpu:
//extern "C" void resizeImage(const Mat &src, Mat &dst, const Size &s);
//// cpu: xlen ylen
//extern "C" void transferImage(const Mat &src, Mat &dst, int xlen, int ylen);
//// cpu: num = 0 | 1
//extern "C" void mirrorImage(const Mat &src, Mat &dst, int num);
//// cpu:
//extern "C" void rotateImage(const Mat &src, Mat &dst, int degree);
////
//extern "C" void cutImage(const Mat &src, Mat &dst, int dir, int len);
// cuda
//extern "C" bool initCUDA();
//extern "C" void resizeImageGPU(const Mat &_src, Mat &_dst, const Size &s);
//extern "C" void transferImageGPU(const Mat &_src, Mat &_dst, int xlen, int ylen);
//extern "C" void mirrorImageGPU(const Mat &_src, Mat &_dst, int num);
//extern "C" void rotateImageGPU(const Mat &src, Mat &dst, int degree);
//extern "C" void cutImageGPU(const Mat &_src, Mat &_dst, int dir, int len);
// cpu:
extern "C"
void resizeImage(const Mat &src, Mat &dst, const Size &s)
{
dst = Mat::zeros(s, CV_8UC3);
double fRows = s.height / (float)src.rows;
double fCols = s.width / (float)src.cols;
int pX = 0;
int pY = 0;
for (int i = 0; i != dst.rows; ++i) {
for (int j = 0; j != dst.cols; ++j) {
pX = cvRound(i / (double)fRows); //
pY = cvRound(j / (double)fCols);
if (pX < src.rows && pX >= 0 && pY < src.cols && pY >= 0) {
dst.at<Vec3b>(i, j)[0] = src.at<Vec3b>(pX, pY)[0]; // B
dst.at<Vec3b>(i, j)[1] = src.at<Vec3b>(pX, pY)[1]; // G
dst.at<Vec3b>(i, j)[2] = src.at<Vec3b>(pX, pY)[2]; // R
}
}
}
}
// cpu: xlen ylen
extern "C"
void transferImage(const Mat &src, Mat &dst, int xlen, int ylen)
{
int width = src.cols, height = src.rows;
width += abs(xlen);
height += abs(ylen);
dst = Mat::zeros(Size(width, height), CV_8UC3);
int xadd = xlen < 0 ? 0 : abs(xlen);
int yadd = ylen < 0 ? abs(ylen) : 0;
for (int i = 0; i != src.rows; ++i) {
for (int j = 0; j != src.cols; ++j) {
dst.at<Vec3b>(i + yadd, j + xadd)[0] = src.at<Vec3b>(i, j)[0];
dst.at<Vec3b>(i + yadd, j + xadd)[1] = src.at<Vec3b>(i, j)[1];
dst.at<Vec3b>(i + yadd, j + xadd)[2] = src.at<Vec3b>(i, j)[2];
}
}
}
// cpu: num = 0 | 1; (0:x1:y)
extern "C"
void mirrorImage(const Mat &src, Mat &dst, int num)
{
dst = Mat::zeros(Size(src.cols, src.rows), CV_8UC3);
if (0 == num) {
for (int i = 0, x = src.rows - 1; i != src.rows; ++i, --x) {
for (int j = 0, y = 0; j != src.cols; ++j, ++y) {
dst.at<Vec3b>(x, y)[0] = src.at<Vec3b>(i, j)[0];
dst.at<Vec3b>(x, y)[1] = src.at<Vec3b>(i, j)[1];
dst.at<Vec3b>(x, y)[2] = src.at<Vec3b>(i, j)[2];
}
}
}
else {
for (int i = 0, x = 0; i != src.rows; ++i, ++x) {
for (int j = 0, y = src.cols - 1; j != src.cols; ++j, --y) {
dst.at<Vec3b>(x, y)[0] = src.at<Vec3b>(i, j)[0];
dst.at<Vec3b>(x, y)[1] = src.at<Vec3b>(i, j)[1];
dst.at<Vec3b>(x, y)[2] = src.at<Vec3b>(i, j)[2];
}
}
}
}
// cpu: http://blog.csdn.net/ab1322583838/article/details/52102732 http://blog.csdn.net/fengbingchun/article/details/17713429
extern "C"
void rotateImage(const Mat &src, Mat &dst, int degree)
{
degree = -degree; //
double angle = degree * CV_PI / 180.; //
double a = sin(angle), b = cos(angle);
int width = src.cols, height = src.rows;
//
int width_rotate = int(height * fabs(a) + width * fabs(b));
int height_rotate = int(width * fabs(a) + height * fabs(b));
dst = Mat::zeros(Size(width_rotate, height_rotate), CV_8UC3);
// map
// [ m0 m1 m2 ] ===> [ A11 A12 b1 ]
// [ m3 m4 m5 ] ===> [ A21 A22 b2 ]
float map[6];
Mat map_matrix = Mat(2, 3, CV_32F, map);
//
CvPoint2D32f center = cvPoint2D32f(width / 2, height / 2);
CvMat map_matrix2 = map_matrix;
cv2DRotationMatrix(center, degree, 1.0, &map_matrix2);
map[2] += (width_rotate - width) / 2;
map[5] += (height_rotate - height) / 2;
warpAffine(src, dst, map_matrix, Size(width_rotate, height_rotate), 0, 0, 0); // 0,0,0 1,0,0
// imshow("cpu", dst);
}
//
extern "C"
void cutImage(const Mat &src, Mat &dst, int dir, int len)
{
if (0 == dir) {
dst = Mat(Size(src.cols + len, src.rows), CV_8UC3);
uchar *src_data = src.data;
uchar *dst_data = dst.data;
double ratio = double(len) / double(dst.rows);
for (int i = 0, x = 0; i < src.rows; i++, x++)
{
int start = (src.rows - i) * ratio;
for (int j = start, y = 0; j < src.cols + start; j++, y++)
{
*(dst_data + (i*dst.cols + j) * 3 + 0) = *(src_data + (x*src.cols + y) * 3 + 0);
*(dst_data + (i*dst.cols + j) * 3 + 1) = *(src_data + (x*src.cols + y) * 3 + 1);
*(dst_data + (i*dst.cols + j) * 3 + 2) = *(src_data + (x*src.cols + y) * 3 + 2);
}
}
}
else {
dst = Mat(Size(src.cols, src.rows + len), CV_8UC3);
uchar *src_data = src.data;
uchar *dst_data = dst.data;
double ratio = double(len) / double(dst.cols);
for (int j = 0, y = 0; j < src.cols; j++, y++)
{
int start = j * ratio;
for (int i = start, x = 0; i < src.rows + start; i++, x++)
{
*(dst_data + (i*dst.cols + j) * 3 + 0) = *(src_data + (x*src.cols + y) * 3 + 0);
*(dst_data + (i*dst.cols + j) * 3 + 1) = *(src_data + (x*src.cols + y) * 3 + 1);
*(dst_data + (i*dst.cols + j) * 3 + 2) = *(src_data + (x*src.cols + y) * 3 + 2);
}
}
}
}
//////////////////////////////////
// cuda
extern "C"
bool initCUDA()
{
int count;
hipGetDeviceCount(&count);
if (count == 0) {
fprintf(stderr, "There is no device.\n");
return false;
}
int i;
for (i = 0; i < count; i++) {
hipDeviceProp_t prop;
if (hipGetDeviceProperties(&prop, i) == hipSuccess) {
if (prop.major >= 1) {
break;
}
}
}
if (i == count) {
fprintf(stderr, "There is no device supporting CUDA 1.x.\n");
return false;
}
hipSetDevice(i);
return true;
}
////////////////////////////////
// gpu
extern "C"
__global__ void resizeKernel(uchar* _src_dev, uchar * _dst_dev, int _src_step, int _dst_step,
int _src_rows, int _src_cols, int _dst_rows, int _dst_cols)
{
int i = blockIdx.x;
int j = blockIdx.y;
double fRows = _dst_rows / (float)_src_rows;
double fCols = _dst_cols / (float)_src_cols;
int pX = 0;
int pY = 0;
pX = (int)(i / fRows);
pY = (int)(j / fCols);
if (pX < _src_rows && pX >= 0 && pY < _src_cols && pY >= 0) {
*(_dst_dev + i*_dst_step + 3 * j + 0) = *(_src_dev + pX*_src_step + 3 * pY);
*(_dst_dev + i*_dst_step + 3 * j + 1) = *(_src_dev + pX*_src_step + 3 * pY + 1);
*(_dst_dev + i*_dst_step + 3 * j + 2) = *(_src_dev + pX*_src_step + 3 * pY + 2);
}
}
extern "C"
void resizeImageGPU(const Mat &_src, Mat &_dst, const Size &s)
{
_dst = Mat(s, CV_8UC3);
uchar *src_data = _src.data;
int width = _src.cols;
int height = _src.rows;
uchar *src_dev, *dst_dev;
hipMalloc((void**)&src_dev, 3 * width*height * sizeof(uchar));
hipMalloc((void**)&dst_dev, 3 * s.width * s.height * sizeof(uchar));
hipMemcpy(src_dev, src_data, 3 * width*height * sizeof(uchar), hipMemcpyHostToDevice);
int src_step = _src.step; // _src
int dst_step = _dst.step; // _dst
dim3 grid(s.height, s.width);
hipLaunchKernelGGL(( resizeKernel), dim3(grid), dim3(1) , 0, 0, src_dev, dst_dev, src_step, dst_step, height, width, s.height, s.width);
hipMemcpy(_dst.data, dst_dev, 3 * s.width * s.height * sizeof(uchar), hipMemcpyDeviceToHost);
}
////////////////////////////////
// gpu
extern "C"
__global__ void transferKernel(uchar* _src_dev, uchar * _dst_dev, int width, int height,
int _src_rows, int _src_cols, int xlen, int ylen)
{
int i = blockIdx.x;
int j = blockIdx.y;
int xadd = xlen < 0 ? 0 : abs(xlen);
int yadd = ylen < 0 ? abs(ylen) : 0;
int offset = i*gridDim.y + j;
int tran_offset = (i + yadd) * width + j + xadd;
if (i < gridDim.x && j >= 0 && j < gridDim.y && i >= 0) {
*(_dst_dev + tran_offset * 3 + 0) = *(_src_dev + offset * 3 + 0);
*(_dst_dev + tran_offset * 3 + 1) = *(_src_dev + offset * 3 + 1);
*(_dst_dev + tran_offset * 3 + 2) = *(_src_dev + offset * 3 + 2);
}
}
extern "C"
void transferImageGPU(const Mat &_src, Mat &_dst, int xlen, int ylen)
{
int width = _src.cols, height = _src.rows;
width += abs(xlen);
height += abs(ylen);
_dst = Mat::zeros(Size(width, height), CV_8UC3);
uchar *src_data = _src.data;
uchar *src_dev, *dst_dev;
hipMalloc((void**)&src_dev, 3 * _src.rows * _src.cols * sizeof(uchar));
hipMalloc((void**)&dst_dev, 3 * width * height * sizeof(uchar));
hipMemcpy(src_dev, src_data, 3 * _src.rows * _src.cols * sizeof(uchar), hipMemcpyHostToDevice);
hipMemset(dst_dev, 0, 3 * width * height * sizeof(uchar));
dim3 grid(_src.rows, _src.cols);
// cout << _src.rows << " " << _src.cols << endl;
hipLaunchKernelGGL(( transferKernel) , dim3(grid), dim3(1) , 0, 0, src_dev, dst_dev, width, height, _src.rows, _src.cols, xlen, ylen);
// cout << width << " " << height << " " << _src.rows << " " << _src.cols << endl;
hipMemcpy(_dst.data, dst_dev, 3 * width * height * sizeof(uchar), hipMemcpyDeviceToHost);
}
////////////////////////////////
// gpu
extern "C"
__global__ void mirrorKernel(uchar* _src_dev, uchar * _dst_dev, int height, int width, int num)
{
int i = blockIdx.x;
int j = blockIdx.y;
int offset = i*gridDim.y + j;
int x, y;
if (0 == num) {
x = height - i - 1;
y = j;
}
else {
x = i;
y = width - j - 1;
}
int mirror_offset = x*gridDim.y + y;
if (i < gridDim.x && j >= 0 && j < gridDim.y && i >= 0) {
*(_dst_dev + mirror_offset * 3 + 0) = *(_src_dev + offset * 3 + 0);
*(_dst_dev + mirror_offset * 3 + 1) = *(_src_dev + offset * 3 + 1);
*(_dst_dev + mirror_offset * 3 + 2) = *(_src_dev + offset * 3 + 2);
}
}
extern "C"
void mirrorImageGPU(const Mat &_src, Mat &_dst, int num)
{
_dst = Mat::zeros(Size(_src.cols, _src.rows), CV_8UC3);
uchar *src_data = _src.data;
uchar *src_dev, *dst_dev;
hipMalloc((void**)&src_dev, 3 * _src.rows * _src.cols * sizeof(uchar));
hipMalloc((void**)&dst_dev, 3 * _src.rows * _src.cols * sizeof(uchar));
hipMemcpy(src_dev, src_data, 3 * _src.rows * _src.cols * sizeof(uchar), hipMemcpyHostToDevice);
dim3 grid(_src.rows, _src.cols);
hipLaunchKernelGGL(( mirrorKernel) , dim3(grid), dim3(1) , 0, 0, src_dev, dst_dev, _src.rows, _src.cols, num);
hipMemcpy(_dst.data, dst_dev, 3 * _src.rows * _src.cols * sizeof(uchar), hipMemcpyDeviceToHost);
}
////////////////////////////////
// gpu
extern "C"
__device__ int saturateCast(double num)
{
return round(num);
}
// GPU rotation kernel using inverse mapping in 10-bit fixed point.
// Launched with grid = (dst_rows, dst_cols), one thread per block; blockIdx
// identifies the destination pixel (y = row, x = column).
// m0..m5 form the inverse 2x3 affine map dst -> src; coordinates are scaled
// by 1024 (<<10) and round_delta (512) implements rounding on the >>10.
__global__ void rotateKernel(uchar* _src_dev, uchar * _dst_dev, int width, int height,
const double m0, const double m1, const double m2, const double m3, const double m4, const double m5,
int round_delta)
{
int y = blockIdx.x; // destination row
int x = blockIdx.y; // destination column
// if (y < gridDim.x && y > 0 && x < gridDim.y && x > 0)
{
// fixed-point contributions of x (adelta/bdelta) and y (X0/Y0) to the source coords
int adelta = saturateCast(m0 * x * 1024);
int bdelta = saturateCast(m3 * x * 1024);
int X0 = saturateCast((m1 * y + m2) * 1024) + round_delta;
int Y0 = saturateCast((m4 * y + m5) * 1024) + round_delta;
int X = (X0 + adelta) >> 10; // source column
int Y = (Y0 + bdelta) >> 10; // source row
// the unsigned compare also rejects negative X/Y in a single test
if ((unsigned)X < width && (unsigned)Y < height)
{
// copy the three channels (3 bytes per pixel); gridDim.y == dst cols
*(_dst_dev + (y*gridDim.y + x) * 3 + 0) = *(_src_dev + (Y*width + X) * 3 + 0);
*(_dst_dev + (y*gridDim.y + x) * 3 + 1) = *(_src_dev + (Y*width + X) * 3 + 1);
*(_dst_dev + (y*gridDim.y + x) * 3 + 2) = *(_src_dev + (Y*width + X) * 3 + 2);
}
else
{
// destination pixel maps outside the source image: paint it black
*(_dst_dev + (y*gridDim.y + x) * 3 + 0) = 0;
*(_dst_dev + (y*gridDim.y + x) * 3 + 1) = 0;
*(_dst_dev + (y*gridDim.y + x) * 3 + 2) = 0;
}
}
}
extern "C"
void rotateImageGPU(const Mat &src, Mat &dst, int degree)
{
degree = -degree;
double angle = degree * CV_PI / 180.;
double alpha = cos(angle);
double beta = sin(angle);
int width = src.cols;
int height = src.rows;
int width_rotate = cvRound(width * fabs(alpha) + height * fabs(beta));
int height_rotate = cvRound(height * fabs(alpha) + width * fabs(beta));
double m[6];
m[0] = alpha;
m[1] = beta;
// m[2] = (1 - alpha) * width / 2. - beta * height / 2.;
m[2] = height * -beta;
// cout << width << " " << height << endl;
// cout << width_rotate << " " << height_rotate << endl;
// cout << alpha << " " << beta << endl;
// cout << m[2] << endl;
m[3] = -m[1];
m[4] = m[0];
// m[5] = beta * width / 2. + (1 - alpha) * height / 2.;
m[5] = 0;
// cout << "m[5] " << m[5] << endl;
Mat M = Mat(2, 3, CV_64F, m);
dst = Mat(cv::Size(width_rotate, height_rotate), src.type(), cv::Scalar::all(0));
double D = m[0] * m[4] - m[1] * m[3];
D = D != 0 ? 1. / D : 0;
double A11 = m[4] * D, A22 = m[0] * D;
m[0] = A11; m[1] *= -D;
m[3] *= -D; m[4] = A22;
double b1 = -m[0] * m[2] - m[1] * m[5];
double b2 = -m[3] * m[2] - m[4] * m[5];
m[2] = b1; m[5] = b2;
int round_delta = 512; //
// for (int y = 0; y < height_rotate; ++y)
// {
// for (int x = 0; x < width_rotate; ++x)
// {
// int adelta = cv::saturate_cast<int>(m[0] * x * 1024);
// int bdelta = cv::saturate_cast<int>(m[3] * x * 1024);
// int X0 = cv::saturate_cast<int>((m[1] * y + m[2]) * 1024) + round_delta;
// int Y0 = cv::saturate_cast<int>((m[4] * y + m[5]) * 1024) + round_delta;
// int X = (X0 + adelta) >> 10;
// int Y = (Y0 + bdelta) >> 10;
// if ((unsigned)X < width && (unsigned)Y < height)
// {
// // dst.at<cv::Vec3b>(y, x) = src.at<cv::Vec3b>(Y, X);
// *(dst.data + (y*width_rotate+x)*3 + 0) = *(src.data + (Y*width+X)*3 + 0);
// *(dst.data + (y*width_rotate+x)*3 + 1) = *(src.data + (Y*width+X)*3 + 1);
// *(dst.data + (y*width_rotate+x)*3 + 2) = *(src.data + (Y*width+X)*3 + 2);
// }
// }
// }
// cout << saturate_cast<int>(-99999999999) << " **" << endl;
// cout << INT_MAX << endl;
uchar *src_data = src.data;
uchar *src_dev, *dst_dev;
hipMalloc((void**)&src_dev, 3 * src.rows * src.cols * sizeof(uchar));
hipMalloc((void**)&dst_dev, 3 * width_rotate * height_rotate * sizeof(uchar));
hipMemcpy(src_dev, src_data, 3 * src.rows * src.cols * sizeof(uchar), hipMemcpyHostToDevice);
hipMemset(dst_dev, 0, width_rotate * height_rotate * sizeof(uchar));
dim3 grid(height_rotate, width_rotate);
hipLaunchKernelGGL(( rotateKernel) , dim3(grid), dim3(1) , 0, 0, src_dev, dst_dev, width, height,
m[0], m[1], m[2], m[3], m[4], m[5], round_delta);
hipMemcpy(dst.data, dst_dev, 3 * width_rotate * height_rotate * sizeof(uchar), hipMemcpyDeviceToHost);
}
////////////////////////////////
// gpu
extern "C"
__global__ void cutKernel(uchar* _src_dev, uchar * _dst_dev, int width, double ratio, int dir)
{
int i = blockIdx.x;
int j = blockIdx.y;
int x = 0, y = 0;
if (0 == dir)
{
y = (gridDim.x - i) * ratio;
}
else {
x = j * ratio;
}
/* int start = (gridDim.x - i) * ratio;
int y = start;
*/
int offset = i*gridDim.y + j;
int tran_offset = (i + x) * width + j + y;
if (i < gridDim.x && j >= 0 && j < gridDim.y && i >= 0) {
*(_dst_dev + tran_offset * 3 + 0) = *(_src_dev + offset * 3 + 0);
*(_dst_dev + tran_offset * 3 + 1) = *(_src_dev + offset * 3 + 1);
*(_dst_dev + tran_offset * 3 + 2) = *(_src_dev + offset * 3 + 2);
}
}
/*__global__ void cutKernel1(uchar* _src_dev, uchar * _dst_dev, int width, double ratio)
{
int i = blockIdx.x;
int j = blockIdx.y;
int start = j * ratio;
int x = start;
int offset = i*gridDim.y + j;
int tran_offset = (i+x) * width + j;
if (i < gridDim.x && j >= 0 && j < gridDim.y && i >= 0) {
*(_dst_dev + tran_offset*3 + 0) = *(_src_dev + offset*3 + 0);
*(_dst_dev + tran_offset*3 + 1) = *(_src_dev + offset*3 + 1);
*(_dst_dev + tran_offset*3 + 2) = *(_src_dev + offset*3 + 2);
}
}*/
extern "C"
void cutImageGPU(const Mat &_src, Mat &_dst, int dir, int len)
{
int width = _src.cols, height = _src.rows;
/* if (0 == dir) {
width += len;
_dst = Mat::zeros(Size(width, height), CV_8UC3);
uchar *src_data = _src.data;
uchar *src_dev , *dst_dev;
hipMalloc( (void**)&src_dev, 3 * _src.rows * _src.cols * sizeof(uchar) );
hipMalloc( (void**)&dst_dev, 3 * width * height * sizeof(uchar) );
hipMemcpy(src_dev, src_data, 3 * _src.rows * _src.cols * sizeof(uchar), hipMemcpyHostToDevice);
hipMemset(dst_dev, 0, 3 * width * height * sizeof(uchar));
double ratio = (double)len / _dst.rows;
dim3 grid(_src.rows, _src.cols);
hipLaunchKernelGGL(( cutKernel) , dim3(grid), dim3(1) , 0, 0, src_dev, dst_dev, width, ratio, dir);
hipMemcpy(_dst.data, dst_dev, 3 * width * height * sizeof(uchar), hipMemcpyDeviceToHost);
} else {
height += len;
_dst = Mat::zeros(Size(width, height), CV_8UC3);
uchar *src_data = _src.data;
uchar *src_dev , *dst_dev;
hipMalloc( (void**)&src_dev, 3 * _src.rows * _src.cols * sizeof(uchar) );
hipMalloc( (void**)&dst_dev, 3 * width * height * sizeof(uchar) );
hipMemcpy(src_dev, src_data, 3 * _src.rows * _src.cols * sizeof(uchar), hipMemcpyHostToDevice);
hipMemset(dst_dev, 0, 3 * width * height * sizeof(uchar));
double ratio = (double)len / _dst.cols;
dim3 grid(_src.rows, _src.cols);
cutKernel1 <<< grid, 1 >>>(src_dev, dst_dev, width, ratio, dir);
hipMemcpy(_dst.data, dst_dev, 3 * width * height * sizeof(uchar), hipMemcpyDeviceToHost);
}*/
double ratio;
if (0 == dir) {
width += len;
ratio = (double)len / height;
}
else {
height += len;
ratio = (double)len / width;
}
_dst = Mat::zeros(Size(width, height), CV_8UC3);
uchar *src_data = _src.data;
uchar *src_dev, *dst_dev;
hipMalloc((void**)&src_dev, 3 * _src.rows * _src.cols * sizeof(uchar));
hipMalloc((void**)&dst_dev, 3 * width * height * sizeof(uchar));
hipMemcpy(src_dev, src_data, 3 * _src.rows * _src.cols * sizeof(uchar), hipMemcpyHostToDevice);
hipMemset(dst_dev, 0, 3 * width * height * sizeof(uchar));
dim3 grid(_src.rows, _src.cols);
hipLaunchKernelGGL(( cutKernel) , dim3(grid), dim3(1) , 0, 0, src_dev, dst_dev, width, ratio, dir);
hipMemcpy(_dst.data, dst_dev, 3 * width * height * sizeof(uchar), hipMemcpyDeviceToHost);
}
//int main()
//{
// Mat src = cv::imread("f.bmp" , 1); //
// Mat dst_scale_cpu;
// Mat dst_scale_gpu;
// Mat dst_trans_cpu;
// Mat dst_trans_gpu;
// Mat dst_mirror_cpu;
// Mat dst_mirror_gpu;
// Mat dst_rotate_cpu;
// Mat dst_rotate_gpu;
// Mat dst_cut_cpu;
// Mat dst_cut_gpu;
///*
// struct timeval start;
// struct timeval end;
// unsigned long timer;
// gettimeofday(&start, NULL); //
// resizeImage(src, dst_scale_cpu, Size(src.cols * 2, src.rows * 2)); // CPU dst_cpu
// gettimeofday(&end, NULL); //
// timer = 1000000 * (end.tv_sec - start.tv_sec) + end.tv_usec - start.tv_usec;
// cout << "cpu" << timer << "us\n";
//*/
// struct timeval start;
// struct timeval end;
// unsigned long timer;
// gettimeofday(&start, NULL); //
/////////////////////////////
// resizeImage(src, dst_scale_cpu, Size(src.cols * 2, src.rows * 2));
// transferImage(src, dst_trans_cpu, 100, -100);
// mirrorImage(src, dst_mirror_cpu, 1);
// rotateImage(src, dst_rotate_cpu, 30);
// cutImage(src, dst_cut_cpu, 0, 50);
/////////////////////////////
// gettimeofday(&end, NULL); //
// timer = 1000000 * (end.tv_sec - start.tv_sec) + end.tv_usec - start.tv_usec;
// cout << "cpu" << timer << "us\n";
// initCUDA();
// gettimeofday(&start, NULL);
/////////////////////////////
// resizeImageGPU(src, dst_scale_gpu, Size(src.cols * 2, src.rows * 2));
// transferImageGPU(src, dst_trans_gpu, 100, -100);
// mirrorImageGPU(src, dst_mirror_gpu, 1);
// rotateImageGPU(src, dst_rotate_gpu, 30);
// cutImageGPU(src, dst_cut_gpu, 0, 50);
/////////////////////////////
// gettimeofday(&end, NULL);
// timer = 1000000 * (end.tv_sec - start.tv_sec) + end.tv_usec - start.tv_usec;
// cout << "gpu" << timer << "us\n";
////////////////////////////
// imshow("", src);
// imshow("_cpu", dst_scale_cpu);
// imshow("_gpu", dst_scale_gpu);
// imshow("_cpu", dst_trans_cpu);
// imshow("_gpu", dst_trans_gpu);
// imshow("_cpu", dst_mirror_cpu);
// imshow("_gpu", dst_mirror_gpu);
// imshow("_cpu", dst_rotate_cpu);
// imshow("_gpu", dst_rotate_gpu);
// imshow("_cpu", dst_cut_cpu);
// imshow("_gpu", dst_cut_gpu);
// // transferImage(src, dst_trans_cpu, 100, -100);
// // imshow("cpu_trans", dst_trans_cpu);
// // transferImageGPU(src, dst_trans_gpu, 100, -100);
// // imshow("gpu_trans", dst_trans_gpu);
// // mirrorImage(src, dst_mirror_cpu, 1);
// // mirrorImageGPU(src, dst_mirror_gpu, 1);
// // imshow("gpu", dst_mirror_gpu);
// // rotateImage(src, dst_rotate_cpu, 30);
// // rotateImageGPU(src, dst_rotate_gpu, 30);
// // imshow("gpu", dst_rotate_gpu);
// // cutImage(src, dst_cut_cpu, 0, 50);
// // imshow("cpu", dst_cut_cpu);
// // cutImageGPU(src, dst_cut_gpu, 0, 50);
// // imshow("gpu", dst_cut_gpu);
///*
// initCUDA();
// Mat dst_gpu;
// gettimeofday(&start, NULL);
// resizeImageGPU(src, dst_gpu, Size(src.cols * 2, src.rows * 2));
//// imshow("src", src);
//// imshow(" ", dst_gpu);
// gettimeofday(&end, NULL);
// timer = 1000000 * (end.tv_sec - start.tv_sec) + end.tv_usec - start.tv_usec;
// cout << "gpu" << timer << "us\n";
//// imshow("Demo", dst_gpu);
//*/
// waitKey(0);
// return 0;
//}
|
fed5705eeb4ad04a271b5b839c90dd945a8f3c4c.cu
|
#include "image-process.h"
//#if defined(_MSC_VER) && (_MSC_VER >= 1600)
//#pragma execution_character_set("utf-8")
//#endif
//#include <iostream>
//#include <cmath>
//#include <limits.h>
//#include <cuda.h>
//#include <opencv2/opencv.hpp>
//using namespace cv;
//using namespace std;
//// cpu: 对图像进行缩放
//extern "C" void resizeImage(const Mat &src, Mat &dst, const Size &s);
//// cpu: 对图像进行平移 xlen左右 ylen上下
//extern "C" void transferImage(const Mat &src, Mat &dst, int xlen, int ylen);
//// cpu: 对图像镜面变换 num = 0 | 1
//extern "C" void mirrorImage(const Mat &src, Mat &dst, int num);
//// cpu: 对图像旋转变换
//extern "C" void rotateImage(const Mat &src, Mat &dst, int degree);
//// 对图像进行错切
//extern "C" void cutImage(const Mat &src, Mat &dst, int dir, int len);
// cuda 设备检测
//extern "C" bool initCUDA();
//extern "C" void resizeImageGPU(const Mat &_src, Mat &_dst, const Size &s);
//extern "C" void transferImageGPU(const Mat &_src, Mat &_dst, int xlen, int ylen);
//extern "C" void mirrorImageGPU(const Mat &_src, Mat &_dst, int num);
//extern "C" void rotateImageGPU(const Mat &src, Mat &dst, int degree);
//extern "C" void cutImageGPU(const Mat &_src, Mat &_dst, int dir, int len);
// CPU: nearest-neighbour resize of a 3-channel 8-bit image to size s.
extern "C"
void resizeImage(const Mat &src, Mat &dst, const Size &s)
{
	dst = Mat::zeros(s, CV_8UC3);
	// Per-axis scale factors from source to destination.
	double scaleY = s.height / (float)src.rows;
	double scaleX = s.width / (float)src.cols;
	for (int row = 0; row != dst.rows; ++row) {
		for (int col = 0; col != dst.cols; ++col) {
			// Map each destination pixel back to its nearest source pixel.
			int srcRow = cvRound(row / (double)scaleY);
			int srcCol = cvRound(col / (double)scaleX);
			if (srcRow >= 0 && srcRow < src.rows && srcCol >= 0 && srcCol < src.cols) {
				dst.at<Vec3b>(row, col) = src.at<Vec3b>(srcRow, srcCol); // copies B, G, R
			}
		}
	}
}
// CPU: translate the image by xlen (horizontal) / ylen (vertical). The canvas
// grows by the shift amount so no pixels are lost; uncovered areas stay black.
extern "C"
void transferImage(const Mat &src, Mat &dst, int xlen, int ylen)
{
	// Enlarge the canvas to hold the shifted image.
	int newWidth = src.cols + abs(xlen);
	int newHeight = src.rows + abs(ylen);
	dst = Mat::zeros(Size(newWidth, newHeight), CV_8UC3);
	// Positive xlen shifts right; negative ylen shifts down.
	int colShift = (xlen < 0) ? 0 : abs(xlen);
	int rowShift = (ylen < 0) ? abs(ylen) : 0;
	for (int r = 0; r != src.rows; ++r) {
		for (int c = 0; c != src.cols; ++c) {
			dst.at<Vec3b>(r + rowShift, c + colShift) = src.at<Vec3b>(r, c);
		}
	}
}
// CPU: mirror the image. num == 0 reflects about the x axis (vertical flip),
// any other value reflects about the y axis (horizontal flip).
extern "C"
void mirrorImage(const Mat &src, Mat &dst, int num)
{
	dst = Mat::zeros(Size(src.cols, src.rows), CV_8UC3);
	for (int i = 0; i != src.rows; ++i) {
		for (int j = 0; j != src.cols; ++j) {
			// Reflect exactly one coordinate depending on the chosen axis.
			int x = (0 == num) ? (src.rows - 1 - i) : i;
			int y = (0 == num) ? j : (src.cols - 1 - j);
			dst.at<Vec3b>(x, y) = src.at<Vec3b>(i, j);
		}
	}
}
// CPU: rotate the image by `degree` degrees (clockwise), nearest-neighbour.
// references: http://blog.csdn.net/ab1322583838/article/details/52102732 http://blog.csdn.net/fengbingchun/article/details/17713429
extern "C"
void rotateImage(const Mat &src, Mat &dst, int degree)
{
	degree = -degree; // the rotation matrix is counter-clockwise; negate for clockwise
	double angle = degree * CV_PI / 180.; // degrees -> radians
	double a = sin(angle), b = cos(angle);
	int width = src.cols, height = src.rows;
	// Size of the rotated image: bounding box of the rotated rectangle.
	int width_rotate = int(height * fabs(a) + width * fabs(b));
	int height_rotate = int(width * fabs(a) + height * fabs(b));
	dst = Mat::zeros(Size(width_rotate, height_rotate), CV_8UC3);
	// 2x3 affine rotation matrix stored in `map`:
	// [ m0 m1 m2 ] ===> [ A11 A12 b1 ]
	// [ m3 m4 m5 ] ===> [ A21 A22 b2 ]
	float map[6];
	Mat map_matrix = Mat(2, 3, CV_32F, map);
	// Rotation centre (centre of the source image).
	CvPoint2D32f center = cvPoint2D32f(width / 2, height / 2);
	CvMat map_matrix2 = map_matrix;
	cv2DRotationMatrix(center, degree, 1.0, &map_matrix2);
	// Shift the translation terms so the result is centred in the new canvas.
	map[2] += (width_rotate - width) / 2;
	map[5] += (height_rotate - height) / 2;
	warpAffine(src, dst, map_matrix, Size(width_rotate, height_rotate), 0, 0, 0); // flags 0,0,0: nearest-neighbour; 1,0,0 would be bilinear
	// imshow("cpu", dst);
}
// CPU: shear ("cut") transform. dir == 0 shears horizontally (canvas widens
// by len), otherwise vertically (canvas grows taller by len). `len` is the
// maximum shear offset in pixels.
extern "C"
void cutImage(const Mat &src, Mat &dst, int dir, int len)
{
	if (0 == dir) {
		// Horizontal shear: each row shifts right proportionally to its
		// distance from the bottom row.
		// Bug fix: use Mat::zeros so the uncovered triangular borders are
		// black instead of uninitialized memory (matches the GPU path).
		dst = Mat::zeros(Size(src.cols + len, src.rows), CV_8UC3);
		uchar *src_data = src.data;
		uchar *dst_data = dst.data;
		double ratio = double(len) / double(dst.rows); // shear offset per row
		for (int i = 0, x = 0; i < src.rows; i++, x++)
		{
			int start = (src.rows - i) * ratio;
			for (int j = start, y = 0; j < src.cols + start; j++, y++)
			{
				*(dst_data + (i*dst.cols + j) * 3 + 0) = *(src_data + (x*src.cols + y) * 3 + 0);
				*(dst_data + (i*dst.cols + j) * 3 + 1) = *(src_data + (x*src.cols + y) * 3 + 1);
				*(dst_data + (i*dst.cols + j) * 3 + 2) = *(src_data + (x*src.cols + y) * 3 + 2);
			}
		}
	}
	else {
		// Vertical shear: each column shifts down proportionally to its
		// horizontal position.
		dst = Mat::zeros(Size(src.cols, src.rows + len), CV_8UC3);
		uchar *src_data = src.data;
		uchar *dst_data = dst.data;
		double ratio = double(len) / double(dst.cols); // shear offset per column
		for (int j = 0, y = 0; j < src.cols; j++, y++)
		{
			int start = j * ratio;
			for (int i = start, x = 0; i < src.rows + start; i++, x++)
			{
				*(dst_data + (i*dst.cols + j) * 3 + 0) = *(src_data + (x*src.cols + y) * 3 + 0);
				*(dst_data + (i*dst.cols + j) * 3 + 1) = *(src_data + (x*src.cols + y) * 3 + 1);
				*(dst_data + (i*dst.cols + j) * 3 + 2) = *(src_data + (x*src.cols + y) * 3 + 2);
			}
		}
	}
}
//////////////////////////////////
// CUDA device detection: select the first device with compute capability >= 1.0.
// Returns false (with a message on stderr) when no usable device exists.
extern "C"
bool initCUDA()
{
	int deviceCount = 0;
	cudaGetDeviceCount(&deviceCount);
	if (0 == deviceCount) {
		fprintf(stderr, "There is no device.\n");
		return false;
	}
	int chosen = 0;
	for (chosen = 0; chosen < deviceCount; chosen++) {
		cudaDeviceProp prop;
		// Accept the first device whose properties we can query and whose
		// major compute capability is at least 1.
		if (cudaGetDeviceProperties(&prop, chosen) == cudaSuccess && prop.major >= 1) {
			break;
		}
	}
	if (chosen == deviceCount) {
		fprintf(stderr, "There is no device supporting CUDA 1.x.\n");
		return false;
	}
	cudaSetDevice(chosen);
	return true;
}
////////////////////////////////
// GPU nearest-neighbour resize kernel.
// Launch layout: grid = (dst_rows, dst_cols) with 1 thread per block; each
// block handles exactly one destination pixel. Steps are in bytes per row.
extern "C"
__global__ void resizeKernel(uchar* _src_dev, uchar * _dst_dev, int _src_step, int _dst_step,
	int _src_rows, int _src_cols, int _dst_rows, int _dst_cols)
{
	const int dstRow = blockIdx.x;
	const int dstCol = blockIdx.y;
	// Destination-over-source scale factors.
	double fRows = _dst_rows / (float)_src_rows;
	double fCols = _dst_cols / (float)_src_cols;
	// Truncating inverse map back into the source image.
	int srcRow = (int)(dstRow / fRows);
	int srcCol = (int)(dstCol / fCols);
	if (srcRow >= 0 && srcRow < _src_rows && srcCol >= 0 && srcCol < _src_cols) {
		uchar *dstPix = _dst_dev + dstRow * _dst_step + 3 * dstCol;
		uchar *srcPix = _src_dev + srcRow * _src_step + 3 * srcCol;
		dstPix[0] = srcPix[0];
		dstPix[1] = srcPix[1];
		dstPix[2] = srcPix[2];
	}
}
// GPU: nearest-neighbour resize of a CV_8UC3 image to size s.
extern "C"
void resizeImageGPU(const Mat &_src, Mat &_dst, const Size &s)
{
	_dst = Mat(s, CV_8UC3);
	uchar *src_data = _src.data;
	int width = _src.cols;
	int height = _src.rows;
	uchar *src_dev, *dst_dev;
	cudaMalloc((void**)&src_dev, 3 * width*height * sizeof(uchar));
	cudaMalloc((void**)&dst_dev, 3 * s.width * s.height * sizeof(uchar));
	cudaMemcpy(src_dev, src_data, 3 * width*height * sizeof(uchar), cudaMemcpyHostToDevice);
	int src_step = _src.step; // bytes per row of _src
	int dst_step = _dst.step; // bytes per row of _dst
	dim3 grid(s.height, s.width); // one block per destination pixel
	resizeKernel<<< grid, 1 >>>(src_dev, dst_dev, src_step, dst_step, height, width, s.height, s.width);
	cudaMemcpy(_dst.data, dst_dev, 3 * s.width * s.height * sizeof(uchar), cudaMemcpyDeviceToHost);
	// Bug fix: release device buffers (the original leaked them every call).
	cudaFree(src_dev);
	cudaFree(dst_dev);
}
////////////////////////////////
// GPU translation kernel: grid = (src_rows, src_cols), one pixel per block.
// `width` is the destination row width in pixels; xlen/ylen are the shifts
// (positive xlen moves the image right, negative ylen moves it down).
extern "C"
__global__ void transferKernel(uchar* _src_dev, uchar * _dst_dev, int width, int height,
	int _src_rows, int _src_cols, int xlen, int ylen)
{
	const int row = blockIdx.x;
	const int col = blockIdx.y;
	// Shift amounts mirror the CPU implementation.
	const int colShift = xlen < 0 ? 0 : abs(xlen);
	const int rowShift = ylen < 0 ? abs(ylen) : 0;
	const int srcOffset = row * gridDim.y + col;
	const int dstOffset = (row + rowShift) * width + col + colShift;
	if (row >= 0 && row < gridDim.x && col >= 0 && col < gridDim.y) {
		*(_dst_dev + dstOffset * 3 + 0) = *(_src_dev + srcOffset * 3 + 0);
		*(_dst_dev + dstOffset * 3 + 1) = *(_src_dev + srcOffset * 3 + 1);
		*(_dst_dev + dstOffset * 3 + 2) = *(_src_dev + srcOffset * 3 + 2);
	}
}
// GPU: translate the image by xlen/ylen; the canvas grows so nothing is lost,
// and uncovered areas are zeroed (black).
extern "C"
void transferImageGPU(const Mat &_src, Mat &_dst, int xlen, int ylen)
{
	int width = _src.cols, height = _src.rows;
	width += abs(xlen);
	height += abs(ylen);
	_dst = Mat::zeros(Size(width, height), CV_8UC3);
	uchar *src_data = _src.data;
	uchar *src_dev, *dst_dev;
	cudaMalloc((void**)&src_dev, 3 * _src.rows * _src.cols * sizeof(uchar));
	cudaMalloc((void**)&dst_dev, 3 * width * height * sizeof(uchar));
	cudaMemcpy(src_dev, src_data, 3 * _src.rows * _src.cols * sizeof(uchar), cudaMemcpyHostToDevice);
	cudaMemset(dst_dev, 0, 3 * width * height * sizeof(uchar));
	dim3 grid(_src.rows, _src.cols); // one block per source pixel
	transferKernel <<< grid, 1 >>>(src_dev, dst_dev, width, height, _src.rows, _src.cols, xlen, ylen);
	cudaMemcpy(_dst.data, dst_dev, 3 * width * height * sizeof(uchar), cudaMemcpyDeviceToHost);
	// Bug fix: release device buffers (the original leaked them every call).
	cudaFree(src_dev);
	cudaFree(dst_dev);
}
////////////////////////////////
// GPU mirror kernel: grid = (rows, cols), one pixel per block.
// num == 0 reflects about the horizontal axis, otherwise about the vertical axis.
extern "C"
__global__ void mirrorKernel(uchar* _src_dev, uchar * _dst_dev, int height, int width, int num)
{
	const int row = blockIdx.x;
	const int col = blockIdx.y;
	// Reflected destination coordinates (exactly one axis is flipped).
	const int dstRow = (0 == num) ? (height - row - 1) : row;
	const int dstCol = (0 == num) ? col : (width - col - 1);
	const int srcOffset = row * gridDim.y + col;
	const int dstOffset = dstRow * gridDim.y + dstCol;
	if (row >= 0 && row < gridDim.x && col >= 0 && col < gridDim.y) {
		*(_dst_dev + dstOffset * 3 + 0) = *(_src_dev + srcOffset * 3 + 0);
		*(_dst_dev + dstOffset * 3 + 1) = *(_src_dev + srcOffset * 3 + 1);
		*(_dst_dev + dstOffset * 3 + 2) = *(_src_dev + srcOffset * 3 + 2);
	}
}
// GPU: mirror the image about the x axis (num == 0) or y axis (otherwise).
extern "C"
void mirrorImageGPU(const Mat &_src, Mat &_dst, int num)
{
	_dst = Mat::zeros(Size(_src.cols, _src.rows), CV_8UC3);
	uchar *src_data = _src.data;
	uchar *src_dev, *dst_dev;
	cudaMalloc((void**)&src_dev, 3 * _src.rows * _src.cols * sizeof(uchar));
	cudaMalloc((void**)&dst_dev, 3 * _src.rows * _src.cols * sizeof(uchar));
	cudaMemcpy(src_dev, src_data, 3 * _src.rows * _src.cols * sizeof(uchar), cudaMemcpyHostToDevice);
	dim3 grid(_src.rows, _src.cols); // one block per pixel
	mirrorKernel <<< grid, 1 >>>(src_dev, dst_dev, _src.rows, _src.cols, num);
	cudaMemcpy(_dst.data, dst_dev, 3 * _src.rows * _src.cols * sizeof(uchar), cudaMemcpyDeviceToHost);
	// Bug fix: release device buffers (the original leaked them every call).
	cudaFree(src_dev);
	cudaFree(dst_dev);
}
////////////////////////////////
// gpu rotation transform
extern "C"
// Round to the nearest integer. NOTE(review): despite the name, no saturation
// is performed — out-of-int-range doubles are not clamped.
__device__ int saturateCast(double num)
{
	return round(num);
}
// GPU rotation kernel: grid = (height_rotate, width_rotate), one destination
// pixel per block. m0..m5 is the inverse affine map (dst -> src), row-major
// [m0 m1 m2; m3 m4 m5]. Source coordinates are evaluated in 10-bit fixed
// point (scale 1024); round_delta (512) gives nearest-neighbour rounding.
__global__ void rotateKernel(uchar* _src_dev, uchar * _dst_dev, int width, int height,
	const double m0, const double m1, const double m2, const double m3, const double m4, const double m5,
	int round_delta)
{
	int y = blockIdx.x;
	int x = blockIdx.y;
	// if (y < gridDim.x && y > 0 && x < gridDim.y && x > 0)
	{
		// Fixed-point pieces of the affine map: source = ((X0+adelta)>>10, (Y0+bdelta)>>10).
		int adelta = saturateCast(m0 * x * 1024);
		int bdelta = saturateCast(m3 * x * 1024);
		int X0 = saturateCast((m1 * y + m2) * 1024) + round_delta;
		int Y0 = saturateCast((m4 * y + m5) * 1024) + round_delta;
		int X = (X0 + adelta) >> 10;
		int Y = (Y0 + bdelta) >> 10;
		// Unsigned comparison doubles as a >= 0 check.
		if ((unsigned)X < width && (unsigned)Y < height)
		{
			*(_dst_dev + (y*gridDim.y + x) * 3 + 0) = *(_src_dev + (Y*width + X) * 3 + 0);
			*(_dst_dev + (y*gridDim.y + x) * 3 + 1) = *(_src_dev + (Y*width + X) * 3 + 1);
			*(_dst_dev + (y*gridDim.y + x) * 3 + 2) = *(_src_dev + (Y*width + X) * 3 + 2);
		}
		else
		{
			// Destination pixel maps outside the source image: paint it black.
			*(_dst_dev + (y*gridDim.y + x) * 3 + 0) = 0;
			*(_dst_dev + (y*gridDim.y + x) * 3 + 1) = 0;
			*(_dst_dev + (y*gridDim.y + x) * 3 + 2) = 0;
		}
	}
}
// GPU: rotate the image by `degree` degrees (clockwise) with nearest-neighbour
// sampling. The destination canvas is the bounding box of the rotated image.
extern "C"
void rotateImageGPU(const Mat &src, Mat &dst, int degree)
{
	degree = -degree; // the matrix below rotates counter-clockwise; negate for clockwise
	double angle = degree * CV_PI / 180.;
	double alpha = cos(angle);
	double beta = sin(angle);
	int width = src.cols;
	int height = src.rows;
	// Bounding box of the rotated rectangle.
	int width_rotate = cvRound(width * fabs(alpha) + height * fabs(beta));
	int height_rotate = cvRound(height * fabs(alpha) + width * fabs(beta));
	// Forward affine map [m0 m1 m2; m3 m4 m5] (src -> dst): rotation about the
	// origin with a translation keeping the result inside the canvas.
	double m[6];
	m[0] = alpha;
	m[1] = beta;
	m[2] = height * -beta;
	m[3] = -m[1];
	m[4] = m[0];
	m[5] = 0;
	dst = Mat(cv::Size(width_rotate, height_rotate), src.type(), cv::Scalar::all(0));
	// Invert the map in place so the kernel can go dst -> src.
	double D = m[0] * m[4] - m[1] * m[3];
	D = D != 0 ? 1. / D : 0;
	double A11 = m[4] * D, A22 = m[0] * D;
	m[0] = A11; m[1] *= -D;
	m[3] *= -D; m[4] = A22;
	double b1 = -m[0] * m[2] - m[1] * m[5];
	double b2 = -m[3] * m[2] - m[4] * m[5];
	m[2] = b1; m[5] = b2;
	int round_delta = 512; // fixed-point rounding bias for nearest-neighbour sampling
	uchar *src_data = src.data;
	uchar *src_dev, *dst_dev;
	cudaMalloc((void**)&src_dev, 3 * src.rows * src.cols * sizeof(uchar));
	cudaMalloc((void**)&dst_dev, 3 * width_rotate * height_rotate * sizeof(uchar));
	cudaMemcpy(src_dev, src_data, 3 * src.rows * src.cols * sizeof(uchar), cudaMemcpyHostToDevice);
	// Bug fix: clear the full 3-channel buffer. The original omitted the
	// factor of 3 and zeroed only the first third of the allocation.
	cudaMemset(dst_dev, 0, 3 * width_rotate * height_rotate * sizeof(uchar));
	dim3 grid(height_rotate, width_rotate); // one block per destination pixel
	rotateKernel <<< grid, 1 >>>(src_dev, dst_dev, width, height,
		m[0], m[1], m[2], m[3], m[4], m[5], round_delta);
	cudaMemcpy(dst.data, dst_dev, 3 * width_rotate * height_rotate * sizeof(uchar), cudaMemcpyDeviceToHost);
	// Bug fix: release device buffers (the original leaked them every call).
	cudaFree(src_dev);
	cudaFree(dst_dev);
}
////////////////////////////////
// GPU shear kernel: grid = (src_rows, src_cols), one pixel per block.
// dir == 0 shifts rows horizontally; otherwise columns shift vertically.
// `width` is the destination row width in pixels; `ratio` is the shear
// displacement contributed per pixel of distance.
extern "C"
__global__ void cutKernel(uchar* _src_dev, uchar * _dst_dev, int width, double ratio, int dir)
{
	const int row = blockIdx.x;
	const int col = blockIdx.y;
	// The shear displaces along exactly one axis.
	int rowShift = 0, colShift = 0;
	if (0 == dir)
	{
		colShift = (gridDim.x - row) * ratio;
	}
	else {
		rowShift = col * ratio;
	}
	const int srcOffset = row * gridDim.y + col;
	const int dstOffset = (row + rowShift) * width + col + colShift;
	if (row >= 0 && row < gridDim.x && col >= 0 && col < gridDim.y) {
		*(_dst_dev + dstOffset * 3 + 0) = *(_src_dev + srcOffset * 3 + 0);
		*(_dst_dev + dstOffset * 3 + 1) = *(_src_dev + srcOffset * 3 + 1);
		*(_dst_dev + dstOffset * 3 + 2) = *(_src_dev + srcOffset * 3 + 2);
	}
}
/*__global__ void cutKernel1(uchar* _src_dev, uchar * _dst_dev, int width, double ratio)
{
int i = blockIdx.x;
int j = blockIdx.y;
int start = j * ratio;
int x = start;
int offset = i*gridDim.y + j;
int tran_offset = (i+x) * width + j;
if (i < gridDim.x && j >= 0 && j < gridDim.y && i >= 0) {
*(_dst_dev + tran_offset*3 + 0) = *(_src_dev + offset*3 + 0);
*(_dst_dev + tran_offset*3 + 1) = *(_src_dev + offset*3 + 1);
*(_dst_dev + tran_offset*3 + 2) = *(_src_dev + offset*3 + 2);
}
}*/
// GPU: shear ("cut") transform. dir == 0 shears horizontally (canvas widens
// by len), otherwise vertically (canvas grows taller by len).
extern "C"
void cutImageGPU(const Mat &_src, Mat &_dst, int dir, int len)
{
	int width = _src.cols, height = _src.rows;
	double ratio;
	if (0 == dir) {
		width += len;
		ratio = (double)len / height; // shear offset per row
	}
	else {
		height += len;
		ratio = (double)len / width; // shear offset per column
	}
	_dst = Mat::zeros(Size(width, height), CV_8UC3);
	uchar *src_data = _src.data;
	uchar *src_dev, *dst_dev;
	cudaMalloc((void**)&src_dev, 3 * _src.rows * _src.cols * sizeof(uchar));
	cudaMalloc((void**)&dst_dev, 3 * width * height * sizeof(uchar));
	cudaMemcpy(src_dev, src_data, 3 * _src.rows * _src.cols * sizeof(uchar), cudaMemcpyHostToDevice);
	cudaMemset(dst_dev, 0, 3 * width * height * sizeof(uchar));
	dim3 grid(_src.rows, _src.cols); // one block per source pixel
	cutKernel <<< grid, 1 >>>(src_dev, dst_dev, width, ratio, dir);
	cudaMemcpy(_dst.data, dst_dev, 3 * width * height * sizeof(uchar), cudaMemcpyDeviceToHost);
	// Bug fix: release device buffers (the original leaked them every call).
	cudaFree(src_dev);
	cudaFree(dst_dev);
}
//int main()
//{
// Mat src = cv::imread("f.bmp" , 1); // 读入图片
// Mat dst_scale_cpu;
// Mat dst_scale_gpu;
// Mat dst_trans_cpu;
// Mat dst_trans_gpu;
// Mat dst_mirror_cpu;
// Mat dst_mirror_gpu;
// Mat dst_rotate_cpu;
// Mat dst_rotate_gpu;
// Mat dst_cut_cpu;
// Mat dst_cut_gpu;
///*
// struct timeval start;
// struct timeval end;
// unsigned long timer;
// gettimeofday(&start, NULL); // 开始计时
// resizeImage(src, dst_scale_cpu, Size(src.cols * 2, src.rows * 2)); // CPU 图片缩放 缩放后的结果存放在dst_cpu中 第三个参数为缩放大小
// gettimeofday(&end, NULL); // 结束计时
// timer = 1000000 * (end.tv_sec - start.tv_sec) + end.tv_usec - start.tv_usec;
// cout << "cpu缩放所耗费的时间:" << timer << "us\n";
//*/
// struct timeval start;
// struct timeval end;
// unsigned long timer;
// gettimeofday(&start, NULL); // 开始计时
/////////////////////////////
// resizeImage(src, dst_scale_cpu, Size(src.cols * 2, src.rows * 2));
// transferImage(src, dst_trans_cpu, 100, -100);
// mirrorImage(src, dst_mirror_cpu, 1);
// rotateImage(src, dst_rotate_cpu, 30);
// cutImage(src, dst_cut_cpu, 0, 50);
/////////////////////////////
// gettimeofday(&end, NULL); // 结束计时
// timer = 1000000 * (end.tv_sec - start.tv_sec) + end.tv_usec - start.tv_usec;
// cout << "cpu所耗费的时间:" << timer << "us\n";
// initCUDA();
// gettimeofday(&start, NULL);
/////////////////////////////
// resizeImageGPU(src, dst_scale_gpu, Size(src.cols * 2, src.rows * 2));
// transferImageGPU(src, dst_trans_gpu, 100, -100);
// mirrorImageGPU(src, dst_mirror_gpu, 1);
// rotateImageGPU(src, dst_rotate_gpu, 30);
// cutImageGPU(src, dst_cut_gpu, 0, 50);
/////////////////////////////
// gettimeofday(&end, NULL);
// timer = 1000000 * (end.tv_sec - start.tv_sec) + end.tv_usec - start.tv_usec;
// cout << "gpu所耗费的时间:" << timer << "us\n";
////////////////////////////
// imshow("原图", src);
// imshow("缩放_cpu", dst_scale_cpu);
// imshow("缩放_gpu", dst_scale_gpu);
// imshow("平移_cpu", dst_trans_cpu);
// imshow("平移_gpu", dst_trans_gpu);
// imshow("镜像_cpu", dst_mirror_cpu);
// imshow("镜像_gpu", dst_mirror_gpu);
// imshow("旋转_cpu", dst_rotate_cpu);
// imshow("旋转_gpu", dst_rotate_gpu);
// imshow("错切_cpu", dst_cut_cpu);
// imshow("错切_gpu", dst_cut_gpu);
// // transferImage(src, dst_trans_cpu, 100, -100);
// // imshow("cpu_trans", dst_trans_cpu);
// // transferImageGPU(src, dst_trans_gpu, 100, -100);
// // imshow("gpu_trans", dst_trans_gpu);
// // mirrorImage(src, dst_mirror_cpu, 1);
// // mirrorImageGPU(src, dst_mirror_gpu, 1);
// // imshow("gpu", dst_mirror_gpu);
// // rotateImage(src, dst_rotate_cpu, 30);
// // rotateImageGPU(src, dst_rotate_gpu, 30);
// // imshow("gpu", dst_rotate_gpu);
// // cutImage(src, dst_cut_cpu, 0, 50);
// // imshow("cpu", dst_cut_cpu);
// // cutImageGPU(src, dst_cut_gpu, 0, 50);
// // imshow("gpu", dst_cut_gpu);
///*
// initCUDA();
// Mat dst_gpu;
// gettimeofday(&start, NULL);
// resizeImageGPU(src, dst_gpu, Size(src.cols * 2, src.rows * 2));
//// imshow("src", src);
//// imshow(" ", dst_gpu);
// gettimeofday(&end, NULL);
// timer = 1000000 * (end.tv_sec - start.tv_sec) + end.tv_usec - start.tv_usec;
// cout << "gpu缩放所耗费的时间:" << timer << "us\n";
//// imshow("Demo", dst_gpu);
//*/
// waitKey(0);
// return 0;
//}
|
4a6e09572796b94a7db01493114600f62071864c.hip
|
// !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
// Homework 2
// Image Blurring
//
// In this homework we are blurring an image. To do this, imagine that we have
// a square array of weight values. For each pixel in the image, imagine that we
// overlay this square array of weights on top of the image such that the center
// of the weight array is aligned with the current pixel. To compute a blurred
// pixel value, we multiply each pair of numbers that line up. In other words, we
// multiply each weight with the pixel underneath it. Finally, we add up all of the
// multiplied numbers and assign that value to our output for the current pixel.
// We repeat this process for all the pixels in the image.
// To help get you started, we have included some useful notes here.
//****************************************************************************
// For a color image that has multiple channels, we suggest separating
// the different color channels so that each color is stored contiguously
// instead of being interleaved. This will simplify your code.
// That is instead of RGBARGBARGBARGBA... we suggest transforming to three
// arrays (as in the previous homework we ignore the alpha channel again):
// 1) RRRRRRRR...
// 2) GGGGGGGG...
// 3) BBBBBBBB...
//
// The original layout is known an Array of Structures (AoS) whereas the
// format we are converting to is known as a Structure of Arrays (SoA).
// As a warm-up, we will ask you to write the kernel that performs this
// separation. You should then write the "meat" of the assignment,
// which is the kernel that performs the actual blur. We provide code that
// re-combines your blurred results for each color channel.
//****************************************************************************
// You must fill in the gaussian_blur kernel to perform the blurring of the
// inputChannel, using the array of weights, and put the result in the outputChannel.
// Here is an example of computing a blur, using a weighted average, for a single
// pixel in a small image.
//
// Array of weights:
//
// 0.0 0.2 0.0
// 0.2 0.2 0.2
// 0.0 0.2 0.0
//
// Image (note that we align the array of weights to the center of the box):
//
// 1 2 5 2 0 3
// -------
// 3 |2 5 1| 6 0 0.0*2 + 0.2*5 + 0.0*1 +
// | |
// 4 |3 6 2| 1 4 -> 0.2*3 + 0.2*6 + 0.2*2 + -> 3.2
// | |
// 0 |4 0 3| 4 2 0.0*4 + 0.2*0 + 0.0*3
// -------
// 9 6 5 0 3 9
//
// (1) (2) (3)
//
// A good starting place is to map each thread to a pixel as you have before.
// Then every thread can perform steps 2 and 3 in the diagram above
// completely independently of one another.
// Note that the array of weights is square, so its height is the same as its width.
// We refer to the array of weights as a filter, and we refer to its width with the
// variable filterWidth.
//****************************************************************************
// Your homework submission will be evaluated based on correctness and speed.
// We test each pixel against a reference solution. If any pixel differs by
// more than some small threshold value, the system will tell you that your
// solution is incorrect, and it will let you try again.
// Once you have gotten that working correctly, then you can think about using
// shared memory and having the threads cooperate to achieve better performance.
//****************************************************************************
// Also note that we've supplied a helpful debugging function called checkCudaErrors.
// You should wrap your allocation and copying statements like we've done in the
// code we're supplying you. Here is an example of the unsafe way to allocate
// memory on the GPU:
//
// hipMalloc(&d_red, sizeof(unsigned char) * numRows * numCols);
//
// Here is an example of the safe way to do the same thing:
//
// checkCudaErrors(hipMalloc(&d_red, sizeof(unsigned char) * numRows * numCols));
//
// Writing code the safe way requires slightly more typing, but is very helpful for
// catching mistakes. If you write code the unsafe way and you make a mistake, then
// any subsequent kernels won't compute anything, and it will be hard to figure out
// why. Writing code the safe way will inform you as soon as you make a mistake.
// Finally, remember to free the memory you allocate at the end of the function.
//****************************************************************************
#include "utils.h"
void cleanup();
__global__
void gaussian_blur(const unsigned char* const inputChannel,
                   unsigned char* const outputChannel,
                   int numRows, int numCols,
                   const float* const filter, const int filterWidth)
{
  // One thread per output pixel over a 2D launch.
  const int col = blockIdx.x * blockDim.x + threadIdx.x;
  const int row = blockIdx.y * blockDim.y + threadIdx.y;
  if (col >= numCols || row >= numRows) {
    // Thread maps outside the image.
    return;
  }
  const int outIdx = row * numCols + col;
  // Weighted sum over the filterWidth x filterWidth neighbourhood; neighbour
  // coordinates are clamped to the image borders before reading.
  float blurred = 0.0f;
  const int half = filterWidth / 2;
  for (int fy = 0; fy < filterWidth; ++fy) {
    for (int fx = 0; fx < filterWidth; ++fx) {
      int nx = min(max(col + fx - half, 0), numCols - 1);
      int ny = min(max(row + fy - half, 0), numRows - 1);
      blurred += filter[fy * filterWidth + fx]
               * static_cast<float>(inputChannel[ny * numCols + nx]);
    }
  }
  outputChannel[outIdx] = blurred;
}
//This kernel takes in an image represented as a uchar4 and splits
//it into three images consisting of only one color channel each
__global__
void separateChannels(const uchar4* const inputImageRGBA,
                      int numRows,
                      int numCols,
                      unsigned char* const redChannel,
                      unsigned char* const greenChannel,
                      unsigned char* const blueChannel)
{
  // One thread per pixel; bail out for threads mapped beyond the image.
  const int col = blockIdx.x * blockDim.x + threadIdx.x;
  const int row = blockIdx.y * blockDim.y + threadIdx.y;
  if (col >= numCols || row >= numRows) {
    return;
  }
  const int idx = row * numCols + col;
  // Scatter the interleaved RGBA pixel into the per-channel (SoA) planes;
  // the alpha component is discarded.
  const uchar4 pixel = inputImageRGBA[idx];
  redChannel[idx] = pixel.x;
  greenChannel[idx] = pixel.y;
  blueChannel[idx] = pixel.z;
}
//This kernel takes in three color channels and recombines them
//into one image. The alpha channel is set to 255 to represent
//that this image has no transparency.
__global__
void recombineChannels(const unsigned char* const redChannel,
                       const unsigned char* const greenChannel,
                       const unsigned char* const blueChannel,
                       uchar4* const outputImageRGBA,
                       int numRows,
                       int numCols)
{
  // One thread per pixel; guard against threads beyond the image bounds.
  const int col = blockIdx.x * blockDim.x + threadIdx.x;
  const int row = blockIdx.y * blockDim.y + threadIdx.y;
  if (col >= numCols || row >= numRows) {
    return;
  }
  const int idx = row * numCols + col;
  // Re-interleave the three planes; alpha is fixed at 255 (fully opaque).
  outputImageRGBA[idx] = make_uchar4(redChannel[idx],
                                     greenChannel[idx],
                                     blueChannel[idx],
                                     255);
}
unsigned char *d_red, *d_green, *d_blue;
float *d_filter;
// Allocates the per-channel device planes and uploads the blur filter weights.
void allocateMemoryAndCopyToGPU(const size_t numRowsImage, const size_t numColsImage,
                                const float* const h_filter, const size_t filterWidth)
{
  const size_t channelBytes = sizeof(unsigned char) * numRowsImage * numColsImage;
  const size_t filterBytes = sizeof(float) * filterWidth * filterWidth;
  // One device plane per colour channel.
  checkCudaErrors(hipMalloc(&d_red, channelBytes));
  checkCudaErrors(hipMalloc(&d_green, channelBytes));
  checkCudaErrors(hipMalloc(&d_blue, channelBytes));
  // Filter weights live in device global memory.
  checkCudaErrors(hipMalloc(&d_filter, filterBytes));
  checkCudaErrors(hipMemcpy(d_filter, h_filter, filterBytes, hipMemcpyHostToDevice));
}
//Drives the full blur pipeline: separate RGBA into planar channels, blur each
//channel with the uploaded filter, then recombine into d_outputImageRGBA.
//Assumes allocateMemoryAndCopyToGPU has already populated d_red/d_green/
//d_blue/d_filter; frees the channel buffers via cleanup() at the end.
void your_gaussian_blur(const uchar4 * const h_inputImageRGBA, uchar4 * const d_inputImageRGBA,
uchar4* const d_outputImageRGBA, const size_t numRows, const size_t numCols,
unsigned char *d_redBlurred,
unsigned char *d_greenBlurred,
unsigned char *d_blueBlurred,
const int filterWidth)
{
// 32x32 = 1024 threads per block: a multiple of the warp size and the
// per-block maximum on current hardware.
const dim3 blockSize(32, 32);
// Ceiling-divide so partial tiles at the right/bottom edges still get a block.
// BUG FIX: the y dimension previously used numCols instead of numRows, which
// skipped rows on tall images and over-launched on wide ones.
const dim3 gridSize((numCols + blockSize.x - 1) / blockSize.x,
(numRows + blockSize.y - 1) / blockSize.y);
// Split the interleaved RGBA image into three planar channels.
hipLaunchKernelGGL(( separateChannels), dim3(gridSize), dim3(blockSize), 0, 0, d_inputImageRGBA,
numRows, numCols,
d_red, d_green, d_blue);
hipDeviceSynchronize(); checkCudaErrors(hipGetLastError());
// Blur each channel independently with the same filter.
hipLaunchKernelGGL(( gaussian_blur), dim3(gridSize), dim3(blockSize), 0, 0, d_red, d_redBlurred,
numRows, numCols,
d_filter, filterWidth);
hipLaunchKernelGGL(( gaussian_blur), dim3(gridSize), dim3(blockSize), 0, 0, d_green, d_greenBlurred,
numRows, numCols,
d_filter, filterWidth);
hipLaunchKernelGGL(( gaussian_blur), dim3(gridSize), dim3(blockSize), 0, 0, d_blue, d_blueBlurred,
numRows, numCols,
d_filter, filterWidth);
hipDeviceSynchronize(); checkCudaErrors(hipGetLastError());
// Recombine the blurred channels into the RGBA output (alpha = 255).
hipLaunchKernelGGL(( recombineChannels), dim3(gridSize), dim3(blockSize), 0, 0, d_redBlurred,
d_greenBlurred,
d_blueBlurred,
d_outputImageRGBA,
numRows,
numCols);
hipDeviceSynchronize(); checkCudaErrors(hipGetLastError());
cleanup();
}
//Free all the device memory allocated in allocateMemoryAndCopyToGPU.
void cleanup() {
checkCudaErrors(hipFree(d_red));
checkCudaErrors(hipFree(d_green));
checkCudaErrors(hipFree(d_blue));
// BUG FIX: d_filter was allocated in allocateMemoryAndCopyToGPU but never
// released, leaking filterWidth*filterWidth floats on every run.
checkCudaErrors(hipFree(d_filter));
}
|
4a6e09572796b94a7db01493114600f62071864c.cu
|
// Homework 2
// Image Blurring
//
// In this homework we are blurring an image. To do this, imagine that we have
// a square array of weight values. For each pixel in the image, imagine that we
// overlay this square array of weights on top of the image such that the center
// of the weight array is aligned with the current pixel. To compute a blurred
// pixel value, we multiply each pair of numbers that line up. In other words, we
// multiply each weight with the pixel underneath it. Finally, we add up all of the
// multiplied numbers and assign that value to our output for the current pixel.
// We repeat this process for all the pixels in the image.
// To help get you started, we have included some useful notes here.
//****************************************************************************
// For a color image that has multiple channels, we suggest separating
// the different color channels so that each color is stored contiguously
// instead of being interleaved. This will simplify your code.
// That is instead of RGBARGBARGBARGBA... we suggest transforming to three
// arrays (as in the previous homework we ignore the alpha channel again):
// 1) RRRRRRRR...
// 2) GGGGGGGG...
// 3) BBBBBBBB...
//
// The original layout is known as an Array of Structures (AoS) whereas the
// format we are converting to is known as a Structure of Arrays (SoA).
// As a warm-up, we will ask you to write the kernel that performs this
// separation. You should then write the "meat" of the assignment,
// which is the kernel that performs the actual blur. We provide code that
// re-combines your blurred results for each color channel.
//****************************************************************************
// You must fill in the gaussian_blur kernel to perform the blurring of the
// inputChannel, using the array of weights, and put the result in the outputChannel.
// Here is an example of computing a blur, using a weighted average, for a single
// pixel in a small image.
//
// Array of weights:
//
// 0.0 0.2 0.0
// 0.2 0.2 0.2
// 0.0 0.2 0.0
//
// Image (note that we align the array of weights to the center of the box):
//
// 1 2 5 2 0 3
// -------
// 3 |2 5 1| 6 0 0.0*2 + 0.2*5 + 0.0*1 +
// | |
// 4 |3 6 2| 1 4 -> 0.2*3 + 0.2*6 + 0.2*2 + -> 3.2
// | |
// 0 |4 0 3| 4 2 0.0*4 + 0.2*0 + 0.0*3
// -------
// 9 6 5 0 3 9
//
// (1) (2) (3)
//
// A good starting place is to map each thread to a pixel as you have before.
// Then every thread can perform steps 2 and 3 in the diagram above
// completely independently of one another.
// Note that the array of weights is square, so its height is the same as its width.
// We refer to the array of weights as a filter, and we refer to its width with the
// variable filterWidth.
//****************************************************************************
// Your homework submission will be evaluated based on correctness and speed.
// We test each pixel against a reference solution. If any pixel differs by
// more than some small threshold value, the system will tell you that your
// solution is incorrect, and it will let you try again.
// Once you have gotten that working correctly, then you can think about using
// shared memory and having the threads cooperate to achieve better performance.
//****************************************************************************
// Also note that we've supplied a helpful debugging function called checkCudaErrors.
// You should wrap your allocation and copying statements like we've done in the
// code we're supplying you. Here is an example of the unsafe way to allocate
// memory on the GPU:
//
// cudaMalloc(&d_red, sizeof(unsigned char) * numRows * numCols);
//
// Here is an example of the safe way to do the same thing:
//
// checkCudaErrors(cudaMalloc(&d_red, sizeof(unsigned char) * numRows * numCols));
//
// Writing code the safe way requires slightly more typing, but is very helpful for
// catching mistakes. If you write code the unsafe way and you make a mistake, then
// any subsequent kernels won't compute anything, and it will be hard to figure out
// why. Writing code the safe way will inform you as soon as you make a mistake.
// Finally, remember to free the memory you allocate at the end of the function.
//****************************************************************************
#include "utils.h"
void cleanup();
__global__
void gaussian_blur(const unsigned char* const inputChannel,
unsigned char* const outputChannel,
int numRows, int numCols,
const float* const filter, const int filterWidth)
{
// One thread per output pixel; 2D launch (x -> column, y -> row).
// filter is a filterWidth x filterWidth array of weights, row-major.
const int col = blockIdx.x * blockDim.x + threadIdx.x;
const int row = blockIdx.y * blockDim.y + threadIdx.y;
// Threads launched past the image edge do no work.
if (col >= numCols || row >= numRows)
return;
const int halfWidth = filterWidth / 2;
// Accumulate in float before the final narrowing store.
float weightedSum = 0.0f;
// Slide the filter window over the neighborhood centered on (row, col).
// Out-of-bounds neighbors are clamped to the nearest edge pixel (replicate
// border), matching the sequential reference solution.
for (int r = 0; r < filterWidth; r++) {
const int srcRow = min(max(row + r - halfWidth, 0), numRows - 1);
for (int c = 0; c < filterWidth; c++) {
const int srcCol = min(max(col + c - halfWidth, 0), numCols - 1);
weightedSum += filter[r * filterWidth + c] * static_cast<float>(inputChannel[srcRow * numCols + srcCol]);
}
}
// Same implicit float -> unsigned char conversion as the reference.
outputChannel[row * numCols + col] = weightedSum;
}
//Splits an interleaved RGBA image (array-of-structures) into three planar
//single-channel arrays (structure-of-arrays). The alpha channel is discarded.
//Expects a 2D launch with one thread per pixel.
__global__
void separateChannels(const uchar4* const inputImageRGBA,
int numRows,
int numCols,
unsigned char* const redChannel,
unsigned char* const greenChannel,
unsigned char* const blueChannel)
{
const int col = blockIdx.x * blockDim.x + threadIdx.x;
const int row = blockIdx.y * blockDim.y + threadIdx.y;
// Guard against threads mapped beyond the image bounds.
if (col >= numCols || row >= numRows)
return;
const int idx = row * numCols + col;
const uchar4 pixel = inputImageRGBA[idx];
redChannel[idx] = pixel.x;
greenChannel[idx] = pixel.y;
blueChannel[idx] = pixel.z;
}
//Merges three planar color channels back into one interleaved uchar4 image.
//The alpha component is forced to 255 (fully opaque).
//Expects a 2D launch with one thread per pixel.
__global__
void recombineChannels(const unsigned char* const redChannel,
const unsigned char* const greenChannel,
const unsigned char* const blueChannel,
uchar4* const outputImageRGBA,
int numRows,
int numCols)
{
const int col = blockIdx.x * blockDim.x + threadIdx.x;
const int row = blockIdx.y * blockDim.y + threadIdx.y;
// Threads mapped past the image edge write nothing.
if (col >= numCols || row >= numRows)
return;
const int idx = row * numCols + col;
outputImageRGBA[idx] = make_uchar4(redChannel[idx],
greenChannel[idx],
blueChannel[idx],
255);
}
unsigned char *d_red, *d_green, *d_blue;
float *d_filter;
//Allocates the per-channel device buffers (d_red/d_green/d_blue) and uploads
//the square blur filter into d_filter. All API calls are checked.
void allocateMemoryAndCopyToGPU(const size_t numRowsImage, const size_t numColsImage,
const float* const h_filter, const size_t filterWidth)
{
// One byte per pixel for each separated color channel.
const size_t channelBytes = sizeof(unsigned char) * numRowsImage * numColsImage;
checkCudaErrors(cudaMalloc(&d_red, channelBytes));
checkCudaErrors(cudaMalloc(&d_green, channelBytes));
checkCudaErrors(cudaMalloc(&d_blue, channelBytes));
// Device copy of the filterWidth x filterWidth weight array.
const size_t filterBytes = sizeof(float) * filterWidth * filterWidth;
checkCudaErrors(cudaMalloc(&d_filter, filterBytes));
checkCudaErrors(cudaMemcpy(d_filter, h_filter, filterBytes, cudaMemcpyHostToDevice));
}
//Drives the full blur pipeline: separate RGBA into planar channels, blur each
//channel with the uploaded filter, then recombine into d_outputImageRGBA.
//Assumes allocateMemoryAndCopyToGPU has already populated d_red/d_green/
//d_blue/d_filter; frees the channel buffers via cleanup() at the end.
void your_gaussian_blur(const uchar4 * const h_inputImageRGBA, uchar4 * const d_inputImageRGBA,
uchar4* const d_outputImageRGBA, const size_t numRows, const size_t numCols,
unsigned char *d_redBlurred,
unsigned char *d_greenBlurred,
unsigned char *d_blueBlurred,
const int filterWidth)
{
// 32x32 = 1024 threads per block: a multiple of the warp size and the
// per-block maximum on current hardware.
const dim3 blockSize(32, 32);
// Ceiling-divide so partial tiles at the right/bottom edges still get a block.
// BUG FIX: the y dimension previously used numCols instead of numRows, which
// skipped rows on tall images and over-launched on wide ones.
const dim3 gridSize((numCols + blockSize.x - 1) / blockSize.x,
(numRows + blockSize.y - 1) / blockSize.y);
// Split the interleaved RGBA image into three planar channels.
separateChannels<<<gridSize, blockSize>>>(d_inputImageRGBA,
numRows, numCols,
d_red, d_green, d_blue);
cudaDeviceSynchronize(); checkCudaErrors(cudaGetLastError());
// Blur each channel independently with the same filter.
gaussian_blur<<<gridSize, blockSize>>>(d_red, d_redBlurred,
numRows, numCols,
d_filter, filterWidth);
gaussian_blur<<<gridSize, blockSize>>>(d_green, d_greenBlurred,
numRows, numCols,
d_filter, filterWidth);
gaussian_blur<<<gridSize, blockSize>>>(d_blue, d_blueBlurred,
numRows, numCols,
d_filter, filterWidth);
cudaDeviceSynchronize(); checkCudaErrors(cudaGetLastError());
// Recombine the blurred channels into the RGBA output (alpha = 255).
recombineChannels<<<gridSize, blockSize>>>(d_redBlurred,
d_greenBlurred,
d_blueBlurred,
d_outputImageRGBA,
numRows,
numCols);
cudaDeviceSynchronize(); checkCudaErrors(cudaGetLastError());
cleanup();
}
//Free all the device memory allocated in allocateMemoryAndCopyToGPU.
void cleanup() {
checkCudaErrors(cudaFree(d_red));
checkCudaErrors(cudaFree(d_green));
checkCudaErrors(cudaFree(d_blue));
// BUG FIX: d_filter was allocated in allocateMemoryAndCopyToGPU but never
// released, leaking filterWidth*filterWidth floats on every run.
checkCudaErrors(cudaFree(d_filter));
}
|
80280b59225dcf961479d2ca2cc05a812e68d21c.hip
|
// !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
#include "includes.h"
// Writes softMaxP[i] - groudTruth[i] into softMaxDelta[i] for i in [0, len).
// Designed for a single 1-D block: each thread handles indices
// threadIdx.x, threadIdx.x + blockDim.x, ... (block-stride loop).
__global__ void g_getSoftMaxDelta(float* softMaxDelta, float* softMaxP, float* groudTruth, int len)
{
for(int id = threadIdx.x; id < len; id += blockDim.x)
{
softMaxDelta[id] = softMaxP[id] - groudTruth[id];
}
}
|
80280b59225dcf961479d2ca2cc05a812e68d21c.cu
|
#include "includes.h"
// Writes softMaxP[i] - groudTruth[i] into softMaxDelta[i] for i in [0, len).
// Designed for a single 1-D block: each thread handles indices
// threadIdx.x, threadIdx.x + blockDim.x, ... (block-stride loop).
__global__ void g_getSoftMaxDelta(float* softMaxDelta, float* softMaxP, float* groudTruth, int len)
{
for(int id = threadIdx.x; id < len; id += blockDim.x)
{
softMaxDelta[id] = softMaxP[id] - groudTruth[id];
}
}
|
e149764685adb1a2e110d1e29236aa61e7c508aa.hip
|
// !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
#include <kfusion/cuda/device.hpp>
#include <kfusion/cuda/texture_binder.hpp>
using namespace kfusion::device;
////////////////////////////////////////////////////////////////////////////////////////////////////////////////
/// Volume initialization
namespace kfusion {
namespace device {
// Resets every voxel of the TSDF volume to packed (tsdf = 0, weight = 0).
// Launch: 2D grid, one thread per (x, y) column; each thread walks the whole
// z extent of its column via tsdf.zstep().
__global__ void clear_volume_kernel(TsdfVolume tsdf) {
int x = threadIdx.x + blockIdx.x * blockDim.x;
int y = threadIdx.y + blockIdx.y * blockDim.y;
if (x < tsdf.dims.x && y < tsdf.dims.y) {
ushort2* beg = tsdf.beg(x, y);
// beg + dims.x*dims.y*dims.z is one-past-the-last element of this column
// assuming zstep() advances by one full xy-slice per step — the loop then
// terminates exactly after dims.z iterations.
ushort2* end = beg + tsdf.dims.x * tsdf.dims.y * tsdf.dims.z;
for (ushort2* pos = beg; pos != end; pos = tsdf.zstep(pos))
*pos = pack_tsdf(0.f, 0);
}
}
} // namespace device
} // namespace kfusion
// Host-side wrapper: zeroes the whole TSDF volume on the device.
// One thread per (x, y) column; the kernel iterates over z internally.
void kfusion::device::clear_volume(TsdfVolume volume) {
dim3 block(32, 8);
dim3 grid(divUp(volume.dims.x, block.x), divUp(volume.dims.y, block.y), 1);
hipLaunchKernelGGL(( clear_volume_kernel), dim3(grid), dim3(block), 0, 0, volume);
cudaSafeCall(hipGetLastError());
}
////////////////////////////////////////////////////////////////////////////////////////////////////////////////
/// Volume integration
namespace kfusion {
namespace device {
// 2D texture over the current depth (distance) map, point-sampled with
// border addressing. NOTE(review): hipify left a mixed API here —
// cudaCreateChannelDescHalf inside a HIP file; confirm it resolves in this
// build's compatibility headers.
texture<float, 2> dists_tex(0, hipFilterModePoint, hipAddressModeBorder, cudaCreateChannelDescHalf());
// Functor that fuses one depth frame into the TSDF volume: each thread owns
// one (x, y) voxel column and marches along z, updating the packed
// (tsdf, weight) pair with a running weighted average.
struct TsdfIntegrator {
Aff3f vol2cam;
Projector proj;
int2 dists_size;
float tranc_dist_inv;
__kf_device__ void operator()(TsdfVolume& volume) const {
int x = blockIdx.x * blockDim.x + threadIdx.x;
int y = blockIdx.y * blockDim.y + threadIdx.y;
if (x >= volume.dims.x || y >= volume.dims.y)
return;
// float3 zstep = vol2cam.R * make_float3(0.f, 0.f, volume.voxel_size.z);
// Per-z-step displacement in camera space (third column of R, scaled).
float3 zstep = make_float3(vol2cam.R.data[0].z, vol2cam.R.data[1].z, vol2cam.R.data[2].z) * volume.voxel_size.z;
float3 vx = make_float3(x * volume.voxel_size.x, y * volume.voxel_size.y, 0);
float3 vc = vol2cam * vx; // transform from volume coordinate frame to camera frame
TsdfVolume::elem_type* vptr = volume.beg(x, y);
for (int i = 0; i < volume.dims.z; ++i, vc += zstep, vptr = volume.zstep(vptr)) {
// Project the voxel center into the depth image.
float2 coo = proj(vc);
//#if defined __CUDA_ARCH__ && __CUDA_ARCH__ >= 300
// this is actually workaround for kepler. it doesn't return 0.f for texture
// fetches for out-of-border coordinates even for cudaaddressmodeborder mode
if (coo.x < 0 || coo.y < 0 || coo.x >= dists_size.x || coo.y >= dists_size.y)
continue;
//#endif
float Dp = tex2D(dists_tex, coo.x, coo.y);
// Skip invalid depth readings and voxels behind the camera.
if (Dp == 0 || vc.z <= 0)
continue;
float sdf = Dp - __fsqrt_rn(dot(vc, vc)); // Dp - norm(v)
// Only update within the truncation band in front of the surface.
if (sdf >= -volume.trunc_dist) {
float tsdf = fmin(1.f, sdf * tranc_dist_inv);
// read and unpack
int weight_prev;
float tsdf_prev = unpack_tsdf(gmem::LdCs(vptr), weight_prev);
// Running weighted average of TSDF values; weight saturates at max_weight.
float tsdf_new = __fdividef(__fmaf_rn(tsdf_prev, weight_prev, tsdf), weight_prev + 1);
int weight_new = min(weight_prev + 1, volume.max_weight);
// pack and write
gmem::StCs(pack_tsdf(tsdf_new, weight_new), vptr);
}
} // for(;;)
}
};
__global__ void integrate_kernel(const TsdfIntegrator integrator, TsdfVolume volume) { integrator(volume); };
} // namespace device
} // namespace kfusion
// Host-side wrapper: configures the integrator functor and the depth texture,
// then launches one thread per (x, y) voxel column to fuse `dists` into the
// volume. Texture state must be set before TextureBinder binds the image.
void kfusion::device::integrate(const PtrStepSz<ushort>& dists, TsdfVolume& volume, const Aff3f& aff,
const Projector& proj) {
TsdfIntegrator ti;
ti.dists_size = make_int2(dists.cols, dists.rows);
ti.vol2cam = aff;
ti.proj = proj;
ti.tranc_dist_inv = 1.f / volume.trunc_dist;
// Point sampling with border addressing (out-of-range fetches read as 0).
dists_tex.filterMode = hipFilterModePoint;
dists_tex.addressMode[0] = hipAddressModeBorder;
dists_tex.addressMode[1] = hipAddressModeBorder;
dists_tex.addressMode[2] = hipAddressModeBorder;
// RAII binder keeps the texture bound for the duration of this scope.
TextureBinder binder(dists, dists_tex, cudaCreateChannelDescHalf());
(void) binder;
dim3 block(32, 8);
dim3 grid(divUp(volume.dims.x, block.x), divUp(volume.dims.y, block.y));
hipLaunchKernelGGL(( integrate_kernel), dim3(grid), dim3(block), 0, 0, ti, volume);
cudaSafeCall(hipGetLastError());
cudaSafeCall(hipDeviceSynchronize());
}
////////////////////////////////////////////////////////////////////////////////////////////////////////////////
/// Volume ray casting
namespace kfusion {
namespace device {
// Ray / axis-aligned-box intersection via the slab method. The box spans
// [0, box_max] (minimum corner fixed at the origin). On return, tnear/tfar
// are the entry/exit parameters along ray_dir; no intersection iff
// tnear >= tfar. Relies on IEEE inf semantics when a ray_dir component is 0.
__kf_device__ void intersect(float3 ray_org, float3 ray_dir, /*float3 box_min,*/ float3 box_max, float& tnear,
float& tfar) {
const float3 box_min = make_float3(0.f, 0.f, 0.f);
// compute intersection of ray with all six bbox planes
float3 invR = make_float3(1.f / ray_dir.x, 1.f / ray_dir.y, 1.f / ray_dir.z);
float3 tbot = invR * (box_min - ray_org);
float3 ttop = invR * (box_max - ray_org);
// re-order intersections to find smallest and largest on each axis
float3 tmin = make_float3(fminf(ttop.x, tbot.x), fminf(ttop.y, tbot.y), fminf(ttop.z, tbot.z));
float3 tmax = make_float3(fmaxf(ttop.x, tbot.x), fmaxf(ttop.y, tbot.y), fmaxf(ttop.z, tbot.z));
// find the largest tmin and the smallest tmax (note: tmin.x appears twice,
// which is redundant but still yields max(tmin.x, tmin.y, tmin.z); same for tfar)
tnear = fmaxf(fmaxf(tmin.x, tmin.y), fmaxf(tmin.x, tmin.z));
tfar = fminf(fminf(tmax.x, tmax.y), fminf(tmax.x, tmax.z));
}
// Trilinear interpolation of the TSDF at a point given in (fractional) voxel
// coordinates. Returns NaN when the 2x2x2 neighborhood would leave the volume.
template <typename Vol>
__kf_device__ float interpolate(const Vol& volume, const float3& p_voxels) {
float3 cf = p_voxels;
// rounding to negative infinity
int3 g = make_int3(__float2int_rd(cf.x), __float2int_rd(cf.y), __float2int_rd(cf.z));
if (g.x < 0 || g.x >= volume.dims.x - 1 || g.y < 0 || g.y >= volume.dims.y - 1 || g.z < 0 ||
g.z >= volume.dims.z - 1)
return numeric_limits<float>::quiet_NaN();
// Fractional offsets inside the base voxel (each in [0, 1)).
float a = cf.x - g.x;
float b = cf.y - g.y;
float c = cf.z - g.z;
// Weighted sum over the 8 surrounding voxel corners.
float tsdf = 0.f;
tsdf += unpack_tsdf(*volume(g.x + 0, g.y + 0, g.z + 0)) * (1 - a) * (1 - b) * (1 - c);
tsdf += unpack_tsdf(*volume(g.x + 0, g.y + 0, g.z + 1)) * (1 - a) * (1 - b) * c;
tsdf += unpack_tsdf(*volume(g.x + 0, g.y + 1, g.z + 0)) * (1 - a) * b * (1 - c);
tsdf += unpack_tsdf(*volume(g.x + 0, g.y + 1, g.z + 1)) * (1 - a) * b * c;
tsdf += unpack_tsdf(*volume(g.x + 1, g.y + 0, g.z + 0)) * a * (1 - b) * (1 - c);
tsdf += unpack_tsdf(*volume(g.x + 1, g.y + 0, g.z + 1)) * a * (1 - b) * c;
tsdf += unpack_tsdf(*volume(g.x + 1, g.y + 1, g.z + 0)) * a * b * (1 - c);
tsdf += unpack_tsdf(*volume(g.x + 1, g.y + 1, g.z + 1)) * a * b * c;
return tsdf;
}
// Ray-marches the TSDF volume from the camera to find the zero crossing
// (surface), writing either a depth map or a point map, plus normals.
// Fields volume_size/time_step/gradient_delta/voxel_size_inv are NOT set by
// the constructor — the host-side raycast() wrappers fill them in.
struct TsdfRaycaster {
TsdfVolume volume;
Aff3f aff;
Mat3f Rinv;
Vec3f volume_size;
Reprojector reproj;
float time_step;
float3 gradient_delta;
float3 voxel_size_inv;
TsdfRaycaster(const TsdfVolume& volume, const Aff3f& aff, const Mat3f& Rinv, const Reprojector& _reproj);
// Nearest-voxel TSDF lookup for a point in metric volume coordinates.
__kf_device__ float fetch_tsdf(const float3& p) const {
// rounding to nearest even
int x = __float2int_rn(p.x * voxel_size_inv.x);
int y = __float2int_rn(p.y * voxel_size_inv.y);
int z = __float2int_rn(p.z * voxel_size_inv.z);
return unpack_tsdf(*volume(x, y, z));
}
// Depth-map variant: one thread per output pixel; writes depth in
// millimeters and a normal, or (0, NaN-normal) if no surface is hit.
__kf_device__ void operator()(PtrStepSz<ushort> depth, PtrStep<Normal> normals) const {
int x = blockIdx.x * blockDim.x + threadIdx.x;
int y = blockIdx.y * blockDim.y + threadIdx.y;
if (x >= depth.cols || y >= depth.rows)
return;
const float qnan = numeric_limits<float>::quiet_NaN();
depth(y, x) = 0;
normals(y, x) = make_float4(qnan, qnan, qnan, qnan);
float3 ray_org = aff.t;
float3 ray_dir = normalized(aff.R * reproj(x, y, 1.f));
// We do subtract voxel size to minimize checks after
// Note: origin of volume coordinate is placed
// in the center of voxel (0,0,0), not in the corner of the voxel!
float3 box_max = volume_size - volume.voxel_size;
float tmin, tmax;
intersect(ray_org, ray_dir, box_max, tmin, tmax);
const float min_dist = 0.f;
tmin = fmax(min_dist, tmin);
if (tmin >= tmax)
return;
tmax -= time_step;
float3 vstep = ray_dir * time_step;
float3 next = ray_org + ray_dir * tmin;
float tsdf_next = fetch_tsdf(next);
// March in fixed steps until the TSDF sign flips from + (in front of the
// surface) to - (behind it); a -/+ flip means exiting through the back face.
for (float tcurr = tmin; tcurr < tmax; tcurr += time_step) {
float tsdf_curr = tsdf_next;
float3 curr = next;
next += vstep;
tsdf_next = fetch_tsdf(next);
if (tsdf_curr < 0.f && tsdf_next > 0.f)
break;
if (tsdf_curr > 0.f && tsdf_next < 0.f) {
// Refine the crossing with trilinear TSDF values and linear interpolation.
float Ft = interpolate(volume, curr * voxel_size_inv);
float Ftdt = interpolate(volume, next * voxel_size_inv);
float Ts = tcurr - __fdividef(time_step * Ft, Ftdt - Ft);
float3 vertex = ray_org + ray_dir * Ts;
float3 normal = compute_normal(vertex);
if (!isnan(normal.x * normal.y * normal.z)) {
// Rotate/translate the result back into the camera frame.
normal = Rinv * normal;
vertex = Rinv * (vertex - aff.t);
normals(y, x) = make_float4(normal.x, normal.y, normal.z, 0);
depth(y, x) = static_cast<ushort>(vertex.z * 1000);
}
break;
}
} /* for (;;) */
}
// Point-map variant: identical marching, but stores the full 3D vertex
// instead of only its depth.
__kf_device__ void operator()(PtrStepSz<Point> points, PtrStep<Normal> normals) const {
int x = blockIdx.x * blockDim.x + threadIdx.x;
int y = blockIdx.y * blockDim.y + threadIdx.y;
if (x >= points.cols || y >= points.rows)
return;
const float qnan = numeric_limits<float>::quiet_NaN();
points(y, x) = normals(y, x) = make_float4(qnan, qnan, qnan, qnan);
float3 ray_org = aff.t;
float3 ray_dir = normalized(aff.R * reproj(x, y, 1.f));
// We do subtract voxel size to minimize checks after
// Note: origin of volume coordinate is placed
// in the center of voxel (0,0,0), not in the corner of the voxel!
float3 box_max = volume_size - volume.voxel_size;
float tmin, tmax;
intersect(ray_org, ray_dir, box_max, tmin, tmax);
const float min_dist = 0.f;
tmin = fmax(min_dist, tmin);
if (tmin >= tmax)
return;
tmax -= time_step;
float3 vstep = ray_dir * time_step;
float3 next = ray_org + ray_dir * tmin;
float tsdf_next = fetch_tsdf(next);
for (float tcurr = tmin; tcurr < tmax; tcurr += time_step) {
float tsdf_curr = tsdf_next;
float3 curr = next;
next += vstep;
tsdf_next = fetch_tsdf(next);
if (tsdf_curr < 0.f && tsdf_next > 0.f)
break;
if (tsdf_curr > 0.f && tsdf_next < 0.f) {
float Ft = interpolate(volume, curr * voxel_size_inv);
float Ftdt = interpolate(volume, next * voxel_size_inv);
float Ts = tcurr - __fdividef(time_step * Ft, Ftdt - Ft);
float3 vertex = ray_org + ray_dir * Ts;
float3 normal = compute_normal(vertex);
if (!isnan(normal.x * normal.y * normal.z)) {
normal = Rinv * normal;
vertex = Rinv * (vertex - aff.t);
normals(y, x) = make_float4(normal.x, normal.y, normal.z, 0.f);
points(y, x) = make_float4(vertex.x, vertex.y, vertex.z, 0.f);
}
break;
}
} /* for (;;) */
}
// Surface normal as the normalized central-difference gradient of the TSDF,
// sampled gradient_delta apart along each axis. May be NaN near the border
// (interpolate returns NaN there), which callers check for.
__kf_device__ float3 compute_normal(const float3& p) const {
float3 n;
float Fx1 = interpolate(volume, make_float3(p.x + gradient_delta.x, p.y, p.z) * voxel_size_inv);
float Fx2 = interpolate(volume, make_float3(p.x - gradient_delta.x, p.y, p.z) * voxel_size_inv);
n.x = __fdividef(Fx1 - Fx2, gradient_delta.x);
float Fy1 = interpolate(volume, make_float3(p.x, p.y + gradient_delta.y, p.z) * voxel_size_inv);
float Fy2 = interpolate(volume, make_float3(p.x, p.y - gradient_delta.y, p.z) * voxel_size_inv);
n.y = __fdividef(Fy1 - Fy2, gradient_delta.y);
float Fz1 = interpolate(volume, make_float3(p.x, p.y, p.z + gradient_delta.z) * voxel_size_inv);
float Fz2 = interpolate(volume, make_float3(p.x, p.y, p.z - gradient_delta.z) * voxel_size_inv);
n.z = __fdividef(Fz1 - Fz2, gradient_delta.z);
return normalized(n);
}
};
// Constructor initializes only the camera/volume bindings; the marching
// parameters (volume_size, time_step, gradient_delta, voxel_size_inv) are
// assigned afterwards by the host-side raycast() wrappers.
inline TsdfRaycaster::TsdfRaycaster(const TsdfVolume& _volume, const Aff3f& _aff, const Mat3f& _Rinv,
const Reprojector& _reproj)
: volume(_volume), aff(_aff), Rinv(_Rinv), reproj(_reproj) {}
// Thin kernel wrappers dispatching to the two TsdfRaycaster::operator()
// overloads (depth-map output vs. point-map output).
__global__ void raycast_kernel(const TsdfRaycaster raycaster, PtrStepSz<ushort> depth, PtrStep<Normal> normals) {
raycaster(depth, normals);
};
__global__ void raycast_kernel(const TsdfRaycaster raycaster, PtrStepSz<Point> points, PtrStep<Normal> normals) {
raycaster(points, normals);
};
} // namespace device
} // namespace kfusion
// Host-side raycast producing a depth map + normals. Fills in the marching
// parameters the TsdfRaycaster constructor leaves unset, then launches one
// thread per output pixel.
void kfusion::device::raycast(const TsdfVolume& volume, const Aff3f& aff, const Mat3f& Rinv, const Reprojector& reproj,
Depth& depth, Normals& normals, float raycaster_step_factor,
float gradient_delta_factor) {
TsdfRaycaster rc(volume, aff, Rinv, reproj);
rc.volume_size = volume.voxel_size * volume.dims;
// Step size and gradient sampling distance scale with the truncation band
// and voxel size respectively.
rc.time_step = volume.trunc_dist * raycaster_step_factor;
rc.gradient_delta = volume.voxel_size * gradient_delta_factor;
rc.voxel_size_inv = 1.f / volume.voxel_size;
dim3 block(32, 8);
dim3 grid(divUp(depth.cols(), block.x), divUp(depth.rows(), block.y));
hipLaunchKernelGGL(( raycast_kernel), dim3(grid), dim3(block), 0, 0, rc, (PtrStepSz<ushort>) depth, normals);
cudaSafeCall(hipGetLastError());
}
// Host-side raycast producing a point map + normals (same setup as the
// depth-map overload, different output type).
void kfusion::device::raycast(const TsdfVolume& volume, const Aff3f& aff, const Mat3f& Rinv, const Reprojector& reproj,
Points& points, Normals& normals, float raycaster_step_factor,
float gradient_delta_factor) {
TsdfRaycaster rc(volume, aff, Rinv, reproj);
rc.volume_size = volume.voxel_size * volume.dims;
// Step size and gradient sampling distance scale with the truncation band
// and voxel size respectively.
rc.time_step = volume.trunc_dist * raycaster_step_factor;
rc.gradient_delta = volume.voxel_size * gradient_delta_factor;
rc.voxel_size_inv = 1.f / volume.voxel_size;
dim3 block(32, 8);
dim3 grid(divUp(points.cols(), block.x), divUp(points.rows(), block.y));
hipLaunchKernelGGL(( raycast_kernel), dim3(grid), dim3(block), 0, 0, rc, (PtrStepSz<Point>) points, normals);
cudaSafeCall(hipGetLastError());
}
////////////////////////////////////////////////////////////////////////////////////////
/// Volume cloud exctraction
namespace kfusion {
namespace device {
////////////////////////////////////////////////////////////////////////////////////////
///// Prefix Scan utility
enum ScanKind { exclusive, inclusive };
// Classic warp-level prefix sum (Hillis-Steele) over a shared-memory buffer.
// Each of the 32 lanes accumulates sums at offsets 1, 2, 4, 8, 16.
// NOTE(review): this relies on the pre-Volta implicit warp-synchronous
// execution model (volatile pointer, no __syncwarp); on Volta+ with
// independent thread scheduling this pattern is not guaranteed safe — confirm
// the supported architectures for this build.
template <ScanKind Kind, class T>
__kf_device__ T scan_warp(volatile T* ptr, const unsigned int idx = threadIdx.x) {
const unsigned int lane = idx & 31; // index of thread in warp (0..31)
if (lane >= 1)
ptr[idx] = ptr[idx - 1] + ptr[idx];
if (lane >= 2)
ptr[idx] = ptr[idx - 2] + ptr[idx];
if (lane >= 4)
ptr[idx] = ptr[idx - 4] + ptr[idx];
if (lane >= 8)
ptr[idx] = ptr[idx - 8] + ptr[idx];
if (lane >= 16)
ptr[idx] = ptr[idx - 16] + ptr[idx];
// Inclusive scan returns the lane's own running total; exclusive returns
// the previous lane's total (identity 0 for lane 0).
if (Kind == inclusive)
return ptr[idx];
else
return (lane > 0) ? ptr[idx - 1] : 0;
}
__device__ int global_count = 0;
__device__ int output_count;
__device__ unsigned int blocks_done = 0;
// Functor that extracts a point cloud from a TSDF volume (HIP build).
// Each thread owns one (x, y) column and walks the whole z axis; for every
// voxel it checks the +x, +y and +z neighbours for a TSDF sign change and, if
// found, emits the linearly interpolated zero-crossing point (transformed to
// world coordinates by `aff`). Points are compacted warp-wide via a shared
// memory scan and appended to `output` with a single atomic per warp.
struct FullScan6 {
enum {
CTA_SIZE_X = 32,
CTA_SIZE_Y = 6,
CTA_SIZE = CTA_SIZE_X * CTA_SIZE_Y,
// at most one point per checked axis (dx, dy, dz) per voxel
MAX_LOCAL_POINTS = 3
};
TsdfVolume volume;
Aff3f aff; // volume -> world transform applied to every emitted point
FullScan6(const TsdfVolume& vol) : volume(vol) {}
// Reads the packed tsdf value and integration weight at voxel (x, y, z).
__kf_device__ float fetch(int x, int y, int z, int& weight) const { return unpack_tsdf(*volume(x, y, z), weight); }
__kf_device__ void operator()(PtrSz<Point> output) const {
int x = threadIdx.x + blockIdx.x * CTA_SIZE_X;
int y = threadIdx.y + blockIdx.y * CTA_SIZE_Y;
#if __CUDA_ARCH__ < 200
__shared__ int cta_buffer[CTA_SIZE];
#endif
// Whole warps that fall fully outside the volume can exit early; partially
// covered warps must stay because the ballots below are warp-collective.
// NOTE(review): mask-less __all/__ballot are pre-Volta intrinsics — confirm
// target architecture before building for SM70+.
#if __CUDA_ARCH__ >= 120
if (__all(x >= volume.dims.x) || __all(y >= volume.dims.y))
return;
#else
if (Emulation::All(x >= volume.dims.x, cta_buffer) || Emulation::All(y >= volume.dims.y, cta_buffer))
return;
#endif
float3 V; // metric voxel center, volume frame
V.x = (x + 0.5f) * volume.voxel_size.x;
V.y = (y + 0.5f) * volume.voxel_size.y;
int ftid = Block::flattenedThreadId();
for (int z = 0; z < volume.dims.z - 1; ++z) {
float3 points[MAX_LOCAL_POINTS];
int local_count = 0;
if (x < volume.dims.x && y < volume.dims.y) {
int W;
float F = fetch(x, y, z, W);
// skip never-integrated (W == 0) and fully-truncated (F == 1) voxels
if (W != 0 && F != 1.f) {
V.z = (z + 0.5f) * volume.voxel_size.z;
// process dx
if (x + 1 < volume.dims.x) {
int Wn;
float Fn = fetch(x + 1, y, z, Wn);
if (Wn != 0 && Fn != 1.f)
if ((F > 0 && Fn < 0) || (F < 0 && Fn > 0)) {
// sign change: interpolate the zero crossing between voxel centers
float3 p;
p.y = V.y;
p.z = V.z;
float Vnx = V.x + volume.voxel_size.x;
float d_inv = 1.f / (fabs(F) + fabs(Fn));
p.x = (V.x * fabs(Fn) + Vnx * fabs(F)) * d_inv;
points[local_count++] = aff * p;
}
} /* if (x + 1 < volume.dims.x) */
// process dy
if (y + 1 < volume.dims.y) {
int Wn;
float Fn = fetch(x, y + 1, z, Wn);
if (Wn != 0 && Fn != 1.f)
if ((F > 0 && Fn < 0) || (F < 0 && Fn > 0)) {
float3 p;
p.x = V.x;
p.z = V.z;
float Vny = V.y + volume.voxel_size.y;
float d_inv = 1.f / (fabs(F) + fabs(Fn));
p.y = (V.y * fabs(Fn) + Vny * fabs(F)) * d_inv;
points[local_count++] = aff * p;
}
} /* if (y + 1 < volume.dims.y) */
// process dz
// if (z + 1 < volume.dims.z) // guaranteed by loop
{
int Wn;
float Fn = fetch(x, y, z + 1, Wn);
if (Wn != 0 && Fn != 1.f)
if ((F > 0 && Fn < 0) || (F < 0 && Fn > 0)) {
float3 p;
p.x = V.x;
p.y = V.y;
float Vnz = V.z + volume.voxel_size.z;
float d_inv = 1.f / (fabs(F) + fabs(Fn));
p.z = (V.z * fabs(Fn) + Vnz * fabs(F)) * d_inv;
points[local_count++] = aff * p;
}
} /* if (z + 1 < volume.dims.z) */
} /* if (W != 0 && F != 1.f) */
} /* if (x < volume.dims.x && y < volume.dims.y) */
// Count how many points this warp produced in total at this iteration.
#if __CUDA_ARCH__ >= 200
/// not we fulfilled points array at current iteration
int total_warp = __popc(__ballot(local_count > 0)) + __popc(__ballot(local_count > 1)) +
__popc(__ballot(local_count > 2));
#else
int tid = Block::flattenedThreadId();
cta_buffer[tid] = local_count;
int total_warp = Emulation::warp_reduce(cta_buffer, tid);
#endif
__shared__ float storage_X[CTA_SIZE * MAX_LOCAL_POINTS];
__shared__ float storage_Y[CTA_SIZE * MAX_LOCAL_POINTS];
__shared__ float storage_Z[CTA_SIZE * MAX_LOCAL_POINTS];
if (total_warp > 0) {
int lane = Warp::laneId();
int storage_index = (ftid >> Warp::LOG_WARP_SIZE) * Warp::WARP_SIZE * MAX_LOCAL_POINTS;
// storage_X is temporarily reused as an int scratch buffer for the scan;
// it is overwritten with real coordinates right after the offsets are known.
volatile int* cta_buffer = (int*) (storage_X + storage_index);
cta_buffer[lane] = local_count;
// exclusive warp scan -> each lane's write offset inside the warp's batch
int offset = scan_warp<exclusive>(cta_buffer, lane);
if (lane == 0) {
// lane 0 reserves the warp's slice of the global output buffer
int old_global_count = atomicAdd(&global_count, total_warp);
cta_buffer[0] = old_global_count;
}
int old_global_count = cta_buffer[0];
// stage this thread's points into the warp's shared-memory slice
for (int l = 0; l < local_count; ++l) {
storage_X[storage_index + offset + l] = points[l].x;
storage_Y[storage_index + offset + l] = points[l].y;
storage_Z[storage_index + offset + l] = points[l].z;
}
// flush the compacted slice to global memory, lanes striding together
Point* pos = output.data + old_global_count + lane;
for (int idx = lane; idx < total_warp; idx += Warp::STRIDE, pos += Warp::STRIDE) {
float x = storage_X[storage_index + idx];
float y = storage_Y[storage_index + idx];
float z = storage_Z[storage_index + idx];
*pos = make_float4(x, y, z, 0.f);
}
// stop scanning once the output buffer is exhausted
bool full = (old_global_count + total_warp) >= output.size;
if (full)
break;
}
} /* for(int z = 0; z < volume.dims.z - 1; ++z) */
///////////////////////////
// prepare for future scans: the last block to finish publishes the clamped
// point count and resets the device-global counters for the next call.
if (ftid == 0) {
unsigned int total_blocks = gridDim.x * gridDim.y * gridDim.z;
unsigned int value = atomicInc(&blocks_done, total_blocks);
// last block
if (value == total_blocks - 1) {
output_count = min((int) output.size, global_count);
blocks_done = 0;
global_count = 0;
}
}
}
};
// Thin launch shim: forwards to FullScan6::operator() (functor passed by value).
__global__ void extract_kernel(const FullScan6 fs, PtrSz<Point> output) { fs(output); }
// Functor that computes one normal per extracted cloud point by taking
// finite differences of the TSDF field around the point. Points too close
// to the volume border (where the difference stencil would leave the volume)
// get a quiet-NaN normal.
struct ExtractNormals {
    typedef float8 float8; // bring the global float8 alias into struct scope (kept from original)
    TsdfVolume volume;
    PtrSz<Point> points;   // input cloud points, world frame
    float3 voxel_size_inv; // 1 / voxel_size per axis (metres -> voxel coords)
    float3 gradient_delta; // finite-difference step in metres, set by the host wrapper
    Aff3f aff;             // volume -> world transform
    Mat3f Rinv;            // inverse rotation used to map points back into the volume frame

    ExtractNormals(const TsdfVolume& vol) : volume(vol) {
        voxel_size_inv.x = 1.f / volume.voxel_size.x;
        voxel_size_inv.y = 1.f / volume.voxel_size.y;
        voxel_size_inv.z = 1.f / volume.voxel_size.z;
    }

    // Converts a metric point (volume frame) to its nearest voxel index.
    __kf_device__ int3 getVoxel(const float3& p) const {
        // rounding to nearest even
        int x = __float2int_rn(p.x * voxel_size_inv.x);
        int y = __float2int_rn(p.y * voxel_size_inv.y);
        int z = __float2int_rn(p.z * voxel_size_inv.z);
        return make_int3(x, y, z);
    }

    // One thread per point: writes a unit normal (rotated to world frame) or
    // NaNs when the stencil would sample outside the safe interior region.
    __kf_device__ void operator()(float4* output) const {
        int idx = threadIdx.x + blockIdx.x * blockDim.x;
        if (idx >= points.size)
            return;
        const float qnan = numeric_limits<float>::quiet_NaN();
        float3 n = make_float3(qnan, qnan, qnan);
        // map the world-frame point back into the volume frame before sampling
        float3 point = Rinv * (tr(points.data[idx]) - aff.t);
        int3 g = getVoxel(point);
        if (g.x > 1 && g.y > 1 && g.z > 1 && g.x < volume.dims.x - 2 && g.y < volume.dims.y - 2 &&
            g.z < volume.dims.z - 2) {
            float3 t;
            t = point;
            t.x += gradient_delta.x; // fixed: removed a stray empty statement here
            float Fx1 = interpolate(volume, t * voxel_size_inv);
            t = point;
            t.x -= gradient_delta.x;
            float Fx2 = interpolate(volume, t * voxel_size_inv);
            n.x = __fdividef(Fx1 - Fx2, gradient_delta.x);
            t = point;
            t.y += gradient_delta.y;
            float Fy1 = interpolate(volume, t * voxel_size_inv);
            t = point;
            t.y -= gradient_delta.y;
            float Fy2 = interpolate(volume, t * voxel_size_inv);
            n.y = __fdividef(Fy1 - Fy2, gradient_delta.y);
            t = point;
            t.z += gradient_delta.z;
            float Fz1 = interpolate(volume, t * voxel_size_inv);
            t = point;
            t.z -= gradient_delta.z;
            float Fz2 = interpolate(volume, t * voxel_size_inv);
            n.z = __fdividef(Fz1 - Fz2, gradient_delta.z);
            // the constant scale cancels under normalization; rotate to world frame
            n = normalized(aff.R * n);
        }
        output[idx] = make_float4(n.x, n.y, n.z, 0);
    }
};
// Thin launch shim: one thread per point; forwards to ExtractNormals::operator().
__global__ void extract_normals_kernel(const ExtractNormals en, float4* output) { en(output); }
} // namespace device
} // namespace kfusion
// Host wrapper (HIP): scans the whole TSDF volume and fills `output` with
// surface points; returns the number of points written (clamped by the
// kernel to the buffer capacity).
size_t kfusion::device::extractCloud(const TsdfVolume& volume, const Aff3f& aff, PtrSz<Point> output) {
    typedef FullScan6 FS;
    FS fs(volume);
    fs.aff = aff;
    dim3 block(FS::CTA_SIZE_X, FS::CTA_SIZE_Y);
    dim3 grid(divUp(volume.dims.x, block.x), divUp(volume.dims.y, block.y));
    hipLaunchKernelGGL(( extract_kernel), dim3(grid), dim3(block), 0, 0, fs, output);
    cudaSafeCall(hipGetLastError());
    cudaSafeCall(hipDeviceSynchronize());
    int size;
    // HIP's *Symbol APIs take the symbol through the HIP_SYMBOL() macro;
    // passing the bare device variable (as the CUDA templated overload allows)
    // does not compile with hipcc.
    cudaSafeCall(hipMemcpyFromSymbol(&size, HIP_SYMBOL(output_count), sizeof(size)));
    return (size_t) size;
}
// Host wrapper (HIP): computes one normal per cloud point, one thread each.
void kfusion::device::extractNormals(const TsdfVolume& volume, const PtrSz<Point>& points, const Aff3f& aff,
                                     const Mat3f& Rinv, float gradient_delta_factor, float4* output) {
    // Configure the device functor with the sampling step and transforms.
    ExtractNormals functor(volume);
    functor.points = points;
    functor.gradient_delta = volume.voxel_size * gradient_delta_factor;
    functor.aff = aff;
    functor.Rinv = Rinv;

    const dim3 threads(256);
    const dim3 blocks(divUp((int) points.size, threads.x));
    hipLaunchKernelGGL(( extract_normals_kernel), dim3(blocks), dim3(threads), 0, 0, functor, output);
    cudaSafeCall(hipGetLastError());
    cudaSafeCall(hipDeviceSynchronize());
}
|
e149764685adb1a2e110d1e29236aa61e7c508aa.cu
|
#include <kfusion/cuda/device.hpp>
#include <kfusion/cuda/texture_binder.hpp>
using namespace kfusion::device;
////////////////////////////////////////////////////////////////////////////////////////////////////////////////
/// Volume initialization
namespace kfusion {
namespace device {
// One thread per (x, y) column: walks the z axis and resets every voxel in
// the column to the packed "empty" value (tsdf = 0, weight = 0).
__global__ void clear_volume_kernel(TsdfVolume tsdf) {
    int x = threadIdx.x + blockIdx.x * blockDim.x;
    int y = threadIdx.y + blockIdx.y * blockDim.y;
    if (x >= tsdf.dims.x || y >= tsdf.dims.y)
        return;
    ushort2* first = tsdf.beg(x, y);
    ushort2* last = first + tsdf.dims.x * tsdf.dims.y * tsdf.dims.z;
    for (ushort2* cell = first; cell != last; cell = tsdf.zstep(cell))
        *cell = pack_tsdf(0.f, 0);
}
} // namespace device
} // namespace kfusion
// Host wrapper: launches clear_volume_kernel over the volume's x/y extent;
// each thread clears one whole z column.
void kfusion::device::clear_volume(TsdfVolume volume) {
    const dim3 threads(32, 8);
    const dim3 blocks(divUp(volume.dims.x, threads.x), divUp(volume.dims.y, threads.y), 1);
    clear_volume_kernel<<<blocks, threads>>>(volume);
    cudaSafeCall(cudaGetLastError());
}
////////////////////////////////////////////////////////////////////////////////////////////////////////////////
/// Volume integration
namespace kfusion {
namespace device {
texture<float, 2> dists_tex(0, cudaFilterModePoint, cudaAddressModeBorder, cudaCreateChannelDescHalf());
// Functor that fuses one depth frame into the TSDF volume. Each thread owns
// one (x, y) column and marches along z, projecting every voxel into the
// depth image (read via dists_tex) and blending a new truncated signed
// distance into the packed voxel with a running weighted average.
struct TsdfIntegrator {
Aff3f vol2cam;        // volume -> camera transform for this frame
Projector proj;       // camera projection (3D camera point -> pixel)
int2 dists_size;      // depth image extent, used for manual border rejection
float tranc_dist_inv; // 1 / truncation distance, scales sdf into [-1, 1]
__kf_device__ void operator()(TsdfVolume& volume) const {
int x = blockIdx.x * blockDim.x + threadIdx.x;
int y = blockIdx.y * blockDim.y + threadIdx.y;
if (x >= volume.dims.x || y >= volume.dims.y)
return;
// float3 zstep = vol2cam.R * make_float3(0.f, 0.f, volume.voxel_size.z);
// Per-z-step increment of the camera-frame voxel position (third column of R
// scaled by the voxel depth) — avoids a full transform per voxel.
float3 zstep = make_float3(vol2cam.R.data[0].z, vol2cam.R.data[1].z, vol2cam.R.data[2].z) * volume.voxel_size.z;
float3 vx = make_float3(x * volume.voxel_size.x, y * volume.voxel_size.y, 0);
float3 vc = vol2cam * vx; // transform from volume coo frame to camera one
TsdfVolume::elem_type* vptr = volume.beg(x, y);
for (int i = 0; i < volume.dims.z; ++i, vc += zstep, vptr = volume.zstep(vptr)) {
float2 coo = proj(vc);
//#if defined __CUDA_ARCH__ && __CUDA_ARCH__ >= 300
// this is actually workaround for kepler. it doesn't return 0.f for texture
// fetches for out-of-border coordinates even for cudaaddressmodeborder mode
if (coo.x < 0 || coo.y < 0 || coo.x >= dists_size.x || coo.y >= dists_size.y)
continue;
//#endif
float Dp = tex2D(dists_tex, coo.x, coo.y);
// skip invalid depth readings and voxels behind the camera
if (Dp == 0 || vc.z <= 0)
continue;
float sdf = Dp - __fsqrt_rn(dot(vc, vc)); // Dp - norm(v)
// only integrate voxels in front of (or within) the truncation band
if (sdf >= -volume.trunc_dist) {
float tsdf = fmin(1.f, sdf * tranc_dist_inv);
// read and unpack
int weight_prev;
float tsdf_prev = unpack_tsdf(gmem::LdCs(vptr), weight_prev);
// running weighted average of the truncated distance
float tsdf_new = __fdividef(__fmaf_rn(tsdf_prev, weight_prev, tsdf), weight_prev + 1);
int weight_new = min(weight_prev + 1, volume.max_weight);
// pack and write
gmem::StCs(pack_tsdf(tsdf_new, weight_new), vptr);
}
} // for(;;)
}
};
// Thin launch shim for TsdfIntegrator (stray trailing semicolon removed —
// it was an empty declaration at namespace scope).
__global__ void integrate_kernel(const TsdfIntegrator integrator, TsdfVolume volume) { integrator(volume); }
} // namespace device
} // namespace kfusion
// Host wrapper: binds the depth frame to dists_tex and launches the TSDF
// integration kernel over the volume's x/y extent.
void kfusion::device::integrate(const PtrStepSz<ushort>& dists, TsdfVolume& volume, const Aff3f& aff,
const Projector& proj) {
TsdfIntegrator ti;
ti.dists_size = make_int2(dists.cols, dists.rows);
ti.vol2cam = aff;
ti.proj = proj;
ti.tranc_dist_inv = 1.f / volume.trunc_dist;
// Texture state must be configured before binding: nearest-neighbour
// fetches, zero outside the image (border mode; also enforced manually in
// the kernel as a Kepler workaround).
dists_tex.filterMode = cudaFilterModePoint;
dists_tex.addressMode[0] = cudaAddressModeBorder;
dists_tex.addressMode[1] = cudaAddressModeBorder;
dists_tex.addressMode[2] = cudaAddressModeBorder;
// RAII binder keeps the texture bound for the duration of this scope.
TextureBinder binder(dists, dists_tex, cudaCreateChannelDescHalf());
(void) binder;
dim3 block(32, 8);
dim3 grid(divUp(volume.dims.x, block.x), divUp(volume.dims.y, block.y));
integrate_kernel<<<grid, block>>>(ti, volume);
cudaSafeCall(cudaGetLastError());
// synchronize before the binder unbinds the texture on scope exit
cudaSafeCall(cudaDeviceSynchronize());
}
////////////////////////////////////////////////////////////////////////////////////////////////////////////////
/// Volume ray casting
namespace kfusion {
namespace device {
// Ray / axis-aligned-box intersection (slab method). The box minimum corner
// is fixed at the origin. On return, [tnear, tfar] is the parametric span of
// the ray inside the box; tnear >= tfar means no intersection.
__kf_device__ void intersect(float3 ray_org, float3 ray_dir, /*float3 box_min,*/ float3 box_max, float& tnear,
                             float& tfar) {
    const float3 box_min = make_float3(0.f, 0.f, 0.f);
    // parametric distances to both slab planes on each axis
    float3 invR = make_float3(1.f / ray_dir.x, 1.f / ray_dir.y, 1.f / ray_dir.z);
    float3 tbot = invR * (box_min - ray_org);
    float3 ttop = invR * (box_max - ray_org);
    // per-axis entry (near) and exit (far) distances
    float3 tmin = make_float3(fminf(ttop.x, tbot.x), fminf(ttop.y, tbot.y), fminf(ttop.z, tbot.z));
    float3 tmax = make_float3(fmaxf(ttop.x, tbot.x), fmaxf(ttop.y, tbot.y), fmaxf(ttop.z, tbot.z));
    // overall entry is the largest per-axis near; exit is the smallest far
    tnear = fmaxf(fmaxf(tmin.x, tmin.y), tmin.z);
    tfar = fminf(fminf(tmax.x, tmax.y), tmax.z);
}
// Trilinear interpolation of the TSDF field at a fractional voxel coordinate.
// Returns quiet NaN when any of the 8 surrounding voxels would fall outside
// the volume.
template <typename Vol>
__kf_device__ float interpolate(const Vol& volume, const float3& p_voxels) {
float3 cf = p_voxels;
// rounding to negative infinity
int3 g = make_int3(__float2int_rd(cf.x), __float2int_rd(cf.y), __float2int_rd(cf.z));
if (g.x < 0 || g.x >= volume.dims.x - 1 || g.y < 0 || g.y >= volume.dims.y - 1 || g.z < 0 ||
g.z >= volume.dims.z - 1)
return numeric_limits<float>::quiet_NaN();
// fractional position inside the cell, used as blend weights
float a = cf.x - g.x;
float b = cf.y - g.y;
float c = cf.z - g.z;
float tsdf = 0.f;
tsdf += unpack_tsdf(*volume(g.x + 0, g.y + 0, g.z + 0)) * (1 - a) * (1 - b) * (1 - c);
tsdf += unpack_tsdf(*volume(g.x + 0, g.y + 0, g.z + 1)) * (1 - a) * (1 - b) * c;
tsdf += unpack_tsdf(*volume(g.x + 0, g.y + 1, g.z + 0)) * (1 - a) * b * (1 - c);
tsdf += unpack_tsdf(*volume(g.x + 0, g.y + 1, g.z + 1)) * (1 - a) * b * c;
tsdf += unpack_tsdf(*volume(g.x + 1, g.y + 0, g.z + 0)) * a * (1 - b) * (1 - c);
tsdf += unpack_tsdf(*volume(g.x + 1, g.y + 0, g.z + 1)) * a * (1 - b) * c;
tsdf += unpack_tsdf(*volume(g.x + 1, g.y + 1, g.z + 0)) * a * b * (1 - c);
tsdf += unpack_tsdf(*volume(g.x + 1, g.y + 1, g.z + 1)) * a * b * c;
return tsdf;
}
// Functor that ray-casts the TSDF volume from a virtual camera. One thread per
// output pixel marches a ray through the volume in steps of `time_step`,
// looking for a positive-to-negative TSDF zero crossing (the surface). The
// crossing is refined by linear interpolation, the normal is taken from the
// TSDF gradient, and both are written back in the camera frame (via Rinv).
// Two overloads produce either a depth map (millimetres) or a point map.
struct TsdfRaycaster {
TsdfVolume volume;
Aff3f aff;            // camera -> world transform (ray origin/direction)
Mat3f Rinv;           // world -> camera rotation for the outputs
Vec3f volume_size;    // metric volume extent, set by the host wrapper
Reprojector reproj;   // pixel -> camera-frame ray direction
float time_step;      // marching step along the ray (metres)
float3 gradient_delta; // finite-difference step for normals (metres)
float3 voxel_size_inv; // metres -> voxel coordinates
TsdfRaycaster(const TsdfVolume& volume, const Aff3f& aff, const Mat3f& Rinv, const Reprojector& _reproj);
// Nearest-voxel TSDF lookup at a metric point (volume frame).
__kf_device__ float fetch_tsdf(const float3& p) const {
// rounding to nearest even
int x = __float2int_rn(p.x * voxel_size_inv.x);
int y = __float2int_rn(p.y * voxel_size_inv.y);
int z = __float2int_rn(p.z * voxel_size_inv.z);
return unpack_tsdf(*volume(x, y, z));
}
// Depth-map variant: writes depth in millimetres plus a normal per pixel.
__kf_device__ void operator()(PtrStepSz<ushort> depth, PtrStep<Normal> normals) const {
int x = blockIdx.x * blockDim.x + threadIdx.x;
int y = blockIdx.y * blockDim.y + threadIdx.y;
if (x >= depth.cols || y >= depth.rows)
return;
const float qnan = numeric_limits<float>::quiet_NaN();
// default outputs: no hit
depth(y, x) = 0;
normals(y, x) = make_float4(qnan, qnan, qnan, qnan);
float3 ray_org = aff.t;
float3 ray_dir = normalized(aff.R * reproj(x, y, 1.f));
// We do subtract voxel size to minimize checks after
// Note: origin of volume coordinate is placeed
// in the center of voxel (0,0,0), not in the corener of the voxel!
float3 box_max = volume_size - volume.voxel_size;
float tmin, tmax;
intersect(ray_org, ray_dir, box_max, tmin, tmax);
const float min_dist = 0.f;
tmin = fmax(min_dist, tmin);
if (tmin >= tmax)
return;
tmax -= time_step;
float3 vstep = ray_dir * time_step;
float3 next = ray_org + ray_dir * tmin;
float tsdf_next = fetch_tsdf(next);
// march until a +/- sign change (front-facing surface) or ray exit
for (float tcurr = tmin; tcurr < tmax; tcurr += time_step) {
float tsdf_curr = tsdf_next;
float3 curr = next;
next += vstep;
tsdf_next = fetch_tsdf(next);
// -/+ transition: leaving the surface from inside — stop without a hit
if (tsdf_curr < 0.f && tsdf_next > 0.f)
break;
if (tsdf_curr > 0.f && tsdf_next < 0.f) {
// refine crossing with trilinear samples at both step endpoints
float Ft = interpolate(volume, curr * voxel_size_inv);
float Ftdt = interpolate(volume, next * voxel_size_inv);
float Ts = tcurr - __fdividef(time_step * Ft, Ftdt - Ft);
float3 vertex = ray_org + ray_dir * Ts;
float3 normal = compute_normal(vertex);
if (!isnan(normal.x * normal.y * normal.z)) {
// rotate outputs back into the camera frame
normal = Rinv * normal;
vertex = Rinv * (vertex - aff.t);
normals(y, x) = make_float4(normal.x, normal.y, normal.z, 0);
// metres -> millimetres for the ushort depth map
depth(y, x) = static_cast<ushort>(vertex.z * 1000);
}
break;
}
} /* for (;;) */
}
// Point-map variant: identical marching; writes the full 3D vertex instead.
__kf_device__ void operator()(PtrStepSz<Point> points, PtrStep<Normal> normals) const {
int x = blockIdx.x * blockDim.x + threadIdx.x;
int y = blockIdx.y * blockDim.y + threadIdx.y;
if (x >= points.cols || y >= points.rows)
return;
const float qnan = numeric_limits<float>::quiet_NaN();
points(y, x) = normals(y, x) = make_float4(qnan, qnan, qnan, qnan);
float3 ray_org = aff.t;
float3 ray_dir = normalized(aff.R * reproj(x, y, 1.f));
// We do subtract voxel size to minimize checks after
// Note: origin of volume coordinate is placeed
// in the center of voxel (0,0,0), not in the corener of the voxel!
float3 box_max = volume_size - volume.voxel_size;
float tmin, tmax;
intersect(ray_org, ray_dir, box_max, tmin, tmax);
const float min_dist = 0.f;
tmin = fmax(min_dist, tmin);
if (tmin >= tmax)
return;
tmax -= time_step;
float3 vstep = ray_dir * time_step;
float3 next = ray_org + ray_dir * tmin;
float tsdf_next = fetch_tsdf(next);
for (float tcurr = tmin; tcurr < tmax; tcurr += time_step) {
float tsdf_curr = tsdf_next;
float3 curr = next;
next += vstep;
tsdf_next = fetch_tsdf(next);
if (tsdf_curr < 0.f && tsdf_next > 0.f)
break;
if (tsdf_curr > 0.f && tsdf_next < 0.f) {
float Ft = interpolate(volume, curr * voxel_size_inv);
float Ftdt = interpolate(volume, next * voxel_size_inv);
float Ts = tcurr - __fdividef(time_step * Ft, Ftdt - Ft);
float3 vertex = ray_org + ray_dir * Ts;
float3 normal = compute_normal(vertex);
if (!isnan(normal.x * normal.y * normal.z)) {
normal = Rinv * normal;
vertex = Rinv * (vertex - aff.t);
normals(y, x) = make_float4(normal.x, normal.y, normal.z, 0.f);
points(y, x) = make_float4(vertex.x, vertex.y, vertex.z, 0.f);
}
break;
}
} /* for (;;) */
}
// Normal from the normalized finite-difference gradient of the TSDF at p.
// Returns NaNs (via interpolate) near the volume border.
__kf_device__ float3 compute_normal(const float3& p) const {
float3 n;
float Fx1 = interpolate(volume, make_float3(p.x + gradient_delta.x, p.y, p.z) * voxel_size_inv);
float Fx2 = interpolate(volume, make_float3(p.x - gradient_delta.x, p.y, p.z) * voxel_size_inv);
n.x = __fdividef(Fx1 - Fx2, gradient_delta.x);
float Fy1 = interpolate(volume, make_float3(p.x, p.y + gradient_delta.y, p.z) * voxel_size_inv);
float Fy2 = interpolate(volume, make_float3(p.x, p.y - gradient_delta.y, p.z) * voxel_size_inv);
n.y = __fdividef(Fy1 - Fy2, gradient_delta.y);
float Fz1 = interpolate(volume, make_float3(p.x, p.y, p.z + gradient_delta.z) * voxel_size_inv);
float Fz2 = interpolate(volume, make_float3(p.x, p.y, p.z - gradient_delta.z) * voxel_size_inv);
n.z = __fdividef(Fz1 - Fz2, gradient_delta.z);
return normalized(n);
}
};
// Constructor only wires in the volume and transforms; volume_size,
// time_step, gradient_delta and voxel_size_inv are filled in by the host
// raycast() wrappers before launch.
inline TsdfRaycaster::TsdfRaycaster(const TsdfVolume& _volume, const Aff3f& _aff, const Mat3f& _Rinv,
const Reprojector& _reproj)
: volume(_volume), aff(_aff), Rinv(_Rinv), reproj(_reproj) {}
// Launch shim for the depth-map raycast overload (stray trailing semicolon
// after the definition removed).
__global__ void raycast_kernel(const TsdfRaycaster raycaster, PtrStepSz<ushort> depth, PtrStep<Normal> normals) {
    raycaster(depth, normals);
}
// Launch shim for the point-map raycast overload (stray trailing semicolon
// after the definition removed).
__global__ void raycast_kernel(const TsdfRaycaster raycaster, PtrStepSz<Point> points, PtrStep<Normal> normals) {
    raycaster(points, normals);
}
} // namespace device
} // namespace kfusion
// Host wrapper: ray-casts the volume into a depth map and a normal map,
// one thread per output pixel.
void kfusion::device::raycast(const TsdfVolume& volume, const Aff3f& aff, const Mat3f& Rinv, const Reprojector& reproj,
                              Depth& depth, Normals& normals, float raycaster_step_factor,
                              float gradient_delta_factor) {
    TsdfRaycaster caster(volume, aff, Rinv, reproj);
    // derived marching parameters, scaled from the volume configuration
    caster.volume_size = volume.voxel_size * volume.dims;
    caster.time_step = volume.trunc_dist * raycaster_step_factor;
    caster.gradient_delta = volume.voxel_size * gradient_delta_factor;
    caster.voxel_size_inv = 1.f / volume.voxel_size;

    const dim3 threads(32, 8);
    const dim3 blocks(divUp(depth.cols(), threads.x), divUp(depth.rows(), threads.y));
    raycast_kernel<<<blocks, threads>>>(caster, (PtrStepSz<ushort>) depth, normals);
    cudaSafeCall(cudaGetLastError());
}
// Host wrapper: ray-casts the volume into a point map and a normal map,
// one thread per output pixel.
void kfusion::device::raycast(const TsdfVolume& volume, const Aff3f& aff, const Mat3f& Rinv, const Reprojector& reproj,
                              Points& points, Normals& normals, float raycaster_step_factor,
                              float gradient_delta_factor) {
    TsdfRaycaster caster(volume, aff, Rinv, reproj);
    // derived marching parameters, scaled from the volume configuration
    caster.volume_size = volume.voxel_size * volume.dims;
    caster.time_step = volume.trunc_dist * raycaster_step_factor;
    caster.gradient_delta = volume.voxel_size * gradient_delta_factor;
    caster.voxel_size_inv = 1.f / volume.voxel_size;

    const dim3 threads(32, 8);
    const dim3 blocks(divUp(points.cols(), threads.x), divUp(points.rows(), threads.y));
    raycast_kernel<<<blocks, threads>>>(caster, (PtrStepSz<Point>) points, normals);
    cudaSafeCall(cudaGetLastError());
}
////////////////////////////////////////////////////////////////////////////////////////
/// Volume cloud exctraction
namespace kfusion {
namespace device {
////////////////////////////////////////////////////////////////////////////////////////
///// Prefix Scan utility
// Warp-level prefix sum over 32 consecutive shared-memory slots (Hillis-Steele
// style, log2(32) = 5 doubling steps). `ptr` must point at a warp-aligned,
// `volatile`-qualified region; the volatile qualifier is what orders the
// intra-warp reads/writes here.
// NOTE(review): this relies on implicit warp-synchronous execution (no
// __syncwarp between steps) — a pre-Volta assumption; confirm before
// targeting independent-thread-scheduling architectures.
enum ScanKind { exclusive, inclusive };
template <ScanKind Kind, class T>
__kf_device__ T scan_warp(volatile T* ptr, const unsigned int idx = threadIdx.x) {
const unsigned int lane = idx & 31; // index of thread in warp (0..31)
if (lane >= 1)
ptr[idx] = ptr[idx - 1] + ptr[idx];
if (lane >= 2)
ptr[idx] = ptr[idx - 2] + ptr[idx];
if (lane >= 4)
ptr[idx] = ptr[idx - 4] + ptr[idx];
if (lane >= 8)
ptr[idx] = ptr[idx - 8] + ptr[idx];
if (lane >= 16)
ptr[idx] = ptr[idx - 16] + ptr[idx];
// inclusive result is in place; exclusive shifts it right by one lane
if (Kind == inclusive)
return ptr[idx];
else
return (lane > 0) ? ptr[idx - 1] : 0;
}
// Device-global bookkeeping for cloud extraction (reset by the last block):
__device__ int global_count = 0;         // running number of points appended to the output buffer
__device__ int output_count;             // final point count (clamped to buffer size), read back by the host
__device__ unsigned int blocks_done = 0; // number of blocks finished; detects the last block of the grid
// Functor that extracts a point cloud from a TSDF volume (CUDA build).
// Each thread owns one (x, y) column and walks the whole z axis; for every
// voxel it checks the +x, +y and +z neighbours for a TSDF sign change and, if
// found, emits the linearly interpolated zero-crossing point (transformed to
// world coordinates by `aff`). Points are compacted warp-wide via a shared
// memory scan and appended to `output` with a single atomic per warp.
struct FullScan6 {
enum {
CTA_SIZE_X = 32,
CTA_SIZE_Y = 6,
CTA_SIZE = CTA_SIZE_X * CTA_SIZE_Y,
// at most one point per checked axis (dx, dy, dz) per voxel
MAX_LOCAL_POINTS = 3
};
TsdfVolume volume;
Aff3f aff; // volume -> world transform applied to every emitted point
FullScan6(const TsdfVolume& vol) : volume(vol) {}
// Reads the packed tsdf value and integration weight at voxel (x, y, z).
__kf_device__ float fetch(int x, int y, int z, int& weight) const { return unpack_tsdf(*volume(x, y, z), weight); }
__kf_device__ void operator()(PtrSz<Point> output) const {
int x = threadIdx.x + blockIdx.x * CTA_SIZE_X;
int y = threadIdx.y + blockIdx.y * CTA_SIZE_Y;
#if __CUDA_ARCH__ < 200
__shared__ int cta_buffer[CTA_SIZE];
#endif
// Whole warps that fall fully outside the volume can exit early; partially
// covered warps must stay because the ballots below are warp-collective.
// NOTE(review): mask-less __all/__ballot are pre-Volta intrinsics — confirm
// target architecture before building for SM70+.
#if __CUDA_ARCH__ >= 120
if (__all(x >= volume.dims.x) || __all(y >= volume.dims.y))
return;
#else
if (Emulation::All(x >= volume.dims.x, cta_buffer) || Emulation::All(y >= volume.dims.y, cta_buffer))
return;
#endif
float3 V; // metric voxel center, volume frame
V.x = (x + 0.5f) * volume.voxel_size.x;
V.y = (y + 0.5f) * volume.voxel_size.y;
int ftid = Block::flattenedThreadId();
for (int z = 0; z < volume.dims.z - 1; ++z) {
float3 points[MAX_LOCAL_POINTS];
int local_count = 0;
if (x < volume.dims.x && y < volume.dims.y) {
int W;
float F = fetch(x, y, z, W);
// skip never-integrated (W == 0) and fully-truncated (F == 1) voxels
if (W != 0 && F != 1.f) {
V.z = (z + 0.5f) * volume.voxel_size.z;
// process dx
if (x + 1 < volume.dims.x) {
int Wn;
float Fn = fetch(x + 1, y, z, Wn);
if (Wn != 0 && Fn != 1.f)
if ((F > 0 && Fn < 0) || (F < 0 && Fn > 0)) {
// sign change: interpolate the zero crossing between voxel centers
float3 p;
p.y = V.y;
p.z = V.z;
float Vnx = V.x + volume.voxel_size.x;
float d_inv = 1.f / (fabs(F) + fabs(Fn));
p.x = (V.x * fabs(Fn) + Vnx * fabs(F)) * d_inv;
points[local_count++] = aff * p;
}
} /* if (x + 1 < volume.dims.x) */
// process dy
if (y + 1 < volume.dims.y) {
int Wn;
float Fn = fetch(x, y + 1, z, Wn);
if (Wn != 0 && Fn != 1.f)
if ((F > 0 && Fn < 0) || (F < 0 && Fn > 0)) {
float3 p;
p.x = V.x;
p.z = V.z;
float Vny = V.y + volume.voxel_size.y;
float d_inv = 1.f / (fabs(F) + fabs(Fn));
p.y = (V.y * fabs(Fn) + Vny * fabs(F)) * d_inv;
points[local_count++] = aff * p;
}
} /* if (y + 1 < volume.dims.y) */
// process dz
// if (z + 1 < volume.dims.z) // guaranteed by loop
{
int Wn;
float Fn = fetch(x, y, z + 1, Wn);
if (Wn != 0 && Fn != 1.f)
if ((F > 0 && Fn < 0) || (F < 0 && Fn > 0)) {
float3 p;
p.x = V.x;
p.y = V.y;
float Vnz = V.z + volume.voxel_size.z;
float d_inv = 1.f / (fabs(F) + fabs(Fn));
p.z = (V.z * fabs(Fn) + Vnz * fabs(F)) * d_inv;
points[local_count++] = aff * p;
}
} /* if (z + 1 < volume.dims.z) */
} /* if (W != 0 && F != 1.f) */
} /* if (x < volume.dims.x && y < volume.dims.y) */
// Count how many points this warp produced in total at this iteration.
#if __CUDA_ARCH__ >= 200
/// not we fulfilled points array at current iteration
int total_warp = __popc(__ballot(local_count > 0)) + __popc(__ballot(local_count > 1)) +
__popc(__ballot(local_count > 2));
#else
int tid = Block::flattenedThreadId();
cta_buffer[tid] = local_count;
int total_warp = Emulation::warp_reduce(cta_buffer, tid);
#endif
__shared__ float storage_X[CTA_SIZE * MAX_LOCAL_POINTS];
__shared__ float storage_Y[CTA_SIZE * MAX_LOCAL_POINTS];
__shared__ float storage_Z[CTA_SIZE * MAX_LOCAL_POINTS];
if (total_warp > 0) {
int lane = Warp::laneId();
int storage_index = (ftid >> Warp::LOG_WARP_SIZE) * Warp::WARP_SIZE * MAX_LOCAL_POINTS;
// storage_X is temporarily reused as an int scratch buffer for the scan;
// it is overwritten with real coordinates right after the offsets are known.
volatile int* cta_buffer = (int*) (storage_X + storage_index);
cta_buffer[lane] = local_count;
// exclusive warp scan -> each lane's write offset inside the warp's batch
int offset = scan_warp<exclusive>(cta_buffer, lane);
if (lane == 0) {
// lane 0 reserves the warp's slice of the global output buffer
int old_global_count = atomicAdd(&global_count, total_warp);
cta_buffer[0] = old_global_count;
}
int old_global_count = cta_buffer[0];
// stage this thread's points into the warp's shared-memory slice
for (int l = 0; l < local_count; ++l) {
storage_X[storage_index + offset + l] = points[l].x;
storage_Y[storage_index + offset + l] = points[l].y;
storage_Z[storage_index + offset + l] = points[l].z;
}
// flush the compacted slice to global memory, lanes striding together
Point* pos = output.data + old_global_count + lane;
for (int idx = lane; idx < total_warp; idx += Warp::STRIDE, pos += Warp::STRIDE) {
float x = storage_X[storage_index + idx];
float y = storage_Y[storage_index + idx];
float z = storage_Z[storage_index + idx];
*pos = make_float4(x, y, z, 0.f);
}
// stop scanning once the output buffer is exhausted
bool full = (old_global_count + total_warp) >= output.size;
if (full)
break;
}
} /* for(int z = 0; z < volume.dims.z - 1; ++z) */
///////////////////////////
// prepare for future scans: the last block to finish publishes the clamped
// point count and resets the device-global counters for the next call.
if (ftid == 0) {
unsigned int total_blocks = gridDim.x * gridDim.y * gridDim.z;
unsigned int value = atomicInc(&blocks_done, total_blocks);
// last block
if (value == total_blocks - 1) {
output_count = min((int) output.size, global_count);
blocks_done = 0;
global_count = 0;
}
}
}
};
// Thin launch shim: forwards to FullScan6::operator() (functor passed by value).
__global__ void extract_kernel(const FullScan6 fs, PtrSz<Point> output) { fs(output); }
// Functor that computes one normal per extracted cloud point by taking
// finite differences of the TSDF field around the point. Points too close
// to the volume border (where the difference stencil would leave the volume)
// get a quiet-NaN normal.
struct ExtractNormals {
    typedef float8 float8; // bring the global float8 alias into struct scope (kept from original)
    TsdfVolume volume;
    PtrSz<Point> points;   // input cloud points, world frame
    float3 voxel_size_inv; // 1 / voxel_size per axis (metres -> voxel coords)
    float3 gradient_delta; // finite-difference step in metres, set by the host wrapper
    Aff3f aff;             // volume -> world transform
    Mat3f Rinv;            // inverse rotation used to map points back into the volume frame

    ExtractNormals(const TsdfVolume& vol) : volume(vol) {
        voxel_size_inv.x = 1.f / volume.voxel_size.x;
        voxel_size_inv.y = 1.f / volume.voxel_size.y;
        voxel_size_inv.z = 1.f / volume.voxel_size.z;
    }

    // Converts a metric point (volume frame) to its nearest voxel index.
    __kf_device__ int3 getVoxel(const float3& p) const {
        // rounding to nearest even
        int x = __float2int_rn(p.x * voxel_size_inv.x);
        int y = __float2int_rn(p.y * voxel_size_inv.y);
        int z = __float2int_rn(p.z * voxel_size_inv.z);
        return make_int3(x, y, z);
    }

    // One thread per point: writes a unit normal (rotated to world frame) or
    // NaNs when the stencil would sample outside the safe interior region.
    __kf_device__ void operator()(float4* output) const {
        int idx = threadIdx.x + blockIdx.x * blockDim.x;
        if (idx >= points.size)
            return;
        const float qnan = numeric_limits<float>::quiet_NaN();
        float3 n = make_float3(qnan, qnan, qnan);
        // map the world-frame point back into the volume frame before sampling
        float3 point = Rinv * (tr(points.data[idx]) - aff.t);
        int3 g = getVoxel(point);
        if (g.x > 1 && g.y > 1 && g.z > 1 && g.x < volume.dims.x - 2 && g.y < volume.dims.y - 2 &&
            g.z < volume.dims.z - 2) {
            float3 t;
            t = point;
            t.x += gradient_delta.x; // fixed: removed a stray empty statement here
            float Fx1 = interpolate(volume, t * voxel_size_inv);
            t = point;
            t.x -= gradient_delta.x;
            float Fx2 = interpolate(volume, t * voxel_size_inv);
            n.x = __fdividef(Fx1 - Fx2, gradient_delta.x);
            t = point;
            t.y += gradient_delta.y;
            float Fy1 = interpolate(volume, t * voxel_size_inv);
            t = point;
            t.y -= gradient_delta.y;
            float Fy2 = interpolate(volume, t * voxel_size_inv);
            n.y = __fdividef(Fy1 - Fy2, gradient_delta.y);
            t = point;
            t.z += gradient_delta.z;
            float Fz1 = interpolate(volume, t * voxel_size_inv);
            t = point;
            t.z -= gradient_delta.z;
            float Fz2 = interpolate(volume, t * voxel_size_inv);
            n.z = __fdividef(Fz1 - Fz2, gradient_delta.z);
            // the constant scale cancels under normalization; rotate to world frame
            n = normalized(aff.R * n);
        }
        output[idx] = make_float4(n.x, n.y, n.z, 0);
    }
};
// Thin launch shim: one thread per point; forwards to ExtractNormals::operator().
__global__ void extract_normals_kernel(const ExtractNormals en, float4* output) { en(output); }
} // namespace device
} // namespace kfusion
// Host wrapper: runs the full-volume scan kernel and returns the number of
// surface points written into `output` (clamped by the kernel to the buffer
// capacity and published through the `output_count` device symbol).
size_t kfusion::device::extractCloud(const TsdfVolume& volume, const Aff3f& aff, PtrSz<Point> output) {
    FullScan6 scanner(volume);
    scanner.aff = aff;

    const dim3 threads(FullScan6::CTA_SIZE_X, FullScan6::CTA_SIZE_Y);
    const dim3 blocks(divUp(volume.dims.x, threads.x), divUp(volume.dims.y, threads.y));
    extract_kernel<<<blocks, threads>>>(scanner, output);
    cudaSafeCall(cudaGetLastError());
    cudaSafeCall(cudaDeviceSynchronize());

    int extracted = 0;
    cudaSafeCall(cudaMemcpyFromSymbol(&extracted, output_count, sizeof(extracted)));
    return (size_t) extracted;
}
// Host wrapper: computes one normal per cloud point, one thread each.
void kfusion::device::extractNormals(const TsdfVolume& volume, const PtrSz<Point>& points, const Aff3f& aff,
                                     const Mat3f& Rinv, float gradient_delta_factor, float4* output) {
    // Configure the device functor with the sampling step and transforms.
    ExtractNormals functor(volume);
    functor.points = points;
    functor.gradient_delta = volume.voxel_size * gradient_delta_factor;
    functor.aff = aff;
    functor.Rinv = Rinv;

    const dim3 threads(256);
    const dim3 blocks(divUp((int) points.size, threads.x));
    extract_normals_kernel<<<blocks, threads>>>(functor, output);
    cudaSafeCall(cudaGetLastError());
    cudaSafeCall(cudaDeviceSynchronize());
}
|
ea42993b981e7771cf1cc76947df86af88a9a976.hip
|
// !!! This is a file automatically generated by hipify!!!
#include <stdio.h>
#include <cstdlib>
#include <device_launch_parameters.h>
#include <hip/hip_runtime.h>
// Grid-stride fill kernel: sets every element of a[0..N) to `num`.
__global__
void initWith(float num, float *a, int N)
{
    int first = threadIdx.x + blockIdx.x * blockDim.x;
    int step = blockDim.x * gridDim.x;
    for (int idx = first; idx < N; idx += step)
        a[idx] = num;
}
// Grid-stride element-wise addition: result[i] = a[i] + b[i] for i in [0, N).
__global__
void addVectorsInto(float *result, float *a, float *b, int N)
{
    int first = threadIdx.x + blockIdx.x * blockDim.x;
    int step = blockDim.x * gridDim.x;
    for (int idx = first; idx < N; idx += step)
        result[idx] = a[idx] + b[idx];
}
// Verifies that every element of `vector` equals `target`; prints a failure
// message and exits the process with status 1 on the first mismatch,
// otherwise prints a success message.
void checkElementsAre(float target, float *vector, int N)
{
    int i = 0;
    while (i < N)
    {
        if (vector[i] != target)
        {
            printf("FAIL: vector[%d] - %0.0f does not equal %0.0f\n", i, vector[i], target);
            exit(1);
        }
        ++i;
    }
    printf("Success! All values calculated correctly.\n");
}
// Demo (HIP): fills two managed vectors concurrently on three streams,
// adds them on the default stream, and verifies the result on the host.
int main()
{
int deviceId;
int numberOfSMs;
hipGetDevice(&deviceId);
// size the grid from the SM count for a fully occupied grid-stride launch
hipDeviceGetAttribute(&numberOfSMs, hipDeviceAttributeMultiprocessorCount, deviceId);
const int N = 2 << 24; // 32M elements
size_t size = N * sizeof(float);
float *a;
float *b;
float *c;
hipMallocManaged(&a, size);
hipMallocManaged(&b, size);
hipMallocManaged(&c, size);
// migrate managed buffers to the GPU up front to avoid on-demand faults
hipMemPrefetchAsync(a, size, deviceId);
hipMemPrefetchAsync(b, size, deviceId);
hipMemPrefetchAsync(c, size, deviceId);
int threadsPerBlock;
int numberOfBlocks;
threadsPerBlock = 256;
numberOfBlocks = 32 * numberOfSMs;
hipError_t addVectorsErr;
hipError_t asyncErr;
/*
* Create 3 streams to run initialize the 3 data vectors in parallel.
*/
hipStream_t stream1, stream2, stream3;
hipStreamCreate(&stream1);
hipStreamCreate(&stream2);
hipStreamCreate(&stream3);
/*
* Give each `initWith` launch its own non-standard stream.
*/
initWith << <numberOfBlocks, threadsPerBlock, 0, stream1 >> > (3, a, N);
initWith << <numberOfBlocks, threadsPerBlock, 0, stream2 >> > (4, b, N);
initWith << <numberOfBlocks, threadsPerBlock, 0, stream3 >> > (0, c, N);
// Launched on the default stream, which (legacy semantics) waits for the
// blocking streams above before running — so a/b/c are fully initialized.
addVectorsInto << <numberOfBlocks, threadsPerBlock >> > (c, a, b, N);
addVectorsErr = hipGetLastError();
if (addVectorsErr != hipSuccess) printf("Error: %s\n", hipGetErrorString(addVectorsErr));
// blocks until the addition is done; surfaces any async execution error
asyncErr = hipDeviceSynchronize();
if (asyncErr != hipSuccess) printf("Error: %s\n", hipGetErrorString(asyncErr));
// migrate the result back toward the CPU before the host-side check
hipMemPrefetchAsync(c, size, hipCpuDeviceId);
checkElementsAre(7, c, N); // 3 + 4 == 7 element-wise
/*
* Destroy streams when they are no longer needed.
*/
hipStreamDestroy(stream1);
hipStreamDestroy(stream2);
hipStreamDestroy(stream3);
hipFree(a);
hipFree(b);
hipFree(c);
}
|
ea42993b981e7771cf1cc76947df86af88a9a976.cu
|
#include <stdio.h>
#include <cstdlib>
#include <device_launch_parameters.h>
#include <cuda_runtime.h>
// Grid-stride fill kernel: sets every element of a[0..N) to `num`.
__global__
void initWith(float num, float *a, int N)
{
    int first = threadIdx.x + blockIdx.x * blockDim.x;
    int step = blockDim.x * gridDim.x;
    for (int idx = first; idx < N; idx += step)
        a[idx] = num;
}
// Grid-stride element-wise addition: result[i] = a[i] + b[i] for i in [0, N).
__global__
void addVectorsInto(float *result, float *a, float *b, int N)
{
    int first = threadIdx.x + blockIdx.x * blockDim.x;
    int step = blockDim.x * gridDim.x;
    for (int idx = first; idx < N; idx += step)
        result[idx] = a[idx] + b[idx];
}
// Verifies that every element of `vector` equals `target`; prints a failure
// message and exits the process with status 1 on the first mismatch,
// otherwise prints a success message.
void checkElementsAre(float target, float *vector, int N)
{
    int i = 0;
    while (i < N)
    {
        if (vector[i] != target)
        {
            printf("FAIL: vector[%d] - %0.0f does not equal %0.0f\n", i, vector[i], target);
            exit(1);
        }
        ++i;
    }
    printf("Success! All values calculated correctly.\n");
}
// Demo (CUDA): fills two managed vectors concurrently on three streams,
// adds them on the default stream, and verifies the result on the host.
int main()
{
int deviceId;
int numberOfSMs;
cudaGetDevice(&deviceId);
// size the grid from the SM count for a fully occupied grid-stride launch
cudaDeviceGetAttribute(&numberOfSMs, cudaDevAttrMultiProcessorCount, deviceId);
const int N = 2 << 24; // 32M elements
size_t size = N * sizeof(float);
float *a;
float *b;
float *c;
cudaMallocManaged(&a, size);
cudaMallocManaged(&b, size);
cudaMallocManaged(&c, size);
// migrate managed buffers to the GPU up front to avoid on-demand faults
cudaMemPrefetchAsync(a, size, deviceId);
cudaMemPrefetchAsync(b, size, deviceId);
cudaMemPrefetchAsync(c, size, deviceId);
int threadsPerBlock;
int numberOfBlocks;
threadsPerBlock = 256;
numberOfBlocks = 32 * numberOfSMs;
cudaError_t addVectorsErr;
cudaError_t asyncErr;
/*
* Create 3 streams to run initialize the 3 data vectors in parallel.
*/
cudaStream_t stream1, stream2, stream3;
cudaStreamCreate(&stream1);
cudaStreamCreate(&stream2);
cudaStreamCreate(&stream3);
/*
* Give each `initWith` launch its own non-standard stream.
*/
initWith << <numberOfBlocks, threadsPerBlock, 0, stream1 >> > (3, a, N);
initWith << <numberOfBlocks, threadsPerBlock, 0, stream2 >> > (4, b, N);
initWith << <numberOfBlocks, threadsPerBlock, 0, stream3 >> > (0, c, N);
// Launched on the default stream, which (legacy semantics) waits for the
// blocking streams above before running — so a/b/c are fully initialized.
addVectorsInto << <numberOfBlocks, threadsPerBlock >> > (c, a, b, N);
addVectorsErr = cudaGetLastError();
if (addVectorsErr != cudaSuccess) printf("Error: %s\n", cudaGetErrorString(addVectorsErr));
// blocks until the addition is done; surfaces any async execution error
asyncErr = cudaDeviceSynchronize();
if (asyncErr != cudaSuccess) printf("Error: %s\n", cudaGetErrorString(asyncErr));
// migrate the result back toward the CPU before the host-side check
cudaMemPrefetchAsync(c, size, cudaCpuDeviceId);
checkElementsAre(7, c, N); // 3 + 4 == 7 element-wise
/*
* Destroy streams when they are no longer needed.
*/
cudaStreamDestroy(stream1);
cudaStreamDestroy(stream2);
cudaStreamDestroy(stream3);
cudaFree(a);
cudaFree(b);
cudaFree(c);
}
|
5fc307e9875a71a751578a6746a4b44a3060939a.hip
|
// !!! This is a file automatically generated by hipify!!!
#include <cstdlib>
#include <cstdio>
#include <string.h>
#include <GL/glut.h>
#include <helper_functions.h>
#include <helper_cuda.h>
#include <helper_cuda_gl.h>
#include <helper_cuda_drvapi.h>
#include <helper_image.h>
#include <helper_math.h>
#include <helper_string.h>
#include <helper_timer.h>
#include <cuda_gl_interop.h>
//#include "spinKernel.cu"
#include "radixsort.cu"
//#include "dSimDataTypes.h"
#define PI 3.14159265358979f
#define TWOPI 6.28318530717959f
///////////////////////////////////////////////////////////////////////////////////////
// File name: spinKernel.cu
// Description: Kernel for spin computations using GPU
///////////////////////////////////////////////////////////////////////////////////////
#ifndef _SPIN_KERNEL_H_
#define _SPIN_KERNEL_H_
#include <stdio.h>
#include <math.h>
#include <helper_math.h>
#include "math_constants.h"
#include "hip/hip_runtime.h"
#include "options.h"
#include "dSimDataTypes.h"
//////////////////////////////////////////////////////////////////////////////////
// Define texture arrays and constants, copied to device from host.
//////////////////////////////////////////////////////////////////////////////////
// 1D textures bound by the host over the simulation's lookup tables.
texture<uint,1,hipReadModeElementType> texCubeCounter;       // per-cube triangle counts (presumably; direct use below is commented out — confirm)
texture<uint,1,hipReadModeElementType> texTrianglesInCubes;  // triangle indices per cube (presumably; direct use below is commented out — confirm)
//texture<uint,1,hipReadModeElementType> texTrgls;
texture<float,1,hipReadModeElementType> texVertices;         // vertex coordinates, 3 floats per vertex
texture<float,1,hipReadModeElementType> texTriangleHelpers;  // 12 floats per triangle: normal (0-2), edge u (3-5), edge v (6-8), dot(u,v), dot(u,u), dot(v,v) (9-11)
texture<float,1,hipReadModeElementType> texRTreeArray;       // flattened R-Tree: per node, [level, branchCount, 7 floats per branch]
texture<uint,1,hipReadModeElementType> texCombinedTreeIndex; // root-node offsets, indexed by fiber*(k_nCompartments-1)+compartment
texture<uint,1,hipReadModeElementType> texTriInfo;           // 3 uints per triangle: fiber index, membrane type, first-vertex index
// Simulation constants/pointers, written by the host before kernel launch.
__device__ uint k_reflectionType;      // 0 = bounce back to start point, 1 = mirror target through the triangle (see reflectPos)
__device__ uint k_triSearchMethod;     // 0 = rectangular grid, 1 = R-Tree (see collDetect; currently hard-wired to grid)
__device__ uint k_numCubes;            // cubes per axis of the uniform grid
__device__ uint k_totalNumCubes;       // total cube count over the whole grid
__device__ uint k_maxTrianglesPerCube; // stride of the per-cube triangle lists
__device__ float k_cubeLength;         // edge length of one grid cube (world coords span [-1,1])
__device__ uint k_nFibers;
__device__ uint k_nCompartments;
__device__ float k_permeability;       // membrane permeability factor used in the collision loops
__device__ float k_deltaTime;
__device__ float *k_T2Values;
__device__ float *k_stdDevs;
__device__ float gradientX;            // applied gradient components
__device__ float gradientY;
__device__ float gradientZ;
__device__ uint m_dNumSpins;           // total number of spins simulated (guards the integrate kernel)
__device__ int interations;            // NOTE(review): likely a typo for "iterations" — confirm before renaming
// NOTE(review): this typedef appears AFTER `uint` is already used above; it
// compiles only because the CUDA/HIP headers predefine uint. Confirm intent.
typedef unsigned int uint;
/////////////////////////////////////////////////////////////////////////////////////
// The structure collResult will be used to store outcomes from checks of whether
// collision occurs between a ray and a triangle.
/////////////////////////////////////////////////////////////////////////////////////
// Outcome of a ray-vs-triangle collision test (see triCollDetect).
typedef struct _collResult
{
    uint collisionType; // 0 = no collision, 1 = collision inside triangle, 2 = on a triangle edge, 3 = on a triangle vertex
    float3 collPoint;   // Point of collision with the triangle
    uint collIndex;     // Index of the collision triangle
    float collDistSq;   // Squared distance from the starting point to the collision point
}collResult;
// Some simple vector ops for float3's (dot and length are defined in cudautil_math)
//#define dot(u,v) ((u).x * (v).x + (u).y * (v).y + (u).z * (v).z)
//#define length(v) sqrt(dot(v,v)) // norm (vector length)
// Distance between two points (norm of difference).
// Macro arguments MUST be parenthesized: the previous form `length(u-v)`
// expanded d(P, LP1+b*v) to length(P - LP1 + b*v), silently flipping the
// sign of b*v (see point_line_dist).
#define d(u,v) length((u)-(v)) // distance (norm of difference)
//////////////////////////////////////////////////////////////////////////
// Function name: point_line_dist
// Description: Returns the shortest distance from a point P to a
// line defined by two points (LP1 and LP2)
//////////////////////////////////////////////////////////////////////////
// Shortest distance from point P to the infinite line through LP1 and LP2.
// Projects P onto the line, then measures the distance to that foot point.
__device__ float point_line_dist(float3 P, float3 LP1, float3 LP2){
    float3 v = LP2-LP1;                      // direction of the line
    float b = dot(P-LP1,v)/dot(v,v);         // projection parameter of P along v
    float3 Pb = LP1 + b*v;                   // closest point on the line
    // Compute the distance from an explicit foot point instead of passing the
    // compound expression LP1+b*v to the d(u,v) macro: the unparenthesized
    // macro expanded d(P, LP1+b*v) to length(P - LP1 + b*v), which flipped
    // the sign of b*v and returned a wrong distance.
    return length(P - Pb);
}
///////////////////////////////////////////////////////////////////////////
// Function name: point_seg_dist
// Description: Returns the shortest distance from a point P to a
// line segment defined by two points (SP1 and SP2)
///////////////////////////////////////////////////////////////////////////
// Shortest distance from point P to the line SEGMENT [SP1, SP2].
// Clamps the projection of P to the segment before measuring.
__device__ float point_seg_dist(float3 P, float3 SP1, float3 SP2){
    float3 seg = SP2 - SP1;
    float proj = dot(P - SP1, seg);          // (unnormalized) projection of P-SP1 onto seg
    if (proj <= 0) return d(P, SP1);         // before the first endpoint
    float segLenSq = dot(seg, seg);
    if (segLenSq <= proj) return d(P, SP2);  // past the second endpoint
    float3 foot = SP1 + proj/segLenSq * seg; // interior closest point
    return d(P, foot);
}
//////////////////////////////////////////////////////////////////////////////
// Function name: boxMuller
// Description: Generates a pair of independent standard normally
// distributed random numbers from a pair of
// uniformly distributed random numbers, using the basic form
// of the Box-Muller transform
// (see http://en.wikipedia.org/wiki/Box%E2%80%93Muller_transform)
//////////////////////////////////////////////////////////////////////////////
// Basic Box-Muller transform: converts two uniform (0,1] samples (u1, u2)
// into two independent standard-normal samples, in place.
// (see http://en.wikipedia.org/wiki/Box%E2%80%93Muller_transform)
__device__ void boxMuller(float& u1, float& u2){
    float radius = sqrtf(-2.0f * __logf(u1));
    float angle = TWOPI * u2;
    u1 = radius * __cosf(angle);
    u2 = radius * __sinf(angle);
}
//////////////////////////////////////////////////////////////////////////////
// Function name: myRand
// Description: Simple multiply-with-carry PRNG that uses two seeds
// (seed[0] and seed[1]) (Algorithm from George Marsaglia:
// http://en.wikipedia.org/wiki/George_Marsaglia)
//////////////////////////////////////////////////////////////////////////////
//__device__ uint myRand(uint seed[]){
// seed[0] = 36969 * (seed[0] & 65535) + (seed[0] >> 16);
// seed[1] = 18000 * (seed[1] & 65535) + (seed[1] >> 16);
// return (seed[0] << 16) + seed[1];
//}
// Marsaglia multiply-with-carry PRNG over two 16-bit lanes held in seed.x
// and seed.y; advances the seed and returns the next 32-bit draw.
// (http://en.wikipedia.org/wiki/George_Marsaglia)
__device__ uint myRand(uint2 &seed){
    seed.x = (seed.x & 65535) * 36969 + (seed.x >> 16);
    seed.y = (seed.y & 65535) * 18000 + (seed.y >> 16);
    return (seed.x << 16) + seed.y;
}
/////////////////////////////////////////////////////////////////////////////
// Function name: myRandf
// Description: Returns a random float r in the range 0<=r<=1
/////////////////////////////////////////////////////////////////////////////
//__device__ float myRandf(uint seed[]){
// return ((float)myRand(seed) / 4294967295.0f);
//}
/////////////////////////////////////////////////////////////////////////////
// Function name: myRandDir
// Description: Return a vector with a specified magnitude (adc) and
// a random direction
/////////////////////////////////////////////////////////////////////////////
//__device__ void myRandDir(uint seed[], float adc, float3& vec){
// // Azimuth and elevation are on the interval [0,2*pi]
// // (2*pi)/4294967294.0 = 1.4629181e-09f
// float az = (float)myRand(seed) * 1.4629181e-09f;
// float el = (float)myRand(seed) * 1.4629181e-09f;
// vec.z = adc * __sinf(el);
// float rcosel = adc * __cosf(el);
// vec.x = rcosel * __cosf(az);
// vec.y = rcosel * __sinf(az);
// return;
//}
//////////////////////////////////////////////////////////////////////////////
// Function name: myRandn
// Description: Returns three normally distributed random numbers
// and one uniformly distributed random number.
//////////////////////////////////////////////////////////////////////////////
/*__device__ void myRandn(uint seed[], float& n1, float& n2, float& n3, float& u){
// We want random numbers in the range (0,1], i.e. 0<n<=1
n1 = ((float)myRand(seed) + 1.0f) / 4294967296.0f;
n2 = ((float)myRand(seed) + 1.0f) / 4294967296.0f;
n3 = ((float)myRand(seed) + 1.0f) / 4294967296.0f;
u = ((float)myRand(seed) + 1.0f) / 4294967296.0f;
// Note that ULONG_MAX=4294967295
float n4 = u;
boxMuller(n1,n2);
boxMuller(n3,n4);
return;
}*/
// Draws three standard-normal samples (n1, n2, n3) and one uniform sample u.
// Each raw 32-bit draw is shifted into the half-open range (0,1], i.e.
// 0 < x <= 1 (note ULONG_MAX = 4294967295, so we divide by 2^32 after +1).
__device__ void myRandn(uint2 &seed, float& n1, float& n2, float& n3, float& u){
    n1 = ((float)myRand(seed) + 1.0f) / 4294967296.0f;
    n2 = ((float)myRand(seed) + 1.0f) / 4294967296.0f;
    n3 = ((float)myRand(seed) + 1.0f) / 4294967296.0f;
    u = ((float)myRand(seed) + 1.0f) / 4294967296.0f;
    // Box-Muller consumes uniforms in pairs; pair n3 with a throwaway copy
    // of u so u itself is returned to the caller untouched.
    float spare = u;
    boxMuller(n1, n2);
    boxMuller(n3, spare);
}
/////////////////////////////////////////////////////////////////////////////////////////////////////////////
// Function name: calcCubePosGPU // Rename later to calcCubePos(...)
// Description: Function calculates the cube cell to which the given position belongs in uniform cube.
// Converts a position coordinate (ranging from (-1,-1,-1) to (1,1,1) to a cube
// coordinate (ranging from (0,0,0) to (m_numCubes-1, m_numCubes-1, m_numCubes-1)).
/////////////////////////////////////////////////////////////////////////////////////////////////////////////
// Maps a world position (each coordinate nominally in [-1,1]) to integer
// cube coordinates in [0, k_numCubes-1]^3, clamping out-of-range positions
// to the grid edges.
// Fix: clamp in SIGNED arithmetic before converting to uint. The previous
// version floored straight into a uint field, so a position below -1
// produced a negative floor that wrapped to a huge unsigned value and was
// clamped to the UPPER edge instead of 0 (and max(0, uint) was a no-op).
__device__ uint3 calcCubePosGPU(float3 p){
    int ix = (int)floorf((p.x + 1.0f) / k_cubeLength);
    int iy = (int)floorf((p.y + 1.0f) / k_cubeLength);
    int iz = (int)floorf((p.z + 1.0f) / k_cubeLength);
    int hi = (int)k_numCubes - 1;   // largest valid cube coordinate
    uint3 cubePos;
    cubePos.x = (uint)max(0, min(ix, hi));
    cubePos.y = (uint)max(0, min(iy, hi));
    cubePos.z = (uint)max(0, min(iz, hi));
    return cubePos;
}
///////////////////////////////////////////////////////////////////////////////////////////////////////////////
// Function name: calcCubeHashGPU // Rename later to calcCubeHash(...)
// Description: Calculate address in cube from position (clamping to edges)
///////////////////////////////////////////////////////////////////////////////////////////////////////////////
// Linearizes 3D cube coordinates into a flat array index (x varies fastest,
// then y, then z).
__device__ uint calcCubeHashGPU(uint3 cubePos){
    uint sliceOffset = cubePos.z * k_numCubes * k_numCubes;
    uint rowOffset = cubePos.y * k_numCubes;
    return sliceOffset + rowOffset + cubePos.x;
}
/////////////////////////////////////////////////////////////////////////////////////////////////////////////////
// Function name: reflectPos
// Description: Given a particle that tries to travel from startPos to targetPos, but collides with triangle
// number collTriIndex at collPos, we calculate the position which the particle gets reflected to.
// This applies if reflectionType==1. If reflectionType==0, we do a simplified reflection,
// where the particle just gets reflected to its original position. This is also done if we hit
// a triangle edge or a triangle vertex (which gives collisionType==2 or collisionTYpe==3).
//////////////////////////////////////////////////////////////////////////////////////////////////////////////////
// Given a particle traveling startPos -> targetPos that collides with
// triangle collTriIndex at collPos, returns the reflected end position.
// Full mirror reflection applies only when k_reflectionType==1 AND the hit
// is inside the triangle (collisionType==1); otherwise the particle simply
// returns to its starting point.
__device__ float3 reflectPos(float3 startPos, float3 targetPos, float3 collPos, uint collTriIndex, uint collisionType){
    float3 reflectedPos;
    if ((k_reflectionType==0)|(collisionType>1)){ // We simply reflect back to the starting point
        reflectedPos = startPos;
    } else { // We reflect the target point through the triangle - see http://en.wikipedia.org/wiki/Transformation_matrix
        // Shift so the collision point is the origin, apply the Householder
        // reflection I - 2*n*n^T with the triangle's stored normal (first 3
        // floats of its 12-float helper record), then shift back.
        // NOTE(review): assumes the stored normal is unit length — confirm
        // against how texTriangleHelpers is populated on the host.
        float3 sPosShifted = targetPos-collPos;
        float3 normalVec;
        normalVec = make_float3(tex1Dfetch(texTriangleHelpers,collTriIndex*12+0),tex1Dfetch(texTriangleHelpers,collTriIndex*12+1),tex1Dfetch(texTriangleHelpers,collTriIndex*12+2));
        reflectedPos.x = (1-2*normalVec.x*normalVec.x)*sPosShifted.x - 2*normalVec.x*normalVec.y*sPosShifted.y - 2*normalVec.x*normalVec.z*sPosShifted.z + collPos.x;
        reflectedPos.y = -2*normalVec.x*normalVec.y*sPosShifted.x + (1-2*normalVec.y*normalVec.y)*sPosShifted.y - 2*normalVec.y*normalVec.z*sPosShifted.z + collPos.y;
        reflectedPos.z = -2*normalVec.x*normalVec.z*sPosShifted.x - 2*normalVec.y*normalVec.z*sPosShifted.y + (1-2*normalVec.z*normalVec.z)*sPosShifted.z + collPos.z;
    }
    return reflectedPos;
}
//////////////////////////////////////////////////////////////////////////////////////////////
// Function name: triCollDetect
// Description: Find whether the path from oPos to pos intersects triangle no. triIndex.
// Returns the collision result, which consists of
// result.collPoint = the collision/intersection point between
// the ray and the triangle.
// result.collIndex = the index of the collision triangle if
// collision occurs
// result.collisionType = 0 if no collision, 1 within triangle boundaries,
// 2 if collision with triangle edge, 3 if
// collision with triangle vertex
// result.collDistSq = the distance (squared) from oPos to
// the collision point.
//////////////////////////////////////////////////////////////////////////////////////////////
// Tests whether the segment oPos -> pos intersects triangle triIndex.
// Returns a collResult whose collisionType is 0 (miss), 1 (interior hit),
// 2 (edge hit) or 3 (vertex hit); on a hit, collPoint/collIndex/collDistSq
// are filled in. Uses the precomputed per-triangle record in
// texTriangleHelpers (normal, edge vectors u and v, and their dot products).
__device__ collResult triCollDetect(float3 oPos, float3 pos, uint triIndex){
    uint firstPointIndex;
    float uv, uu, vv, wu, wv, r, s, t, stDen;
    float3 triP1, d, w, n, u, v, collPoint;
    collResult result;
    result.collisionType = 0;
    // firstPointIndex is the index of the "first" point in the triangle
    firstPointIndex = tex1Dfetch(texTriInfo, triIndex*3+2);
    // triP1 holds the coordinates of the first point
    triP1 = make_float3(tex1Dfetch(texVertices,firstPointIndex*3+0),tex1Dfetch(texVertices,firstPointIndex*3+1),tex1Dfetch(texVertices,firstPointIndex*3+2));
    // n: normal to the triangle. u: vector from first point to second point. v: vector from first point to third point. uv, uu, vv: dot products.
    n = make_float3(tex1Dfetch(texTriangleHelpers,triIndex*12+0),tex1Dfetch(texTriangleHelpers,triIndex*12+1),tex1Dfetch(texTriangleHelpers,triIndex*12+2));
    u = make_float3(tex1Dfetch(texTriangleHelpers,triIndex*12+3),tex1Dfetch(texTriangleHelpers,triIndex*12+4),tex1Dfetch(texTriangleHelpers,triIndex*12+5));
    v = make_float3(tex1Dfetch(texTriangleHelpers,triIndex*12+6),tex1Dfetch(texTriangleHelpers,triIndex*12+7),tex1Dfetch(texTriangleHelpers,triIndex*12+8));
    uv = tex1Dfetch(texTriangleHelpers,triIndex*12+9);
    uu = tex1Dfetch(texTriangleHelpers,triIndex*12+10);
    vv = tex1Dfetch(texTriangleHelpers,triIndex*12+11);
    // First find whether the path intersects the plane defined by triangle i. See method at http://softsurfer.com/Archive/algorithm_0105/algorithm_0105.htm
    // r is the fraction along oPos->pos at which the plane is crossed;
    // only 0<r<1 means the segment itself reaches the plane.
    r = dot(n,triP1-oPos)/dot(n,pos-oPos);
    if ((0<r)&(r<1)){
        // Then find if the path intersects the triangle itself. See method at http://softsurfer.com/Archive/algorithm_0105/algorithm_0105.htm
        // (s,t) are the intersection's coordinates in the (u,v) edge basis;
        // the interior of the triangle is s>=0, t>=0, s+t<=1.
        d = r*(pos-oPos);
        collPoint = oPos + d;
        w = collPoint-triP1;
        wu = dot(w,u);
        wv = dot(w,v);
        stDen = uv*uv-uu*vv;
        s = (uv*wv-vv*wu)/stDen;
        t = (uv*wu-uu*wv)/stDen;
        if ( (s>=0)&(t>=0)&(s+t<=1) ){ // We have a collision with the triangle
            result.collDistSq = dot(d,d);
            result.collIndex = triIndex;
            result.collPoint = collPoint;
            result.collisionType = 1;
            // NOTE(review): exact float equality below will almost never
            // fire in practice, so edge/vertex hits (types 2/3) are
            // effectively measure-zero events — confirm this is intended.
            if ( (s==0)|(t==0)|(s+t==1) ){ // The collision point is on a triangle edge
                result.collisionType = 2;
                if ( ((s==0)&(t==0))|((s==0)&(t==1))|((s==1)&(t==0)) ){ // The collision point is on a triangle vertex
                    result.collisionType = 3;
                }
            }
        }
    }
    return result;
}
/////////////////////////////////////////////////////////////////////////////////////////
// Function name: SearchRTreeArray
// Description: Find the leaf rectangles in the R-Tree which intersect the rectangle
// rect (=[x_min,y_min,z_min,x_max,y_max,z_max]). Normally, rect will
// be a bounding rectangle for a particle path and the leaf rectangles
// of the R-Tree will be bounding rectangles for fiber triangles.
// When the rectangles intersect, that means the particle might collide
// with the triangle. The indices of such triangles are written into
// intersectArray (to be further checked for actual collisions), and
// the number of intersecting rectangles is returned in the output
// foundCount.
/////////////////////////////////////////////////////////////////////////////////////////
// Finds the leaf rectangles of the R-Tree that intersect rect
// (= [x_min,y_min,z_min,x_max,y_max,z_max]); the corresponding triangle
// indices are written into interSectArray and their count returned.
// The tree searched depends on (compartment, fiberInside), looked up via
// texCombinedTreeIndex. Traversal is an explicit depth-first walk with a
// manual stack (no recursion in device code).
// NOTE(review): neither stack[100] nor interSectArray bounds are checked;
// the caller passes a 1200-entry array — confirm both are large enough
// for the meshes in use.
__device__ uint SearchRTreeArray(float* rect, uint* interSectArray, uint8 &compartment, uint16 &fiberInside){
    uint foundCount = 0;
    uint stack[100]; // Maximum necessary stack size should be 1+7*(treeHeight) = 1+7*(n_levels-1). 100 should suffice for n_levels <= 15 - very big tree
    int stackIndex = 0;
    //printf("k_nFibers: %u\n", k_nFibers);
    //printf("k_nCompartments: %u\n", k_nCompartments);
    //uint k_nFibers = 17, k_nCompartments = 3;
    //stack[stackIndex] = 0;
    if (compartment != 0){ // We push the location of the root node onto the stack
        stack[stackIndex] = tex1Dfetch(texCombinedTreeIndex,fiberInside*(k_nCompartments-1)+compartment); // = 0 for "first" tree, i.e. tree corresponding to innermost compartment
    } else{
        stack[stackIndex] = tex1Dfetch(texCombinedTreeIndex,0);
        //printf("k_nFibers: %u\n", k_nFibers);
        //printf("k_nCompartments: %u\n", k_nCompartments);
        //printf("StackIndex: %u\n", stackIndex);
        //printf("Stack for compartment %u: %i\n", compartment, stack[stackIndex]);
    }
    //printf("(in spinKernel.cu::SearchRTreeArray): rect: [%g,%g,%g,%g,%g,%g]\n", rect[0],rect[1],rect[2],rect[3],rect[4],rect[5]);
    //printf("(in spinKernel.cu::SearchRTreeArray): stack[%i]: %u\n", stackIndex, stack[stackIndex]);
    stackIndex++;
    uint currentNodeIndex;
    while (stackIndex > 0){ // Stop when we've emptied the stack
        stackIndex--; // Pop the top node off the stack
        currentNodeIndex = stack[stackIndex];
        //printf("(in spinKernel.cu::SearchRTreeArray): currentNodeIndex: %u\n", currentNodeIndex);
        // Node layout in texRTreeArray: [level, branchCount, branch0(7 floats), branch1(7), ...];
        // each branch is [childIndex-or-triIndex, x_min, y_min, z_min, x_max, y_max, z_max].
        for (int m=tex1Dfetch(texRTreeArray,currentNodeIndex+1)-1; m>=0; m--){
            uint currentBranchIndex = currentNodeIndex+2 + m*7;
            //printf("(in spinKernel.cu::SearchRTreeArray): m: %u\n", m);
            //printf("(in spinKernel.cu::SearchRTreeArray): currentBranchIndex: %u\n", currentBranchIndex);
            //See if the branch rectangle overlaps with the input rectangle
            if (!( tex1Dfetch(texRTreeArray,currentBranchIndex+1) > rect[3] || // branchRect.x_min > rect.x_max
                tex1Dfetch(texRTreeArray,currentBranchIndex+2) > rect[4] || // branchRect.y_min > rect.y_max
                tex1Dfetch(texRTreeArray,currentBranchIndex+3) > rect[5] || // branchRect.z_min > rect.z_max
                rect[0] > tex1Dfetch(texRTreeArray,currentBranchIndex+4) || // rect.x_min > branchRect.x_max
                rect[1] > tex1Dfetch(texRTreeArray,currentBranchIndex+5) || // rect.y_min > branchRect.y_max
                rect[2] > tex1Dfetch(texRTreeArray,currentBranchIndex+6) )) // rect.z_min > branchRect.z_max
            {
                if (tex1Dfetch(texRTreeArray,currentNodeIndex) > 0){ // We are at an internal node - push the node pointed to in the branch onto the stack
                    stack[stackIndex] = tex1Dfetch(texRTreeArray,currentBranchIndex);
                    stackIndex++;
                    //printf("(in spinKernel.cu::SearchRTreeArray): stackIndex: %i\n", stackIndex);
                } else {
                    interSectArray[foundCount] = tex1Dfetch(texRTreeArray,currentBranchIndex); // We are at a leaf - store corresponding triangle index
                    foundCount++;
                    //printf("(in spinKernel.cu::SearchRTreeArray): Tree rectangle: [%g,%g,%g,%g,%g,%g]\n", tex1Dfetch(texRTreeArray,currentBranchIndex+1), tex1Dfetch(texRTreeArray,currentBranchIndex+2),
                    //tex1Dfetch(texRTreeArray,currentBranchIndex+3), tex1Dfetch(texRTreeArray,currentBranchIndex+4), tex1Dfetch(texRTreeArray,currentBranchIndex+5),
                    //tex1Dfetch(texRTreeArray,currentBranchIndex+6));
                }
            }
        }
    }
    return foundCount;
}
//////////////////////////////////////////////////////////////////////////////////////////
// Function name: collDetectRTree
// Description: See whether a particle trying to go from startPos to targetPos
// collides with any triangle in the mesh, using the R-Tree. Return
// the final position of the particle.
//////////////////////////////////////////////////////////////////////////////////////////
// Moves a particle from startPos toward targetPos, resolving every membrane
// collision via the R-Tree until a collision-free sub-path remains; returns
// the final position. On each collision the particle either reflects
// (reflectPos) or permeates, in which case compartment/fiberInside are
// updated IN PLACE (they are reference parameters). u is the spin's uniform
// random draw in (0,1] used for the permeation decision; u_min/u_max shrink
// the permeation window so repeated collisions in one step stay consistent
// with a single draw.
__device__ float3 collDetectRTree(float3 startPos, float3 targetPos, float u, uint8 &compartment, uint16 &fiberInside){
    float3 endPos = targetPos;
    uint hitArray[1200]; // Hitarray stores the indices of candidate collision triangles - we are assuming no more than 1200 (NOTE(review): unchecked bound)
    float spinRectangle[6];
    collResult result, tempResult;
    //float minCollDistSq;
    result.collDistSq = 400000000; // Some really large number, will use this to store the smallest distance to a collision point
    result.collisionType = 1;
    result.collIndex = UINT_MAX;
    uint excludedTriangle = UINT_MAX;
    float u_max = 1, u_min = 0;
    //uint k = 0;
    //uint p = 0;
    //printf("Compartment: %i\n", compartment);
    while (result.collisionType>0){ // If we have detected a collision, we repeat the collision detection for the new, reflected path
        //minCollDistSq = 400000000;
        //printf("p: %u\n", p);
        //p++;
        result.collisionType = 0; // First assume that the particle path does not experience any collisions
        // Define a rectangle that bounds the particle path from corner to corner
        // Finding minx, miny, minz
        spinRectangle[0] = startPos.x; if (targetPos.x < spinRectangle[0]){spinRectangle[0] = targetPos.x;}
        spinRectangle[1] = startPos.y; if (targetPos.y < spinRectangle[1]){spinRectangle[1] = targetPos.y;}
        spinRectangle[2] = startPos.z; if (targetPos.z < spinRectangle[2]){spinRectangle[2] = targetPos.z;}
        // Finding maxx, maxy, maxz
        spinRectangle[3] = startPos.x; if (targetPos.x > spinRectangle[3]){spinRectangle[3] = targetPos.x;}
        spinRectangle[4] = startPos.y; if (targetPos.y > spinRectangle[4]){spinRectangle[4] = targetPos.y;}
        spinRectangle[5] = startPos.z; if (targetPos.z > spinRectangle[5]){spinRectangle[5] = targetPos.z;}
        // Find the triangles whose bounding rectangles intersect spinRectangle. They are written to hitArray and their number is nHits.
        int nHits = SearchRTreeArray(spinRectangle, hitArray, compartment, fiberInside);
        //int nHits = 0;
        //printf("(in spinKernel.cu::collDetectRTree): nHits: %i\n", nHits);
        //printf("(in spinKernel.cu::collDetectRTree): Startpos: [%g,%g,%g]\n", startPos.x, startPos.y, startPos.z);
        //printf("(in spinKernel.cu::collDetectRTree): Targetpos: [%g,%g,%g]\n", targetPos.x, targetPos.y, targetPos.z);
        //printf("(in spinKernel.cu::collDetectRTree): Compartment: %i\n", compartment);
        //printf("(in spinKernel.cu::collDetectRTree): Fiber: %u\n", fiberInside);
        //printf("(in spinKernel.cu::collDetectRTree): Excluded triangle: %u\n", excludedTriangle);
        //printf("(in spinKernel.cu::collDetectRTree): result.collDistSq: %g\n", result.collDistSq);
        // Loop through the triangles in hitArray, see if we have collisions, store the closest collision point in the variable result.
        for (uint k=0; k<nHits; k++){
            uint triIndex = hitArray[k];
            //printf("(in spinKernel.cu::collDetectRTree): hitArray[%u]: %u\n", k, hitArray[k]);
            if (triIndex != excludedTriangle){
                tempResult = triCollDetect(startPos, targetPos, triIndex);
                //if ((tempResult.collisionType>0) & (tempResult.collDistSq < result.collDistSq)){
                if ((tempResult.collisionType>0) & (tempResult.collDistSq < result.collDistSq)){
                    result = tempResult;
                    //minCollDistSq = tempResult.collDistSq;
                }
            }
        }
        // If we have a collision, then we find the resulting point which the particle gets reflected to.
        if (result.collisionType>0){
            //printf("*\n");
            //printf("(in spinKernel.cu::collDetectRTree): Collision!\n");
            //printf("(in spinKernel.cu::collDetectRTree): startPos: [%g,%g,%g]\n", startPos.x,startPos.y,startPos.z);
            //printf("(in spinKernel.cu::collDetectRTree): targetPos: [%g,%g,%g]\n", targetPos.x,targetPos.y,targetPos.z);
            //printf("(in spinKernel.cu::collDetectRTree): Collision point: [%g,%g,%g]\n", result.collPoint.x, result.collPoint.y, result.collPoint.z);
            //printf("(in spinKernel.cu::collDetectRTree): Endpos (before assignment): [%g,%g,%g]\n", endPos.x, endPos.y, endPos.z);
            //printf("(in spinKernel.cu::collDetectRTree): Collision triangle index: %u\n", result.collIndex);
            //printf("(in spinKernel.cu::collDetectRTree): Collision fiber index: %u\n", tex1Dfetch(texTriInfo, result.collIndex*3+0));
            //printf("(in spinKernel.cu::collDetectRTree): Collision membrane index: %u\n", tex1Dfetch(texTriInfo, result.collIndex*3+1));
            //printf("(in spinKernel.cu::collDetectRTree): u: %g\n", u);
            //printf("(in spinKernel.cu::collDetectRTree): u_max: %g, u_min: %g, u_p: %g\n", u_max, u_min, u_max-(u_max-u_min)*k_permeability);
            // If u>u_max-(u_max-u_min)*k_permeability, then the particle permeates through the membrane and does not get reflected.
            // u is in the range (0,1].
            if (u<=u_max-(u_max-u_min)*k_permeability){ // The spin does not permeate the membrane
                endPos = reflectPos(startPos, targetPos, result.collPoint, result.collIndex, result.collisionType);
                u_max = u_max-(u_max-u_min)*k_permeability;
                //printf("(in spinKernel.cu::collDetectRTree): Particle bounces off membrane\n");
                //printf("(in spinKernel.cu::collDetectRTree): Endpos: [%g,%g,%g]\n", endPos.x, endPos.y, endPos.z);
                //reflectPos(startPos, targetPos, result.collPoint, result.collIndex, result.collisionType);
            } else{ // The spin permeates the membrane
                u_min = u_max-(u_max-u_min)*k_permeability;
                // Change the compartment (and fiber, if appropriate) assignment of the spin.
                // Membrane type is tex1Dfetch(texTriInfo, result.collIndex*3+1):
                // 0 = axon surface, 1 = myelin surface, otherwise glia surface.
                if (compartment == 2){
                    if (tex1Dfetch(texTriInfo, result.collIndex*3+1) == 0){ // We are going from compartment 2 through axon surface - new compartment is 1
                        compartment = 1;
                    } else { // We are going from compartment 2 through myelin surface - new compartment is 0
                        compartment = 0;
                        fiberInside = UINT16_MAX;
                    }
                } else if (compartment == 1){
                    compartment = 2; // We are going from compartment 1 through axon surface - new compartment is 2
                } else if (compartment == 3){
                    compartment = 0; // We are going from compartment 3 through glia surface - new compartment is 0
                    fiberInside = UINT16_MAX;
                } else {
                    fiberInside = tex1Dfetch(texTriInfo, result.collIndex*3+0);
                    if (tex1Dfetch(texTriInfo, result.collIndex*3+1) == 1){ // We are going from compartment 0 through myelin surface - new compartment is 2
                        compartment = 2;
                    } else { // We are going from compartment 0 through glia surface - new compartment is 3
                        compartment = 3;
                    }
                }
                //printf("(in spinKernel.cu::collDetectRTree): Particle permeates membrane\n");
                //printf("(in spinKernel.cu::collDetectRTree): Endpos: [%g,%g,%g]\n", endPos.x, endPos.y, endPos.z);
            }
        }
        // Redefine the start and end points for the reflected path, then repeat until no collision is detected.
        startPos = result.collPoint;
        targetPos = endPos;
        excludedTriangle = result.collIndex; // Make sure we don't detect a collision with the triangle which the particle bounces from
        result.collDistSq = 400000000;
    }
    return endPos;
}
/////////////////////////////////////////////////////////////////////////////////////////////
// Function name: cubeCollDetect
// Description: Determine whether a particle traveling from oPos to pos experiences
// a collision with any of the triangles in cube no. cubeIndex. Triangle
// no. excludedTriangle is not checked - useful if the particle is bouncing
// off that triangle.
/////////////////////////////////////////////////////////////////////////////////////////////
// Tests the path oPos -> pos against every triangle stored in grid cube
// cubeIndex (excluding excludedTriangle, useful while bouncing off it) and
// returns the CLOSEST collision found, or collisionType==0 if none.
// trianglesInCubes is laid out as cubeIndex*k_maxTrianglesPerCube + k;
// cubeCounter[cubeIndex] gives the number of valid entries for that cube.
__device__ collResult cubeCollDetect(float3 oPos, float3 pos, uint cubeIndex, uint excludedTriangle, uint* trianglesInCubes, uint* cubeCounter){
    uint triIndex, k_max;
    collResult result, testCollision;
    result.collisionType = 0;
    result.collDistSq = 400000000;
    result.collIndex = UINT_MAX;
    // Loop through membrane types (layers) as appropriate
    //for (uint layerIndex = 0; layerIndex < 2; layerIndex++){ // Change later so not to loop through all membrane types
    //k_max = tex1Dfetch(texCubeCounter, layerIndex*k_totalNumCubes+cubeIndex); // k_max: the number of triangles in cube cubeIndex on membrane type layerIndex
    //k_max = tex1Dfetch(texCubeCounter, cubeIndex);
    //cubeIndex = 1275;
    k_max = cubeCounter[cubeIndex];
    //printf("cubeCounter[%u]: %u\n", cubeIndex, k_max);
    for (uint k=0; k<k_max; k++){
        // triIndex is the number of the triangle being checked.
        //triIndex = tex1Dfetch(texTrianglesInCubes, (layerIndex*k_totalNumCubes+cubeIndex)*k_maxTrianglesPerCube+k);
        // triIndex = tex1Dfetch(texTrianglesInCubes, cubeIndex*k_maxTrianglesPerCube+k);
        triIndex = trianglesInCubes[cubeIndex*k_maxTrianglesPerCube+k];
        //printf("Checking triangle %u\n", triIndex);
        if (triIndex != excludedTriangle){
            testCollision = triCollDetect(oPos, pos, triIndex);
            // Keep only the nearest hit along the path.
            if ( (testCollision.collisionType>0)&(testCollision.collDistSq<result.collDistSq) ){
                result = testCollision;
            }
        }
    }
    //triIndex = tex1Dfetch(texTrianglesInCubes, cubeIndex*k_maxTrianglesPerCube+k);
    //}
    return result;
}
///////////////////////////////////////////////////////////////////////////////////////////////
// Function name: collDetectRectGrid
// Description: Determine whether a particle trying to go from startPos to targetPos
// collides with a triangle, using the method of a rectangular grid (as
// opposed to an R-Tree)
///////////////////////////////////////////////////////////////////////////////////////////////
// Moves a particle from startPos toward targetPos using the rectangular-grid
// triangle search: walks the axis-aligned box of cubes between the start and
// end cubes, resolves each collision (reflect or permeate) and repeats until
// no collision remains; returns the final position.
// NOTE(review): unlike collDetectRTree, compartment and fiberInside are
// passed BY VALUE here, so permeation updates to them are lost to the caller
// (collDetect passes references that stop here) — confirm whether this is
// intentional or an oversight.
__device__ float3 collDetectRectGrid(float3 startPos, float3 targetPos, float u, uint8 compartment, uint16 fiberInside, uint* trianglesInCubes, uint* cubeCounter){
    //printf("RectGrid ....");
    float3 endPos = targetPos;
    collResult collCheck;
    collCheck.collisionType = 1;
    uint excludedTriangle = UINT_MAX, currCube;
    uint3 currCubexyz, startCubexyz, endCubexyz;
    int3 cubeIncrement;
    // u_min/u_max shrink the permeation window so that repeated collisions
    // within one step stay consistent with the single random draw u.
    float u_max = 1.0f, u_min = 0.0f;
    while (collCheck.collisionType > 0){
        //startCube = calcCubeHashGPU(calcCubePosGPU(startPos, k_cubeLength), k_numCubes); // The cube that the particle starts in
        //endCube = calcCubeHashGPU(calcCubePosGPU(targetPos, k_cubeLength), k_numCubes); // The cube that the particle tries to end in
        startCubexyz = calcCubePosGPU(startPos);
        endCubexyz = calcCubePosGPU(targetPos);
        // Per-axis step direction: -1, 0 or +1 toward the end cube.
        cubeIncrement.x = ( (endCubexyz.x>startCubexyz.x) - (endCubexyz.x<startCubexyz.x) );
        cubeIncrement.y = ( (endCubexyz.y>startCubexyz.y) - (endCubexyz.y<startCubexyz.y) );
        cubeIncrement.z = ( (endCubexyz.z>startCubexyz.z) - (endCubexyz.z<startCubexyz.z) );
        //printf("startCubexyz: [%u,%u,%u]\n", startCubexyz.x, startCubexyz.y, startCubexyz.z);
        //printf("endCubexyz: [%u,%u,%u]\n", endCubexyz.x, endCubexyz.y, endCubexyz.z);
        //printf("cubeIncrement: [%i,%i,%i]\n", cubeIncrement.x, cubeIncrement.y, cubeIncrement.z);
        collCheck.collisionType = 0;
        // Scan the whole start..end cube box (inclusive); stop early on the
        // first cube that reports a collision.
        currCubexyz.x = startCubexyz.x;
        do {
            currCubexyz.y = startCubexyz.y;
            do {
                currCubexyz.z = startCubexyz.z;
                do {
                    currCube = calcCubeHashGPU(currCubexyz);
                    //printf("currCubexyz: [%u,%u,%u]\n", currCubexyz.x, currCubexyz.y, currCubexyz.z);
                    collCheck = cubeCollDetect(startPos, targetPos, currCube, excludedTriangle, trianglesInCubes, cubeCounter);
                    currCubexyz.z += cubeIncrement.z;
                } while ((currCubexyz.z != endCubexyz.z+cubeIncrement.z)&&(collCheck.collisionType == 0));
                currCubexyz.y += cubeIncrement.y;
            } while ((currCubexyz.y != endCubexyz.y+cubeIncrement.y)&&(collCheck.collisionType == 0));
            currCubexyz.x += cubeIncrement.x;
        } while ((currCubexyz.x != endCubexyz.x+cubeIncrement.x)&&(collCheck.collisionType == 0));
        /*while ((currCubexyz.x != endCubexyz.x+cubeIncrement.x)&&(collCheck.collisionType == 0)){
            while ((currCubexyz.y != endCubexyz.y+cubeIncrement.y)&&(collCheck.collisionType == 0)){
                while ((currCubexyz.z != endCubexyz.z+cubeIncrement.z)&&(collCheck.collisionType == 0)){
                    currCubexyz.z += cubeIncrement.z;
                    currCube = calcCubeHashGPU(currCubexyz);
                    printf("currCubexyz: [%u,%u,%u]\n", currCubexyz.x, currCubexyz.y, currCubexyz.z);
                    collCheck = cubeCollDetect(startPos, targetPos, currCube, excludedTriangle, trianglesInCubes, cubeCounter);
                }
                currCubexyz.y += cubeIncrement.y;
            }
            currCubexyz.x += cubeIncrement.x;
        }*/
        if (collCheck.collisionType > 0){
            // NOTE(review): these printfs run on every collision — debug
            // leftovers that will serialize warps; consider removing.
            printf("(in collDetectRectGrid): Collision!\n");
            printf("(in collDetectRectGrid): Startpos: [%g,%g,%g]\n", startPos.x, startPos.y, startPos.z);
            printf("(in collDetectRectGrid): Targetpos: [%g,%g,%g]\n", targetPos.x, targetPos.y, targetPos.z);
            printf("(in collDetectRectGrid): Collision pos: [%g,%g,%g]\n", collCheck.collPoint.x, collCheck.collPoint.y, collCheck.collPoint.z);
            printf("(in collDetectRectGrid): Collision triangle: %u\n", collCheck.collIndex);
            printf("(in collDetectRectGrid): Cube: %u\n", currCube);
            printf("(in collDetectRectGrid): Compartment: %u\n", compartment);
            printf("(in collDetectRectGrid): FiberInside: %u\n", fiberInside);
            if (u<=u_max-(u_max-u_min)*k_permeability){ // The spin does not permeate the membrane
                endPos = reflectPos(startPos, targetPos, collCheck.collPoint, collCheck.collIndex, collCheck.collisionType);
                u_max = u_max-(u_max-u_min)*k_permeability;
                //printf("(in spinKernel.cu::collDetectRTree): Particle bounces off membrane\n");
                //printf("(in spinKernel.cu::collDetectRTree): Endpos: [%g,%g,%g]\n", endPos.x, endPos.y, endPos.z);
                //reflectPos(startPos, targetPos, collCheck.collPoint, collCheck.collIndex, collCheck.collisionType);
            } else{ // The spin permeates the membrane
                u_min = u_max-(u_max-u_min)*k_permeability;
                // Change the compartment (and fiber, if appropriate) assignment of the spin.
                // Membrane type is tex1Dfetch(texTriInfo, collCheck.collIndex*3+1):
                // 0 = axon surface, 1 = myelin surface, otherwise glia surface.
                if (compartment == 2){
                    if (tex1Dfetch(texTriInfo, collCheck.collIndex*3+1) == 0){ // We are going from compartment 2 through axon surface - new compartment is 1
                        compartment = 1;
                    } else { // We are going from compartment 2 through myelin surface - new compartment is 0
                        compartment = 0;
                        fiberInside = UINT16_MAX;
                    }
                } else if (compartment == 1){
                    compartment = 2; // We are going from compartment 1 through axon surface - new compartment is 2
                } else if (compartment == 3){
                    compartment = 0; // We are going from compartment 3 through glia surface - new compartment is 0
                    fiberInside = UINT16_MAX;
                } else {
                    fiberInside = tex1Dfetch(texTriInfo, collCheck.collIndex*3+0);
                    if (tex1Dfetch(texTriInfo, collCheck.collIndex*3+1) == 1){ // We are going from compartment 0 through myelin surface - new compartment is 2
                        compartment = 2;
                    } else { // We are going from compartment 0 through glia surface - new compartment is 3
                        compartment = 3;
                    }
                }
            }
        }
        // Redefine the start and end points for the reflected path, then repeat until no collision is detected.
        startPos = collCheck.collPoint;
        targetPos = endPos;
        excludedTriangle = collCheck.collIndex; // Make sure we don't detect a collision with the triangle which the particle bounces from
    }
    //printf("test\n\n");
    return endPos;
}
////////////////////////////////////////////////////////////////////////////////////////////////////////
// Function name: collDetect
// Description: Determine whether a particle trying to travel from oPos to pos hits a triangle.
// Use either the method of a rectangular grid or an R-Tree.
////////////////////////////////////////////////////////////////////////////////////////////////////////
__device__ float3 collDetect(float3 oPos, float3 pos, float u, uint8 &compartment, uint16 &fiberInside, uint* trianglesInCubes, uint* cubeCounter){
    // Dispatch collision detection for the path oPos -> pos.
    // Only the rectangular-grid search is currently enabled; the R-Tree
    // variant (selected by k_triSearchMethod) remains commented out.
    //if (k_triSearchMethod == 0){
    return collDetectRectGrid(oPos, pos, u, compartment, fiberInside, trianglesInCubes, cubeCounter);
    //} else {
    //    return collDetectRTree(oPos, pos, u, compartment, fiberInside);
    //}
}
///////////////////////////////////////////////////////////////////////////////////////////////////////
// Function name: integrate
// Description: "Main" function for GPU kernel computation, called from spinSystem.cu, invokes all
// the functions above. Computes the spin movement and signal for each spin by
// performing the below computation in parallel on multiple threads.
///////////////////////////////////////////////////////////////////////////////////////////////////////
// Kernel: one thread per spin. For each of 'interations' time steps the spin
// takes a normally distributed random step scaled by its compartment's
// standard deviation, is collision-checked against the membrane mesh via
// collDetect(), and accumulates T2 decay and gradient-induced phase.
// NOTE(review): the body reads the __device__ globals m_dNumSpins,
// interations and k_deltaTime rather than the parameters numBodies, itr and
// deltaTime -- host code must keep both in sync (integrateSystemVBO() sets
// the symbols; integrateSystem() does not set m_dNumSpins/interations).
__global__ void integrate(float3* oldPos,
uint2* oldSeed,
//float4* spinInfo,
spinData* spinInfo,
float deltaTime,
float permeability,
uint numBodies,
float gradX, float gradY, float gradZ,
float phaseConstant,
uint itr, uint* trianglesInCubes, uint* cubeCounter, float* m_dStdDevs, float* m_dT2Values)
{
// Flat global thread index: one thread handles one spin.
int index = blockIdx.x * blockDim.x + threadIdx.x;
//printf("KERNEL\n");
// Tail guard -- uses the device constant m_dNumSpins, not the numBodies
// argument passed in.
if (index>=m_dNumSpins){
printf("index>=numBodies\n\n");
return;
}
float3 pos = oldPos[index];//make_float3(1.2,1.2,1.2);// // pos = particle position
uint2 seed2 = oldSeed[index];//make_uint2(1.2,1.2); // seed4 = seed values (currently only using first 2 values)
//printf("pos: %f; %f; %f; \n seed2: %u; %u;\n\n ",pos.x,pos.y,pos.z,seed2.x,seed2.y);
//printf("k_reflectionType: %u, k_triSearchMethod: %u, k_numCubes: %u, k_totalNumCubes: %u, k_maxTrianglesPerCube: %u, k_cubeLength: %u, k_nFibers: %u, k_nCompartments: %f, k_deltaTime: %f\n\n ",k_reflectionType,k_triSearchMethod,k_numCubes,k_totalNumCubes,k_maxTrianglesPerCube,k_cubeLength,k_nFibers,k_nCompartments, k_deltaTime);
// Current tissue compartment of this spin and, when inside a fiber, the
// fiber index (UINT16_MAX-style sentinels are managed in collDetectRectGrid).
uint8 compartment = spinInfo[index].compartmentType;
uint16 fiberInside = spinInfo[index].insideFiber;
/////////////////////////////////////////////////////////////////////////////////
// Now apply the brownian motion (free diffusion). We simulate brownian motion
// with a random walk where the x, y, and z componenets are drawn from a
// normal distribution with mean 0 and standard deviation of sqrt(2*ADC*deltaTime).
// From wikipedia http://en.wikipedia.org/wiki/Random_walk:
// In 3D, the variance corresponding to the Green's function of the diffusion equation is:
// sigma^2 = 6*D*t
// sigma^2 corresponds to the distribution associated to the vector R that links the two
// ends of the random walk, in 3D. The variance associated to each component Rx, Ry or Rz
// is only one third of this value (still in 3D).
// Thus, the standard deviation of each component is sqrt(2*ADC*deltaTime)
//////////////////////////////////////////////////////////////////////////////////
//uint rseed[2];
//rseed[0] = seed2.x;
//rseed[1] = seed2.y;
// NOTE(review): 'interations' is a signed __device__ int compared against an
// unsigned loop counter; it is set from the host in integrateSystemVBO().
for (uint i=0; i<interations; i++){
// Take a random walk...
// myRandn returns 3 PRNs from a normal distribution with mean 0 and SD of 1.
// So, we just need to scale these with the desired SD to get the displacements
// for the random walk.
// myRandn also returns a bonus uniformly distributed PRN as a side-effect of the
// Box-Muller transform used to generate normally distributed PRNs.
//printf("Random walk %u\n\n",i);
float u;
float3 brnMot;// = make_float3(22.2,22.2,22.2);
//myRandn(rseed, brnMot.y, brnMot.x, brnMot.z, u);
// u is the bonus uniform PRN; it is later compared against the
// permeability threshold inside the collision handling.
myRandn(seed2, brnMot.y, brnMot.x, brnMot.z, u);
float3 oPos = pos; // Store a copy of the old position before we update it
//printf("k_stdDevs[%u]: %f \n\n",compartment,m_dStdDevs[compartment]);
// Scale the unit-SD step by the compartment-specific standard deviation.
pos.x += brnMot.x * m_dStdDevs[compartment];
pos.y += brnMot.y * m_dStdDevs[compartment];
pos.z += brnMot.z * m_dStdDevs[compartment];
//printf("In kernel 1\n");
// Test
if (index == 0){
/*printf("i = %u\n", i);
printf("index: %u\n", index);
printf("oPos: [%g,%g,%g]\n", oPos.x,oPos.y,oPos.z);
printf("pos: [%g,%g,%g]\n", pos.x,pos.y,pos.z);
printf("Compartment: %u\n", compartment);
printf("Fiberinside: %u\n", fiberInside);
printf("Signal magnitude: %g\n", signalMagnitude);
printf("Signal phase: %g\n", signalPhase);
printf("u (before assignment): %g\n", u);
printf("rseed after: [%u,%u]\n", rseed[0], rseed[1]);
printf("[%g,%g,%g,%g,%g,%g,%u,%u]\n", oPos.x, oPos.y, oPos.z, pos.x, pos.y, pos.z, compartment, fiberInside);
*/
//oPos.x = 0.0; oPos.y = 0.0; oPos.z = 0.01; // oPos.x = 0.7; oPos.y = 0.0; oPos.z = 0.01;
//pos.x = 0.1; pos.y = 0.2; pos.z = -0.01; // pos.x = 0.632; pos.y = 0.067; pos.z = 0.01;
//compartment = 1;
//fiberInside = 0;
//u = 0.9;
//printf("u (after assignment): %g\n", u);
}
// Do a collision detection for the path the particle is trying to take.
// May update pos (reflection), compartment and fiberInside (permeation).
//printf("trianglesInCubes[%u]: %u \t cubeCounter[%u]: %u",index,trianglesInCubes[index],index,cubeCounter[index]);
pos = collDetect(oPos,pos,u,compartment,fiberInside,trianglesInCubes,cubeCounter);
//printf("In kernel 2\n");
// Don't let the spin leave the volume
if (pos.x > 1.0f) { pos.x = 1.0f; /*signalMagnitude = 0.0;*/ }
else if (pos.x < -1.0f) { pos.x = -1.0f; /*signalMagnitude = 0.0;*/ }
if (pos.y > 1.0f) { pos.y = 1.0f; /*signalMagnitude = 0.0;*/ }
else if (pos.y < -1.0f) { pos.y = -1.0f; /*signalMagnitude = 0.0;*/ }
if (pos.z > 1.0f) { pos.z = 1.0f; /*signalMagnitude = 0.0;*/ }
else if (pos.z < -1.0f) { pos.z = -1.0f; /*signalMagnitude = 0.0;*/ }
// Update MR signal magnitude: exponential T2 decay, one Euler step.
//signalMagnitude += -signalMagnitude/k_T2Values[compartment]*k_deltaTime;
//printf("In kernel 3\n");
//printf("m_dT2Values[%u]= %f\n",compartment,m_dT2Values[compartment]);
spinInfo[index].signalMagnitude += -spinInfo[index].signalMagnitude/m_dT2Values[compartment]*k_deltaTime;
//printf("updated spin signalMagnitude: %f",spinInfo[index].signalMagnitude);
// Update MR signal phase: accumulate phase from the gradient field at pos.
//signalPhase += (gradX * pos.x + gradY * pos.y + gradZ * pos.z) * phaseConstant;
//printf("phaseConstant: %f, gradientX: %f, gradientY: %f,gradientZ: %f\n",phaseConstant,gradX,gradY,gradZ);
spinInfo[index].signalPhase += (gradX * pos.x + gradY * pos.y + gradZ * pos.z) * phaseConstant;
//printf("updated spin signal phase : %f",spinInfo[index].signalPhase);
}
// Store new position
//oldPos[index] = make_float4(pos, signalPhase);
oldPos[index] = pos;
// Store new seed values
//oldSeed[index].x = rseed[0];
//oldSeed[index].y = rseed[1];
oldSeed[index].x = seed2.x;
oldSeed[index].y = seed2.y;
// Store new values of compartment and signal magnitude and phase
//spinInfo[index].signalMagnitude = signalMagnitude;
//spinInfo[index].signalPhase = signalPhase;
spinInfo[index].compartmentType = compartment;
spinInfo[index].insideFiber = fiberInside;
}
#endif
/////////////////////////////////////////////////////////////////////////////////////////
// File name: spinSystem.cu
// Description: Definition of all CUDA functions that are not used inside the
// kernel.
/////////////////////////////////////////////////////////////////////////////////////////
// Query hipGetLastError() and print a human-readable message when the most
// recent runtime API call or kernel launch did not succeed. Note that
// hipGetLastError() also clears the sticky error state.
void print_last_CUDA_error()
{
    // Fix: the original declared a local variable named "hipError_t",
    // shadowing the hipError_t type itself. Use a distinct name.
    hipError_t err = hipGetLastError();
    if (err != hipSuccess)
    {
        printf(" hipGetLastError() returned %d: %s\n", err, hipGetErrorString(err));
    }
}
extern "C"
{
// Placeholder: intended to force CUDA context creation; currently a no-op.
void checkCUDA()
{
// cuda(Free(0));
}
///////////////////////////////////////////////////////////////////////
// Function name: allocateArray
// Description: Allocate memory on device for an array pointed to
// by devPtr of size size.
// NOTE(review): the return code of hipMalloc is ignored; a failed
// allocation will only surface at a later API call.
///////////////////////////////////////////////////////////////////////
void allocateArray(void **devPtr, size_t size)
{
hipMalloc(devPtr,size);
}
///////////////////////////////////////////////////////////////////////
// Function name: freeArray
// Description: Free up the device memory used by the array pointed
// to by devPtr
///////////////////////////////////////////////////////////////////////
void freeArray(void *devPtr)
{
hipFree(devPtr);
}
///////////////////////////////////////////////////////////////////////
// Function name: threadSync
// Description: Block until the device has completed all preceding
// requested tasks.
///////////////////////////////////////////////////////////////////////
void threadSync()
{
hipDeviceSynchronize();
}
///////////////////////////////////////////////////////////////////////
// Function name: copyArrayFromDevice
// Description: Copy array from device (pointed to by device parameter)
// to array on host (pointed to by host parameter)
// If vbo is non-zero it is treated as a GL buffer id; the
// buffer is mapped to obtain the device pointer, copied,
// then unmapped. (Legacy GL-interop API.)
///////////////////////////////////////////////////////////////////////
void copyArrayFromDevice(void* host, const void* device, unsigned int vbo, int size)
{
if (vbo)
hipGLMapBufferObject__((void**)&device, vbo);
hipMemcpy(host, device, size, hipMemcpyDeviceToHost);
if (vbo)
hipGLUnmapBufferObject(vbo);
}
////////////////////////////////////////////////////////////////////////
// Function name: copyArrayToDevice
// Description: Copy array from host (pointed to by host parameter)
// to array on device (pointed to by device parameter),
// starting 'offset' bytes into the destination.
////////////////////////////////////////////////////////////////////////
void copyArrayToDevice(void* device, const void* host, int offset, int size)
{
hipMemcpy((char *) device + offset, host, size, hipMemcpyHostToDevice);
}
/////////////////////////////////////////////////////////////////////////
// Function name: copyConstantToDevice
// Description: Copy 'size' bytes from host memory into the device
//              symbol 'device', starting 'offset' bytes into the symbol.
/////////////////////////////////////////////////////////////////////////
void copyConstantToDevice(void* device, const void* host, int offset, int size)
{
    // Bug fix: the original call dropped the 'offset' parameter entirely,
    // so every caller wrote to the start of the symbol regardless of the
    // offset it requested. Pass offset (and the copy direction) through.
    hipMemcpyToSymbol((char *) device, host, size, offset, hipMemcpyHostToDevice);
}
//////////////////////////////////////////////////////////////////////////
// Function name: registerGLBufferObject
// Description: Registers the buffer object of ID vbo for access by CUDA.
// NOTE(review): hipGLRegisterBufferObject belongs to the legacy GL-interop
// API -- presumably kept for toolkit compatibility; verify before upgrading.
//////////////////////////////////////////////////////////////////////////
void registerGLBufferObject(uint vbo)
{
hipGLRegisterBufferObject(vbo);
}
//////////////////////////////////////////////////////////////////////////
// Function name: unregisterGLBufferObject
// Description: Unregisters the buffer object of ID vbo for access by CUDA
// and releases any CUDA resources associated with the buffer.
//////////////////////////////////////////////////////////////////////////
void unregisterGLBufferObject(uint vbo)
{
hipGLUnregisterBufferObject(vbo);
}
//////////////////////////////////////////////////////////////////////////
// The following functions bind/unbind various arrays from host to device
// texture memory.
// Note: Should combine into one function
// NOTE(review): bindCubeCounter and bindTrianglesInCubes lack the
// "if (size>0)" guard used by every later binder -- confirm whether a
// zero-size bind can occur for those two arrays.
//////////////////////////////////////////////////////////////////////////
void bindCubeCounter(uint* ptr, int size) // Test
{
hipBindTexture(0,texCubeCounter,ptr,size*sizeof(uint));
}
void unbindCubeCounter() // Test
{
hipUnbindTexture(texCubeCounter);
}
void bindTrianglesInCubes(uint* ptr, int size) // Test
{
hipBindTexture(0,texTrianglesInCubes,ptr,size*sizeof(uint));
}
void unbindTrianglesInCubes() // Test
{
hipUnbindTexture(texTrianglesInCubes);
}
/*
void bindTrgls(uint* ptr, int size) // Test
{
hipBindTexture(0,texTrgls,ptr,size*sizeof(uint));
}
void unbindTrgls() // Test
{
hipUnbindTexture(texTrgls);
}
*/
// Vertex coordinate array used by the triangle intersection tests.
void bindVertices(float* ptr, int size) // Test
{
if (size>0){
hipBindTexture(0,texVertices,ptr,size*sizeof(float));
}
}
void unbindVertices() // Test
{
hipUnbindTexture(texVertices);
}
// Per-triangle helper records (12 floats each; the first three hold the
// triangle normal -- see reflectPos()).
void bindTriangleHelpers(float* ptr, int size) // Test
{
if (size>0){
hipBindTexture(0,texTriangleHelpers,ptr,size*sizeof(float));
}
}
void unbindTriangleHelpers() // Test
{
hipUnbindTexture(texTriangleHelpers);
}
void bindRTreeArray(float* ptr, int size) // Test
{
if (size>0){
hipBindTexture(0,texRTreeArray,ptr,size*sizeof(float));
}
}
void unbindRTreeArray() // Test
{
hipUnbindTexture(texRTreeArray);
}
void bindTreeIndexArray(uint* ptr, int size) // Test
{
if (size>0){
hipBindTexture(0,texCombinedTreeIndex,ptr,size*sizeof(uint));
}
}
void unbindTreeIndexArray() // Test
{
hipUnbindTexture(texCombinedTreeIndex);
}
// Per-triangle info (3 uints each: fiber index at +0, membrane type at +1 --
// see the compartment-transition logic in collDetectRectGrid).
void bindTriInfo(uint* ptr, int size) // Test
{
if (size>0){
hipBindTexture(0,texTriInfo,ptr,size*sizeof(uint));
}
}
void unbindTriInfo() // Test
{
hipUnbindTexture(texTriInfo);
}
///////////////////////////////////////////////////////////////////////////
// Function name: integrateSystem
// Description: Run the kernel for spin computations
///////////////////////////////////////////////////////////////////////////
// Host-side launcher for the integrate() kernel (non-VBO variant).
// NOTE(review): unlike integrateSystemVBO(), this variant never sets the
// device symbols m_dNumSpins, interations, gradientX/Y/Z or k_cubeLength,
// which the kernel reads -- confirm this path is still in use.
void integrateSystem(
uint pos,
uint* randSeed,
spinData* spinInfo,
float deltaTime,
float permeability,
uint numBodies,
float3 gradient,
float phaseConstant,
uint iterations, uint* trianglesInCubes, uint* cubeCounter,uint m_nMembraneTypes, uint m_nPosValues, uint m_numSpins, uint m_nSeedValues, uint m_numCompartments, float* m_hT2Values, float* m_hStdDevs, uint m_reflectionType, uint m_triSearchMethod, uint m_nFibers
){
// Print device properties once, on the first invocation only.
static bool firstCall = true;
int i =0;
struct hipDeviceProp_t devInfo;
hipGetDeviceProperties(&devInfo, i);
if (firstCall){
// Write out some info
printf("\n\n\n\n\nCUDA device info:\n\n");
printf("Name: %s\n", devInfo.name);
printf("totalGlobalMem: %u\n", devInfo.totalGlobalMem);
printf("sharedMemPerBlock: %u\n", devInfo.sharedMemPerBlock);
printf("regsPerBlock: %u\n", devInfo.regsPerBlock);
printf("warpSize: %u\n", devInfo.warpSize);
printf("memPitch: %u\n", devInfo.memPitch);
printf("maxThreadsPerBlock: %u\n", devInfo.maxThreadsPerBlock);
printf("\n\n");
firstCall = false;
}
hipError_t cudaerr;
// NOTE(review): 'Pos' is mapped here but never used in the kernel launch
// below (the raw buffer id 'pos' is cast to float3* instead), and it is
// never unmapped -- verify against integrateSystemVBO(), which maps, uses
// and unmaps the pointer.
float *Pos;
hipGLMapBufferObject__((void**)&Pos, pos);
// NOTE(review): m_dSpins and m_dSeed are declared but never used.
float* m_dSpins;
uint* m_dSeed;
//allocateArray((void**)&m_dSpins[0], sizeof(float) * m_nSpinValues * m_numSpins);
//allocateArray((void**)&m_dSpins[1], sizeof(float) * m_nSpinValues * m_numSpins);
//hipMalloc((void**)m_dSpins, m_numSpins*sizeof(spinData));
//hipMemcpy(m_dSpins, spinInfo, m_numSpins*sizeof(spinData),hipMemcpyHostToDevice);
//hipMalloc((void**)m_dSeed, sizeof(uint) * m_nSeedValues * m_numSpins);
//hipMemcpy(m_dSeed,randSeed , m_numSpins*m_nSeedValues*sizeof(uint),hipMemcpyHostToDevice);
// Upload per-compartment T2 values and random-walk standard deviations.
// NOTE(review): these two allocations are never freed in this function
// (leaked on every call).
float* m_dStdDevs;
float* m_dT2Values;
hipMalloc((void**)&m_dT2Values, m_numCompartments*sizeof(float));
hipMemcpy(m_dT2Values, m_hT2Values, m_numCompartments*sizeof(float),hipMemcpyHostToDevice);
hipMalloc((void**)&m_dStdDevs, m_numCompartments*sizeof(float));
hipMemcpy(m_dStdDevs, m_hStdDevs, m_numCompartments*sizeof(float),hipMemcpyHostToDevice);
//print_last_cuda_error(); // throw error
// Set constants in device memory
hipMemcpyToSymbol(k_reflectionType, &m_reflectionType, sizeof(uint));
hipMemcpyToSymbol(k_triSearchMethod, &m_triSearchMethod, sizeof(uint));
hipMemcpyToSymbol(k_nFibers, &m_nFibers, sizeof(uint));
hipMemcpyToSymbol(k_nCompartments, &m_numCompartments, sizeof(uint));
// NOTE(review): duplicate of the line above -- harmless but redundant.
hipMemcpyToSymbol(k_nCompartments, &m_numCompartments, sizeof(uint));
hipMemcpyToSymbol(k_permeability, &permeability, sizeof(float));
hipMemcpyToSymbol(k_deltaTime, &deltaTime, sizeof(float));
// NOTE(review): the two copies below write sizeof(float) bytes of the HOST
// pointer's address into the __device__ float* symbols k_T2Values/k_stdDevs.
// The kernel reads its m_dT2Values/m_dStdDevs arguments instead, so these
// symbols appear unused here -- confirm and remove if dead.
hipMemcpyToSymbol(k_T2Values, &m_hT2Values, sizeof(float));
hipMemcpyToSymbol(k_stdDevs, &m_hStdDevs, sizeof(float));
// Number of threads will normally be 128
// NOTE(review): comment disagrees with min(90, ...) below, and the integer
// division drops the tail (numBodies % numThreads spins are never launched).
int numThreads = min(90, numBodies);
int numBlocks = numBodies/numThreads;
hipDeviceSynchronize();
// Execute the kernel
hipLaunchKernelGGL((
integrate), dim3(numBlocks), dim3(numThreads), 0, 0, (float3*) pos, (uint2*) randSeed, spinInfo, deltaTime, permeability, numBodies, gradient.x, gradient.y, gradient.z, phaseConstant, iterations, trianglesInCubes, cubeCounter, m_dStdDevs,m_dT2Values);
cudaerr = hipDeviceSynchronize();
if (cudaerr != hipSuccess)
printf("kernel launch failed with error \"%s\".\n",
hipGetErrorString(cudaerr));
/* hipGLUnmapBufferObject(Pos);
hipMemcpy( m_posVbo, m_hPos, sizeof(float)*numBodies*m_nPosValues , hipMemcpyDeviceToHost );
hipMemcpy( m_dSeed, m_hSeed, sizeof(uint)*numBodies*m_nSeedValues, hipMemcpyDeviceToHost );
hipMemcpy( m_dSpins, m_hSpins, sizeof(spinData)*numBodies, hipMemcpyDeviceToHost );
hipMemcpy( m_dTrianglesInCubes, m_hTrianglesInCubes, sizeof(uint)*m_nMembraneTypes*k_totalNumCubes*k_maxTrianglesPerCube, hipMemcpyDeviceToHost );
hipMemcpy( m_dCubeCounter, m_hCubeCounter, sizeof(uint)*m_nMembraneTypes*k_totalNumCubes, hipMemcpyDeviceToHost );
*/
}
//////////////////////////////////////////////////////////////////////////////////////
// Function name: integrateSystemVBO
// Description: Register the vertex buffer object for access by CUDA, perform
// the GPU computation using integrateSystem, then unregister
// the VBO.
//////////////////////////////////////////////////////////////////////////////////////
// Host-side launcher for the integrate() kernel (VBO variant): maps the GL
// position buffer, stages all inputs on the device, sets the device symbols,
// launches the kernel, and copies results back.
void integrateSystemVBO(
float* vboPos,
uint* randSeed,
spinData* spinInfo,
float deltaTime,
float permeability,
uint numBodies,
float3 gradient,
float phaseConstant,
uint itr, uint* trianglesInCubes, uint* cubeCounter,uint m_nMembraneTypes, uint m_nPosValues, uint m_numSpins, uint m_nSeedValues, uint m_numCompartments, float* m_hT2Values, float* m_hStdDevs, uint m_reflectionType, uint m_triSearchMethod, uint m_nFibers, uint m_nSpinValues, uint m_totalNumCubes, uint m_maxTrianglesPerCube, uint m_numCubes, uint m_posVbo
){
// hipGLMapBufferObject__((void**)&pos, vboPos);
// Print device properties once, on the first invocation only.
static bool firstCall = true;
int i =0;
struct hipDeviceProp_t devInfo;
hipGetDeviceProperties(&devInfo, i);
if (firstCall){
// Write out some info
printf("\n\n\n\n\nCUDA device info:\n\n");
printf("Name: %s\n", devInfo.name);
printf("totalGlobalMem: %u\n", devInfo.totalGlobalMem);
printf("sharedMemPerBlock: %u\n", devInfo.sharedMemPerBlock);
printf("regsPerBlock: %u\n", devInfo.regsPerBlock);
printf("warpSize: %u\n", devInfo.warpSize);
printf("memPitch: %u\n", devInfo.memPitch);
printf("maxThreadsPerBlock: %u\n", devInfo.maxThreadsPerBlock);
printf("\n\n");
firstCall = false;
}
hipError_t cudaerr;
// Map the GL position VBO to get a host-visible pointer to spin positions.
float *pos1;
hipGLMapBufferObject__((void**)&pos1, m_posVbo);
float* m_dStdDevs; // Test
float* m_dT2Values;
spinData* m_dSpins;
uint2* m_dSeed;
float3* t_pos;
float3* pos = (float3*) pos1;
// Trying to cast the Pos variable form float to float3. The same is done for the seed variable (cast from uint to uint2).
/*for(int jj=0;jj<numBodies*m_nPosValues;jj+=3)
{
pos[jj] = make_float3(vboPos[jj],vboPos[jj+1],vboPos[jj+2]);
//std::cout<<pos[jj].x<<"\n";
}
uint2* seeds = (uint2*)malloc(sizeof(uint2)* m_nSeedValues * m_numSpins);
for(int jj=0;jj<* m_nSeedValues * m_numSpins;jj++)
{
pos[jj] = make_float3(vboPos[jj],vboPos[jj+1],vboPos[jj+2]);
//std::cout<<pos[jj].x<<"\n";
}
*/
// Copying the variables from host to device
// NOTE(review): sizeof(float3)*m_nPosValues*m_numSpins -- if m_nPosValues is
// the number of floats per spin (3), this over-sizes the buffer/copy by 3x.
// TODO confirm against the caller's definition of m_nPosValues.
hipMalloc(&t_pos, sizeof(float3) * m_nPosValues * m_numSpins);
hipMemcpy(t_pos, pos, sizeof(float3) * m_nPosValues * m_numSpins, hipMemcpyHostToDevice);
hipMalloc((void **)&m_dSpins, sizeof(spinData) * m_numSpins);
hipMemcpy(m_dSpins, spinInfo, sizeof(spinData) * m_numSpins,hipMemcpyHostToDevice);
uint2* m_dseed = (uint2*) randSeed;
// NOTE(review): allocation is sizeof(uint2)*m_nSeedValues*m_numSpins but the
// copy below transfers only m_numSpins*m_nSeedValues*sizeof(uint) bytes
// (half of that) -- presumably one of the two sizes is wrong; verify.
hipMalloc((void **)&m_dSeed, sizeof(uint2) * m_nSeedValues * m_numSpins);
hipMemcpy(m_dSeed,m_dseed , m_numSpins*m_nSeedValues*sizeof(uint),hipMemcpyHostToDevice);
uint* m_dTrianglesInCubes;
uint* m_dCubeCounter;
hipMalloc((void**)&m_dTrianglesInCubes, m_totalNumCubes*m_maxTrianglesPerCube*sizeof(uint));
hipMalloc((void**)&m_dCubeCounter, m_totalNumCubes*sizeof(uint));
hipMemcpy(m_dTrianglesInCubes, trianglesInCubes, m_totalNumCubes*m_maxTrianglesPerCube*sizeof(uint),hipMemcpyHostToDevice);
hipMemcpy(m_dCubeCounter, cubeCounter, m_totalNumCubes*sizeof(uint),hipMemcpyHostToDevice);
hipMalloc((void**)&m_dT2Values, m_numCompartments*sizeof(float));
hipMemcpy(m_dT2Values, m_hT2Values, m_numCompartments*sizeof(float),hipMemcpyHostToDevice);
hipMalloc((void**)&m_dStdDevs, m_numCompartments*sizeof(float));
hipMemcpy(m_dStdDevs, m_hStdDevs, m_numCompartments*sizeof(float),hipMemcpyHostToDevice);
//print_last_cuda_error();
// Set constants in device memory
hipMemcpyToSymbol(k_reflectionType, &m_reflectionType, sizeof(uint));
hipMemcpyToSymbol(k_triSearchMethod, &m_triSearchMethod, sizeof(uint));
hipMemcpyToSymbol(k_nFibers, &m_nFibers, sizeof(uint));
hipMemcpyToSymbol(k_nCompartments, &m_numCompartments, sizeof(uint));
// NOTE(review): duplicate of the line above -- harmless but redundant.
hipMemcpyToSymbol(k_nCompartments, &m_numCompartments, sizeof(uint));
hipMemcpyToSymbol(k_permeability, &permeability, sizeof(float));
hipMemcpyToSymbol(k_deltaTime, &deltaTime, sizeof(float));
// NOTE(review): k_T2Values/k_stdDevs are declared as __device__ float*
// symbols, yet float data is copied into them here; the kernel reads its
// m_dT2Values/m_dStdDevs arguments instead, so these look like dead writes.
hipMemcpyToSymbol(k_T2Values, m_dT2Values, m_numCompartments*sizeof(float));
hipMemcpyToSymbol(k_stdDevs, m_dStdDevs,m_numCompartments*sizeof(float));
// Edge length of one grid cube in the [-1,1]^3 volume.
float m_cubeLength = 2.0f / m_numCubes;
hipMemcpyToSymbol(k_cubeLength, &m_cubeLength, sizeof(float));
hipMemcpyToSymbol(m_dNumSpins, &numBodies, sizeof(uint));
hipMemcpyToSymbol(gradientX, &(gradient.x), sizeof(float));
hipMemcpyToSymbol(gradientY, &(gradient.y), sizeof(float));
hipMemcpyToSymbol(gradientZ, &(gradient.z), sizeof(float));
// NOTE(review): 'interations' is declared __device__ int but is copied with
// sizeof(float) -- same byte count on supported targets, yet fragile.
hipMemcpyToSymbol(interations, &(itr), sizeof(float));
// Number of threads will normally be 128
// NOTE(review): integer division drops the tail -- numBodies % numThreads
// spins are never launched when numBodies is not a multiple of numThreads.
int numThreads = min(128, numBodies);
int numBlocks = numBodies/numThreads;
//hipDeviceSynchronize();
// Execute the kernel
// NOTE(review): the launch passes the HOST-side trianglesInCubes/cubeCounter
// parameters rather than the freshly staged m_dTrianglesInCubes/
// m_dCubeCounter buffers -- TODO confirm whether the caller already passes
// device pointers (otherwise the staging copies above are dead code).
hipLaunchKernelGGL((
integrate), dim3(numBlocks), dim3(numThreads), 0, 0, t_pos, m_dSeed, m_dSpins, deltaTime, permeability, numBodies, gradient.x, gradient.y, gradient.z, phaseConstant, itr, trianglesInCubes, cubeCounter, m_dStdDevs,m_dT2Values);
cudaerr = hipDeviceSynchronize();
if (cudaerr != hipSuccess)
printf("kernel launch failed with error \"%s\".\n",
hipGetErrorString(cudaerr));
// Copying the variables from Device to Host
hipMemcpy( pos, t_pos, m_numSpins*m_nPosValues*sizeof(float3), hipMemcpyDeviceToHost );
hipMemcpy( m_dseed, m_dSeed, sizeof(uint2) * m_nSeedValues * m_numSpins, hipMemcpyDeviceToHost );
hipMemcpy( spinInfo, m_dSpins, sizeof(spinData) * m_numSpins, hipMemcpyDeviceToHost );
// NOTE(review): the two assignments below rewrite pass-by-value parameters
// and therefore have no effect outside this function.
randSeed = (uint *)m_dseed;
vboPos = (float*)pos;
/*for(int jj=0;jj<numBodies*m_nPosValues;jj+=3)
{
vboPos[jj] = pos[jj].x;
vboPos[jj+1] = pos[jj].y;
vboPos[jj+2] = pos[jj].z;
//std::cout<<pos[jj].x<<"\n";
}
*/
// Release the VBO mapping and all temporary device buffers.
hipGLUnmapBufferObject(m_posVbo);
hipFree(t_pos);
hipFree(m_dSeed);
hipFree(m_dTrianglesInCubes);
hipFree(m_dCubeCounter);
hipFree(m_dSpins);
hipFree(m_dStdDevs);
hipFree(m_dT2Values);
}
} // extern "C"
|
5fc307e9875a71a751578a6746a4b44a3060939a.cu
|
#include <cstdlib>
#include <cstdio>
#include <string.h>
#include <GL/glut.h>
#include <helper_functions.h>
#include <helper_cuda.h>
#include <helper_cuda_gl.h>
#include <helper_cuda_drvapi.h>
#include <helper_image.h>
#include <helper_math.h>
#include <helper_string.h>
#include <helper_timer.h>
#include <cuda_gl_interop.h>
//#include "spinKernel.cu"
#include "radixsort.cu"
//#include "dSimDataTypes.h"
#define PI 3.14159265358979f
#define TWOPI 6.28318530717959f
///////////////////////////////////////////////////////////////////////////////////////
// File name: spinKernel.cu
// Description: Kernel for spin computations using GPU
///////////////////////////////////////////////////////////////////////////////////////
#ifndef _SPIN_KERNEL_H_
#define _SPIN_KERNEL_H_
#include <stdio.h>
#include <math.h>
#include <helper_math.h>
#include "math_constants.h"
#include "cuda.h"
#include "options.h"
#include "dSimDataTypes.h"
//////////////////////////////////////////////////////////////////////////////////
// Define texture arrays and constants, copied to device from host.
//////////////////////////////////////////////////////////////////////////////////
// 1-D texture references, bound from the host via the bind*/unbind* helpers.
texture<uint,1,cudaReadModeElementType> texCubeCounter;
texture<uint,1,cudaReadModeElementType> texTrianglesInCubes;
//texture<uint,1,cudaReadModeElementType> texTrgls;
texture<float,1,cudaReadModeElementType> texVertices;
texture<float,1,cudaReadModeElementType> texTriangleHelpers;
texture<float,1,cudaReadModeElementType> texRTreeArray;
texture<uint,1,cudaReadModeElementType> texCombinedTreeIndex;
texture<uint,1,cudaReadModeElementType> texTriInfo;
// Simulation constants, written from the host with hipMemcpyToSymbol.
__device__ uint k_reflectionType;  // 0 = bounce back to start, otherwise mirror reflection (see reflectPos)
__device__ uint k_triSearchMethod; // triangle search method selector (R-Tree path currently disabled)
__device__ uint k_numCubes;        // grid resolution per axis
__device__ uint k_totalNumCubes;
__device__ uint k_maxTrianglesPerCube;
__device__ float k_cubeLength;     // cube edge length, set to 2.0f / m_numCubes by the host
__device__ uint k_nFibers;
__device__ uint k_nCompartments;
__device__ float k_permeability;
__device__ float k_deltaTime;
// NOTE(review): declared as device pointers, but host code copies float data
// (or a host pointer's bytes) into them and the kernel reads its
// m_dT2Values/m_dStdDevs arguments instead -- likely dead state.
__device__ float *k_T2Values;
__device__ float *k_stdDevs;
__device__ float gradientX;
__device__ float gradientY;
__device__ float gradientZ;
__device__ uint m_dNumSpins;       // number of spins; bound check in integrate()
__device__ int interations;        // [sic] iteration count for integrate()'s main loop
typedef unsigned int uint;
/////////////////////////////////////////////////////////////////////////////////////
// The structure collResult will be used to store outcomes from checks of whether
// collision occurs between a ray and a triangle.
/////////////////////////////////////////////////////////////////////////////////////
// Outcome of a ray/triangle collision query.
// NOTE(review): collPoint/collIndex/collDistSq are presumably only
// meaningful when collisionType != 0 -- confirm in the producing code.
typedef struct _collResult
{
uint collisionType; // 0 if no collision, 1 if collision within triangle, 2 if collision with triangle edge, 3 if collision with triangle vertex
float3 collPoint; // Point of collision with triangle
uint collIndex; // Index of collision triangle
float collDistSq; // Distance squared from starting point to collision point
}collResult;
// Some simple vector ops for float3's (dot and length are defined in cudautil_math)
//#define dot(u,v) ((u).x * (v).x + (u).y * (v).y + (u).z * (v).z)
//#define length(v) sqrt(dot(v,v)) // norm (vector length)
// Distance between two points (norm of the difference).
// Bug fix: macro arguments must be parenthesized. The previous form
// expanded d(P, LP1 + b*v) to length(P - LP1 + b*v), i.e.
// length((P - LP1) + b*v), instead of the intended length(P - (LP1 + b*v)).
#define d(u,v) length((u)-(v)) // distance (norm of difference)
//////////////////////////////////////////////////////////////////////////
// Function name: point_line_dist
// Description: Returns the shortest distance from a point P to a
// line defined by two points (LP1 and LP2)
//////////////////////////////////////////////////////////////////////////
// Returns the shortest distance from point P to the infinite line through
// LP1 and LP2.
__device__ float point_line_dist(float3 P, float3 LP1, float3 LP2){
    float3 v = LP2-LP1;                 // direction of the line
    float b = dot(P-LP1,v)/dot(v,v);    // parameter of P's projection onto the line
    // Bug fix: the original used the d(u,v) macro, whose unparenthesized
    // expansion turned d(P, LP1+b*v) into length(P - LP1 + b*v) instead of
    // the intended length(P - (LP1 + b*v)). Compute the distance explicitly.
    return length(P - (LP1 + b*v));
}
///////////////////////////////////////////////////////////////////////////
// Function name: point_seg_dist
// Description: Returns the shortest distance from a point P to a
// line segment defined by two points (SP1 and SP2)
///////////////////////////////////////////////////////////////////////////
__device__ float point_seg_dist(float3 P, float3 SP1, float3 SP2){
    // Shortest distance from P to the segment [SP1, SP2].
    float3 segDir = SP2 - SP1;
    float proj = dot(P - SP1, segDir);
    // Projection falls before the start of the segment: SP1 is closest.
    if (proj <= 0) return length(P - SP1);
    float segLenSq = dot(segDir, segDir);
    // Projection falls past the end of the segment: SP2 is closest.
    if (segLenSq <= proj) return length(P - SP2);
    // Otherwise the closest point lies strictly inside the segment.
    float3 closest = SP1 + proj/segLenSq*segDir;
    return length(P - closest);
}
//////////////////////////////////////////////////////////////////////////////
// Function name: boxMuller
// Description: Generates a pair of independent standard normally
// distributed random numbers from a pair of
// uniformly distributed random numbers, using the basic form
// of the Box-Muller transform
// (see http://en.wikipedia.org/wiki/Box%E2%80%93Muller_transform)
//////////////////////////////////////////////////////////////////////////////
__device__ void boxMuller(float& u1, float& u2){
    // In-place Box-Muller transform: (u1,u2) uniform in (0,1] becomes a pair
    // of independent standard normal deviates. Uses the fast device
    // intrinsics __logf/__cosf/__sinf.
    float radius = sqrtf(-2.0f * __logf(u1));
    float angle = TWOPI * u2;
    u1 = radius * __cosf(angle);
    u2 = radius * __sinf(angle);
}
//////////////////////////////////////////////////////////////////////////////
// Function name: myRand
// Description: Simple multiply-with-carry PRNG that uses two seeds
// (seed[0] and seed[1]) (Algorithm from George Marsaglia:
// http://en.wikipedia.org/wiki/George_Marsaglia)
//////////////////////////////////////////////////////////////////////////////
//__device__ uint myRand(uint seed[]){
// seed[0] = 36969 * (seed[0] & 65535) + (seed[0] >> 16);
// seed[1] = 18000 * (seed[1] & 65535) + (seed[1] >> 16);
// return (seed[0] << 16) + seed[1];
//}
__device__ uint myRand(uint2 &seed){
    // Advance each half of the state with a multiply-with-carry step.
    seed.x = (seed.x & 65535) * 36969 + (seed.x >> 16);
    seed.y = (seed.y & 65535) * 18000 + (seed.y >> 16);
    // Combine the two streams into a single 32-bit result.
    return (seed.x << 16) + seed.y;
}
/////////////////////////////////////////////////////////////////////////////
// Function name: myRandf
// Description: Returns a random float r in the range 0<=r<=1
/////////////////////////////////////////////////////////////////////////////
//__device__ float myRandf(uint seed[]){
// return ((float)myRand(seed) / 4294967295.0f);
//}
/////////////////////////////////////////////////////////////////////////////
// Function name: myRandDir
// Description: Return a vector with a specified magnitude (adc) and
// a random direction
/////////////////////////////////////////////////////////////////////////////
//__device__ void myRandDir(uint seed[], float adc, float3& vec){
// // Azimuth and elevation are on the interval [0,2*pi]
// // (2*pi)/4294967294.0 = 1.4629181e-09f
// float az = (float)myRand(seed) * 1.4629181e-09f;
// float el = (float)myRand(seed) * 1.4629181e-09f;
// vec.z = adc * __sinf(el);
// float rcosel = adc * __cosf(el);
// vec.x = rcosel * __cosf(az);
// vec.y = rcosel * __sinf(az);
// return;
//}
//////////////////////////////////////////////////////////////////////////////
// Function name: myRandn
// Description: Returns three normally distributed random numbers
// and one uniformly distributed random number.
//////////////////////////////////////////////////////////////////////////////
/*__device__ void myRandn(uint seed[], float& n1, float& n2, float& n3, float& u){
// We want random numbers in the range (0,1], i.e. 0<n<=1
n1 = ((float)myRand(seed) + 1.0f) / 4294967296.0f;
n2 = ((float)myRand(seed) + 1.0f) / 4294967296.0f;
n3 = ((float)myRand(seed) + 1.0f) / 4294967296.0f;
u = ((float)myRand(seed) + 1.0f) / 4294967296.0f;
// Note that ULONG_MAX=4294967295
float n4 = u;
boxMuller(n1,n2);
boxMuller(n3,n4);
return;
}*/
__device__ void myRandn(uint2 &seed, float& n1, float& n2, float& n3, float& u){
    // Draw four uniform PRNs in (0,1]: (myRand(seed) + 1) / 2^32.
    // (ULONG_MAX = 4294967295, so the +1 keeps the result strictly > 0.)
    n1 = ((float)myRand(seed) + 1.0f) / 4294967296.0f;
    n2 = ((float)myRand(seed) + 1.0f) / 4294967296.0f;
    n3 = ((float)myRand(seed) + 1.0f) / 4294967296.0f;
    u = ((float)myRand(seed) + 1.0f) / 4294967296.0f;
    // Box-Muller consumes uniforms in pairs; n4 is a scratch copy of u so
    // that u itself survives as the bonus uniformly distributed output.
    float n4 = u;
    boxMuller(n1, n2);
    boxMuller(n3, n4);
}
/////////////////////////////////////////////////////////////////////////////////////////////////////////////
// Function name: calcCubePosGPU // Rename later to calcCubePos(...)
// Description: Function calculates the cube cell to which the given position belongs in uniform cube.
// Converts a position coordinate (ranging from (-1,-1,-1) to (1,1,1) to a cube
// coordinate (ranging from (0,0,0) to (m_numCubes-1, m_numCubes-1, m_numCubes-1)).
/////////////////////////////////////////////////////////////////////////////////////////////////////////////
// Map a position in [-1,1]^3 to its grid-cube coordinate in
// [0, k_numCubes-1]^3, clamping out-of-range positions to the boundary.
__device__ uint3 calcCubePosGPU(float3 p){
    // Bug fix: clamp in SIGNED arithmetic before converting to uint. The
    // original assigned floor(...) straight into a uint, so positions below
    // -1 wrapped around to huge unsigned values that the subsequent
    // max(0, ...) clamp could not catch.
    int maxIdx = (int)k_numCubes - 1;
    int ix = (int)floorf((p.x + 1.0f) / k_cubeLength);
    int iy = (int)floorf((p.y + 1.0f) / k_cubeLength);
    int iz = (int)floorf((p.z + 1.0f) / k_cubeLength);
    uint3 cubePos;
    cubePos.x = (uint)max(0, min(ix, maxIdx));
    cubePos.y = (uint)max(0, min(iy, maxIdx));
    cubePos.z = (uint)max(0, min(iz, maxIdx));
    return cubePos;
}
///////////////////////////////////////////////////////////////////////////////////////////////////////////////
// Function name: calcCubeHashGPU // Rename later to calcCubeHash(...)
// Description: Calculate address in cube from position (clamping to edges)
///////////////////////////////////////////////////////////////////////////////////////////////////////////////
__device__ uint calcCubeHashGPU(uint3 cubePos){
    // Linearize (x,y,z): z varies slowest, x fastest.
    uint n = k_numCubes;
    return (cubePos.z * n + cubePos.y) * n + cubePos.x;
}
/////////////////////////////////////////////////////////////////////////////////////////////////////////////////
// Function name: reflectPos
// Description: Given a particle that tries to travel from startPos to targetPos, but collides with triangle
// number collTriIndex at collPos, we calculate the position which the particle gets reflected to.
// This applies if reflectionType==1. If reflectionType==0, we do a simplified reflection,
// where the particle just gets reflected to its original position. This is also done if we hit
// a triangle edge or a triangle vertex (which gives collisionType==2 or collisionTYpe==3).
//////////////////////////////////////////////////////////////////////////////////////////////////////////////////
__device__ float3 reflectPos(float3 startPos, float3 targetPos, float3 collPos, uint collTriIndex, uint collisionType){
    // Simplified reflection: bounce the particle straight back to where it
    // came from. Used when k_reflectionType==0, or when the hit landed on a
    // triangle edge/vertex (collisionType 2 or 3), where the mirror normal
    // is ambiguous.
    if ((k_reflectionType==0)|(collisionType>1)){
        return startPos;
    }
    // Otherwise mirror the target point through the plane of triangle
    // collTriIndex (reflection matrix form, see
    // http://en.wikipedia.org/wiki/Transformation_matrix). The unit normal
    // comes from the precomputed per-triangle helper table (words 0..2).
    float3 rel = targetPos-collPos;   // target relative to the collision point
    float3 n;
    n = make_float3(tex1Dfetch(texTriangleHelpers,collTriIndex*12+0),tex1Dfetch(texTriangleHelpers,collTriIndex*12+1),tex1Dfetch(texTriangleHelpers,collTriIndex*12+2));
    // Apply (I - 2*n*n^T) to rel, then translate back by collPos.
    float3 mirrored;
    mirrored.x = (1-2*n.x*n.x)*rel.x - 2*n.x*n.y*rel.y - 2*n.x*n.z*rel.z + collPos.x;
    mirrored.y = -2*n.x*n.y*rel.x + (1-2*n.y*n.y)*rel.y - 2*n.y*n.z*rel.z + collPos.y;
    mirrored.z = -2*n.x*n.z*rel.x - 2*n.y*n.z*rel.y + (1-2*n.z*n.z)*rel.z + collPos.z;
    return mirrored;
}
//////////////////////////////////////////////////////////////////////////////////////////////
// Function name: triCollDetect
// Description: Find whether the path from oPos to pos intersects triangle no. triIndex.
// Returns the collision result, which consists of
// result.collPoint = the collision/intersection point between
// the ray and the triangle.
// result.collIndex = the index of the collision triangle if
// collision occurs
// result.collisionType = 0 if no collision, 1 within triangle boundaries,
// 2 if collision with triangle edge, 3 if
// collision with triangle vertex
// result.collDistSq = the distance (squared) from oPos to
// the collision point.
//////////////////////////////////////////////////////////////////////////////////////////////
__device__ collResult triCollDetect(float3 oPos, float3 pos, uint triIndex){
uint firstPointIndex;
float uv, uu, vv, wu, wv, r, s, t, stDen;
float3 triP1, d, w, n, u, v, collPoint;
collResult result;
// Default: no collision (collisionType==0). The other result fields are
// only meaningful when a collision is actually recorded below.
result.collisionType = 0;
// firstPointIndex is the index of the "first" point in the triangle
firstPointIndex = tex1Dfetch(texTriInfo, triIndex*3+2);
// triP1 holds the coordinates of the first point
triP1 = make_float3(tex1Dfetch(texVertices,firstPointIndex*3+0),tex1Dfetch(texVertices,firstPointIndex*3+1),tex1Dfetch(texVertices,firstPointIndex*3+2));
// Precomputed per-triangle data (12 floats per triangle):
// n: normal to the triangle. u: vector from first point to second point. v: vector from first point to third point. uv, uu, vv: dot products.
n = make_float3(tex1Dfetch(texTriangleHelpers,triIndex*12+0),tex1Dfetch(texTriangleHelpers,triIndex*12+1),tex1Dfetch(texTriangleHelpers,triIndex*12+2));
u = make_float3(tex1Dfetch(texTriangleHelpers,triIndex*12+3),tex1Dfetch(texTriangleHelpers,triIndex*12+4),tex1Dfetch(texTriangleHelpers,triIndex*12+5));
v = make_float3(tex1Dfetch(texTriangleHelpers,triIndex*12+6),tex1Dfetch(texTriangleHelpers,triIndex*12+7),tex1Dfetch(texTriangleHelpers,triIndex*12+8));
uv = tex1Dfetch(texTriangleHelpers,triIndex*12+9);
uu = tex1Dfetch(texTriangleHelpers,triIndex*12+10);
vv = tex1Dfetch(texTriangleHelpers,triIndex*12+11);
// First find whether the path intersects the plane defined by triangle i. See method at http://softsurfer.com/Archive/algorithm_0105/algorithm_0105.htm
// r parameterizes the segment: intersection point = oPos + r*(pos-oPos).
// NOTE(review): if the segment is parallel to the plane, dot(n,pos-oPos)==0
// and r becomes inf/NaN; the (0<r)&(r<1) test then fails, so no collision
// is reported in that degenerate case.
r = dot(n,triP1-oPos)/dot(n,pos-oPos);
if ((0<r)&(r<1)){
// Then find if the path intersects the triangle itself. See method at http://softsurfer.com/Archive/algorithm_0105/algorithm_0105.htm
d = r*(pos-oPos);
collPoint = oPos + d;
w = collPoint-triP1;
wu = dot(w,u);
wv = dot(w,v);
// Solve for the parametric (s,t) coordinates of collPoint in the triangle's
// (u,v) basis; inside the triangle iff s>=0, t>=0 and s+t<=1.
stDen = uv*uv-uu*vv;
s = (uv*wv-vv*wu)/stDen;
t = (uv*wu-uu*wv)/stDen;
if ( (s>=0)&(t>=0)&(s+t<=1) ){ // We have a collision with the triangle
result.collDistSq = dot(d,d);
result.collIndex = triIndex;
result.collPoint = collPoint;
result.collisionType = 1;
// Edge/vertex classification uses EXACT float equality, so types 2 and 3
// will almost never trigger in practice (which routes such rare hits to the
// simplified bounce-back reflection in reflectPos).
if ( (s==0)|(t==0)|(s+t==1) ){ // The collision point is on a triangle edge
result.collisionType = 2;
if ( ((s==0)&(t==0))|((s==0)&(t==1))|((s==1)&(t==0)) ){ // The collision point is on a triangle vertex
result.collisionType = 3;
}
}
}
}
return result;
}
/////////////////////////////////////////////////////////////////////////////////////////
// Function name: SearchRTreeArray
// Description: Find the leaf rectangles in the R-Tree which intersect the rectangle
// rect (=[x_min,y_min,z_min,x_max,y_max,z_max]). Normally, rect will
// be a bounding rectangle for a particle path and the leaf rectangles
// of the R-Tree will be bounding rectangles for fiber triangles.
// When the rectangles intersect, that means the particle might collide
// with the triangle. The indices of such triangles are written into
// intersectArray (to be further checked for actual collisions), and
// the number of intersecting rectangles is returned in the output
// foundCount.
/////////////////////////////////////////////////////////////////////////////////////////
// Iterative (stack-based) depth-first search of the flattened R-Tree.
// Tree layout implied by the indexing below: a node starts with
// [level>0?, branchCount], followed by branchCount branches of 7 words each:
// [childNodeIndex-or-triangleIndex, xmin, ymin, zmin, xmax, ymax, zmax].
// The tree to search is selected by (compartment, fiberInside) via
// texCombinedTreeIndex. Intersecting leaf triangle indices are written to
// interSectArray; the count is returned.
// NOTE(review): neither `stack` (100 entries) nor interSectArray is
// bounds-checked -- an unexpectedly deep tree or a path crossing very many
// triangle rectangles would overflow silently. Confirm sizes against mesh.
__device__ uint SearchRTreeArray(float* rect, uint* interSectArray, uint8 &compartment, uint16 &fiberInside){
uint foundCount = 0;
uint stack[100]; // Maximum necessary stack size should be 1+7*(treeHeight) = 1+7*(n_levels-1). 100 should suffice for n_levels <= 15 - very big tree
int stackIndex = 0;
// Push the root node of the appropriate tree onto the stack. Compartment 0
// (extracellular) uses the first combined-tree entry; other compartments
// select a fiber-specific tree.
if (compartment != 0){ // We push the location of the root node onto the stack
stack[stackIndex] = tex1Dfetch(texCombinedTreeIndex,fiberInside*(k_nCompartments-1)+compartment); // = 0 for "first" tree, i.e. tree corresponding to innermost compartment
} else{
stack[stackIndex] = tex1Dfetch(texCombinedTreeIndex,0);
}
stackIndex++;
uint currentNodeIndex;
while (stackIndex > 0){ // Stop when we've emptied the stack
stackIndex--; // Pop the top node off the stack
currentNodeIndex = stack[stackIndex];
// Word currentNodeIndex+1 holds the branch count of this node.
for (int m=tex1Dfetch(texRTreeArray,currentNodeIndex+1)-1; m>=0; m--){
uint currentBranchIndex = currentNodeIndex+2 + m*7;
//See if the branch rectangle overlaps with the input rectangle
// (standard AABB overlap test, expressed as "not disjoint").
if (!( tex1Dfetch(texRTreeArray,currentBranchIndex+1) > rect[3] || // branchRect.x_min > rect.x_max
tex1Dfetch(texRTreeArray,currentBranchIndex+2) > rect[4] || // branchRect.y_min > rect.y_max
tex1Dfetch(texRTreeArray,currentBranchIndex+3) > rect[5] || // branchRect.z_min > rect.z_max
rect[0] > tex1Dfetch(texRTreeArray,currentBranchIndex+4) || // rect.x_min > branchRect.x_max
rect[1] > tex1Dfetch(texRTreeArray,currentBranchIndex+5) || // rect.y_min > branchRect.y_max
rect[2] > tex1Dfetch(texRTreeArray,currentBranchIndex+6) )) // rect.z_min > branchRect.z_max
{
// Word currentNodeIndex holds the node level: >0 means internal node.
if (tex1Dfetch(texRTreeArray,currentNodeIndex) > 0){ // We are at an internal node - push the node pointed to in the branch onto the stack
stack[stackIndex] = tex1Dfetch(texRTreeArray,currentBranchIndex);
stackIndex++;
} else {
interSectArray[foundCount] = tex1Dfetch(texRTreeArray,currentBranchIndex); // We are at a leaf - store corresponding triangle index
foundCount++;
}
}
}
}
return foundCount;
}
//////////////////////////////////////////////////////////////////////////////////////////
// Function name: collDetectRTree
// Description: See whether a particle trying to go from startPos to targetPos
// collides with any triangle in the mesh, using the R-Tree. Return
// the final position of the particle.
//////////////////////////////////////////////////////////////////////////////////////////
// R-Tree based collision handling for one diffusion step. Repeatedly:
// (1) bound the current path segment with an AABB, (2) collect candidate
// triangles from the R-Tree, (3) find the nearest actual intersection,
// (4) either reflect the particle (no permeation) or let it cross the
// membrane and update its compartment/fiber assignment (passed by
// reference). Loops until a segment produces no collision.
__device__ float3 collDetectRTree(float3 startPos, float3 targetPos, float u, uint8 &compartment, uint16 &fiberInside){
float3 endPos = targetPos;
uint hitArray[1200]; // hitArray stores the indices of candidate collision triangles - we are assuming no more than 1200. NOTE(review): SearchRTreeArray does not bounds-check its writes; confirm 1200 is a safe upper bound for this mesh.
float spinRectangle[6];
collResult result, tempResult;
result.collDistSq = 400000000; // Some really large number, will use this to store the smallest distance to a collision point
result.collisionType = 1;
result.collIndex = UINT_MAX;
uint excludedTriangle = UINT_MAX;
float u_max = 1, u_min = 0;
while (result.collisionType>0){ // If we have detected a collision, we repeat the collision detection for the new, reflected path
result.collisionType = 0; // First assume that the particle path does not experience any collisions
// Define a rectangle that bounds the particle path from corner to corner
// Finding minx, miny, minz
spinRectangle[0] = startPos.x; if (targetPos.x < spinRectangle[0]){spinRectangle[0] = targetPos.x;}
spinRectangle[1] = startPos.y; if (targetPos.y < spinRectangle[1]){spinRectangle[1] = targetPos.y;}
spinRectangle[2] = startPos.z; if (targetPos.z < spinRectangle[2]){spinRectangle[2] = targetPos.z;}
// Finding maxx, maxy, maxz
spinRectangle[3] = startPos.x; if (targetPos.x > spinRectangle[3]){spinRectangle[3] = targetPos.x;}
spinRectangle[4] = startPos.y; if (targetPos.y > spinRectangle[4]){spinRectangle[4] = targetPos.y;}
spinRectangle[5] = startPos.z; if (targetPos.z > spinRectangle[5]){spinRectangle[5] = targetPos.z;}
// Find the triangles whose bounding rectangles intersect spinRectangle. They are written to hitArray and their number is nHits.
int nHits = SearchRTreeArray(spinRectangle, hitArray, compartment, fiberInside);
// Loop through the triangles in hitArray, see if we have collisions, store the closest collision point in the variable result.
// (Note: `k` is uint while nHits is int; nHits is a non-negative count.)
for (uint k=0; k<nHits; k++){
uint triIndex = hitArray[k];
if (triIndex != excludedTriangle){
tempResult = triCollDetect(startPos, targetPos, triIndex);
if ((tempResult.collisionType>0) & (tempResult.collDistSq < result.collDistSq)){
result = tempResult;
}
}
}
// If we have a collision, then we find the resulting point which the particle gets reflected to.
if (result.collisionType>0){
// If u>u_max-(u_max-u_min)*k_permeability, then the particle permeates through the membrane and does not get reflected.
// u is in the range (0,1]. The [u_min,u_max] window shrinks with each
// membrane encounter so one uniform draw decides a whole chain of hits.
if (u<=u_max-(u_max-u_min)*k_permeability){ // The spin does not permeate the membrane
endPos = reflectPos(startPos, targetPos, result.collPoint, result.collIndex, result.collisionType);
u_max = u_max-(u_max-u_min)*k_permeability;
} else{ // The spin permeates the membrane - endPos stays targetPos and the
// particle continues from the collision point on the far side.
u_min = u_max-(u_max-u_min)*k_permeability;
// Change the compartment (and fiber, if appropriate) assignment of the spin.
// texTriInfo word triIndex*3+1 is the membrane type (0: axon, 1: myelin surface).
if (compartment == 2){
if (tex1Dfetch(texTriInfo, result.collIndex*3+1) == 0){ // We are going from compartment 2 through axon surface - new compartment is 1
compartment = 1;
} else { // We are going from compartment 2 through myelin surface - new compartment is 0
compartment = 0;
fiberInside = UINT16_MAX;
}
} else if (compartment == 1){
compartment = 2; // We are going from compartment 1 through axon surface - new compartment is 2
} else if (compartment == 3){
compartment = 0; // We are going from compartment 3 through glia surface - new compartment is 0
fiberInside = UINT16_MAX;
} else {
// Entering a fiber from compartment 0: record which fiber (word triIndex*3+0).
fiberInside = tex1Dfetch(texTriInfo, result.collIndex*3+0);
if (tex1Dfetch(texTriInfo, result.collIndex*3+1) == 1){ // We are going from compartment 0 through myelin surface - new compartment is 2
compartment = 2;
} else { // We are going from compartment 0 through glia surface - new compartment is 3
compartment = 3;
}
}
}
}
// Redefine the start and end points for the reflected path, then repeat until no collision is detected.
// (When no collision occurred this iteration, these assignments are dead:
// the loop exits immediately and endPos is returned unchanged.)
startPos = result.collPoint;
targetPos = endPos;
excludedTriangle = result.collIndex; // Make sure we don't detect a collision with the triangle which the particle bounces from
result.collDistSq = 400000000;
}
return endPos;
}
/////////////////////////////////////////////////////////////////////////////////////////////
// Function name: cubeCollDetect
// Description: Determine whether a particle traveling from oPos to pos experiences
// a collision with any of the triangles in cube no. cubeIndex. Triangle
// no. excludedTriangle is not checked - useful if the particle is bouncing
// off that triangle.
/////////////////////////////////////////////////////////////////////////////////////////////
// Scan every triangle registered in cube `cubeIndex` and return the nearest
// collision along the path oPos -> pos. Triangle `excludedTriangle` is
// skipped (useful when the particle is bouncing off that very triangle).
// Returns collisionType==0 when nothing on the path is hit.
__device__ collResult cubeCollDetect(float3 oPos, float3 pos, uint cubeIndex, uint excludedTriangle, uint* trianglesInCubes, uint* cubeCounter){
    collResult best, probe;
    best.collisionType = 0;          // assume no hit until one is found
    best.collDistSq = 400000000;     // sentinel "infinite" distance
    best.collIndex = UINT_MAX;
    // cubeCounter[cubeIndex] = number of triangles stored for this cube;
    // their indices live in a fixed-stride slab of trianglesInCubes.
    uint triCount = cubeCounter[cubeIndex];
    for (uint slot = 0; slot < triCount; slot++){
        uint candidate = trianglesInCubes[cubeIndex*k_maxTrianglesPerCube + slot];
        if (candidate == excludedTriangle){
            continue;
        }
        probe = triCollDetect(oPos, pos, candidate);
        // Keep only the collision closest to oPos.
        if ( (probe.collisionType>0)&(probe.collDistSq<best.collDistSq) ){
            best = probe;
        }
    }
    return best;
}
///////////////////////////////////////////////////////////////////////////////////////////////
// Function name: collDetectRectGrid
// Description: Determine whether a particle trying to go from startPos to targetPos
// collides with a triangle, using the method of a rectangular grid (as
// opposed to an R-Tree)
///////////////////////////////////////////////////////////////////////////////////////////////
// Collision handling using the uniform rectangular cube grid: walk all cubes
// in the bounding box of the path startPos -> targetPos, test their triangles,
// and either reflect the particle or let it permeate the membrane. Repeats
// until a segment is collision-free; returns the final position.
//
// Bug fix: `compartment` and `fiberInside` are now passed BY REFERENCE,
// matching collDetectRTree and the caller collDetect. Previously they were
// taken by value, so the compartment/fiber reassignment performed when a
// spin permeates a membrane was computed and then silently discarded.
__device__ float3 collDetectRectGrid(float3 startPos, float3 targetPos, float u, uint8 &compartment, uint16 &fiberInside, uint* trianglesInCubes, uint* cubeCounter){
    float3 endPos = targetPos;
    collResult collCheck;
    collCheck.collisionType = 1;     // force at least one pass of the loop
    uint excludedTriangle = UINT_MAX, currCube;
    uint3 currCubexyz, startCubexyz, endCubexyz;
    int3 cubeIncrement;
    // [u_min,u_max] permeation window: shrinks with each membrane encounter so
    // the single uniform draw u decides a whole chain of hits (u in (0,1]).
    float u_max = 1.0f, u_min = 0.0f;
    while (collCheck.collisionType > 0){
        // Cubes containing the segment endpoints, and the per-axis step
        // direction: -1, 0 or +1 (sign of end-start).
        startCubexyz = calcCubePosGPU(startPos);
        endCubexyz = calcCubePosGPU(targetPos);
        cubeIncrement.x = ( (endCubexyz.x>startCubexyz.x) - (endCubexyz.x<startCubexyz.x) );
        cubeIncrement.y = ( (endCubexyz.y>startCubexyz.y) - (endCubexyz.y<startCubexyz.y) );
        cubeIncrement.z = ( (endCubexyz.z>startCubexyz.z) - (endCubexyz.z<startCubexyz.z) );
        collCheck.collisionType = 0;
        // Visit every cube in the axis-aligned box spanned by the two cube
        // coordinates, stopping as soon as one cube reports a collision.
        currCubexyz.x = startCubexyz.x;
        do {
            currCubexyz.y = startCubexyz.y;
            do {
                currCubexyz.z = startCubexyz.z;
                do {
                    currCube = calcCubeHashGPU(currCubexyz);
                    collCheck = cubeCollDetect(startPos, targetPos, currCube, excludedTriangle, trianglesInCubes, cubeCounter);
                    currCubexyz.z += cubeIncrement.z;
                } while ((currCubexyz.z != endCubexyz.z+cubeIncrement.z)&&(collCheck.collisionType == 0));
                currCubexyz.y += cubeIncrement.y;
            } while ((currCubexyz.y != endCubexyz.y+cubeIncrement.y)&&(collCheck.collisionType == 0));
            currCubexyz.x += cubeIncrement.x;
        } while ((currCubexyz.x != endCubexyz.x+cubeIncrement.x)&&(collCheck.collisionType == 0));
        if (collCheck.collisionType > 0){
            // Debug trace (kept from the original implementation).
            printf("(in collDetectRectGrid): Collision!\n");
            printf("(in collDetectRectGrid): Startpos: [%g,%g,%g]\n", startPos.x, startPos.y, startPos.z);
            printf("(in collDetectRectGrid): Targetpos: [%g,%g,%g]\n", targetPos.x, targetPos.y, targetPos.z);
            printf("(in collDetectRectGrid): Collision pos: [%g,%g,%g]\n", collCheck.collPoint.x, collCheck.collPoint.y, collCheck.collPoint.z);
            printf("(in collDetectRectGrid): Collision triangle: %u\n", collCheck.collIndex);
            printf("(in collDetectRectGrid): Cube: %u\n", currCube);
            printf("(in collDetectRectGrid): Compartment: %u\n", compartment);
            printf("(in collDetectRectGrid): FiberInside: %u\n", fiberInside);
            if (u<=u_max-(u_max-u_min)*k_permeability){ // The spin does not permeate the membrane
                endPos = reflectPos(startPos, targetPos, collCheck.collPoint, collCheck.collIndex, collCheck.collisionType);
                u_max = u_max-(u_max-u_min)*k_permeability;
            } else{ // The spin permeates the membrane: endPos stays targetPos and
                    // the spin's compartment/fiber assignment is updated.
                u_min = u_max-(u_max-u_min)*k_permeability;
                // texTriInfo word triIndex*3+1 is the membrane type
                // (0: axon surface, 1: myelin surface).
                if (compartment == 2){
                    if (tex1Dfetch(texTriInfo, collCheck.collIndex*3+1) == 0){ // compartment 2 through axon surface -> 1
                        compartment = 1;
                    } else { // compartment 2 through myelin surface -> 0 (leaves the fiber)
                        compartment = 0;
                        fiberInside = UINT16_MAX;
                    }
                } else if (compartment == 1){
                    compartment = 2; // compartment 1 through axon surface -> 2
                } else if (compartment == 3){
                    compartment = 0; // compartment 3 through glia surface -> 0 (leaves the fiber)
                    fiberInside = UINT16_MAX;
                } else {
                    // Entering a fiber from compartment 0: record which fiber.
                    fiberInside = tex1Dfetch(texTriInfo, collCheck.collIndex*3+0);
                    if (tex1Dfetch(texTriInfo, collCheck.collIndex*3+1) == 1){ // compartment 0 through myelin surface -> 2
                        compartment = 2;
                    } else { // compartment 0 through glia surface -> 3
                        compartment = 3;
                    }
                }
            }
        }
        // Redefine the segment for the post-collision path, then repeat until
        // no collision is detected. Also exclude the triangle just hit so we
        // don't immediately re-collide with it.
        startPos = collCheck.collPoint;
        targetPos = endPos;
        excludedTriangle = collCheck.collIndex;
    }
    return endPos;
}
////////////////////////////////////////////////////////////////////////////////////////////////////////
// Function name: collDetect
// Description: Determine whether a particle trying to travel from oPos to pos hits a triangle.
// Use either the method of a rectangular grid or an R-Tree.
////////////////////////////////////////////////////////////////////////////////////////////////////////
// Dispatch collision detection for the path oPos -> pos. Only the
// rectangular-grid method is currently active; the R-Tree alternative
// (originally selected via k_triSearchMethod) is kept below for reference.
__device__ float3 collDetect(float3 oPos, float3 pos, float u, uint8 &compartment, uint16 &fiberInside, uint* trianglesInCubes, uint* cubeCounter){
    float3 settledPos = collDetectRectGrid(oPos,pos,u,compartment,fiberInside,trianglesInCubes,cubeCounter);
    // Disabled alternative:
    // if (k_triSearchMethod != 0) return collDetectRTree(oPos, pos, u, compartment, fiberInside);
    return settledPos;
}
///////////////////////////////////////////////////////////////////////////////////////////////////////
// Function name: integrate
// Description: "Main" function for GPU kernel computation, called from spinSystem.cu, invokes all
// the functions above. Computes the spin movement and signal for each spin by
// performing the below computation in parallel on multiple threads.
///////////////////////////////////////////////////////////////////////////////////////////////////////
// Main simulation kernel: one thread per spin (1D grid, indexed by
// blockIdx.x*blockDim.x+threadIdx.x). Each thread advances its spin through
// `interations` random-walk steps with collision handling, accumulating the
// MR signal magnitude (T2 decay) and phase (gradient-dependent) in spinInfo.
// NOTE(review): the parameters deltaTime, permeability, numBodies and itr
// are not referenced in this body -- the device constants k_deltaTime,
// k_permeability and m_dNumSpins are used instead; confirm the parameters
// can be retired. `interations` (sic -- apparently a typo of "iterations")
// is defined elsewhere in the file; TODO confirm.
__global__ void integrate(float3* oldPos,
uint2* oldSeed,
spinData* spinInfo,
float deltaTime,
float permeability,
uint numBodies,
float gradX, float gradY, float gradZ,
float phaseConstant,
uint itr, uint* trianglesInCubes, uint* cubeCounter, float* m_dStdDevs, float* m_dT2Values)
{
int index = blockIdx.x * blockDim.x + threadIdx.x;
// Guard against the partial last block.
if (index>=m_dNumSpins){
printf("index>=numBodies\n\n");
return;
}
float3 pos = oldPos[index]; // pos = particle position
uint2 seed2 = oldSeed[index]; // per-spin PRNG state (2 words)
uint8 compartment = spinInfo[index].compartmentType;
uint16 fiberInside = spinInfo[index].insideFiber;
/////////////////////////////////////////////////////////////////////////////////
// Now apply the brownian motion (free diffusion). We simulate brownian motion
// with a random walk where the x, y, and z componenets are drawn from a
// normal distribution with mean 0 and standard deviation of sqrt(2*ADC*deltaTime).
// From wikipedia http://en.wikipedia.org/wiki/Random_walk:
// In 3D, the variance corresponding to the Green's function of the diffusion equation is:
// sigma^2 = 6*D*t
// sigma^2 corresponds to the distribution associated to the vector R that links the two
// ends of the random walk, in 3D. The variance associated to each component Rx, Ry or Rz
// is only one third of this value (still in 3D).
// Thus, the standard deviation of each component is sqrt(2*ADC*deltaTime)
//////////////////////////////////////////////////////////////////////////////////
for (uint i=0; i<interations; i++){
// Take a random walk...
// myRandn returns 3 PRNs from a normal distribution with mean 0 and SD of 1.
// So, we just need to scale these with the desired SD to get the displacements
// for the random walk.
// myRandn also returns a bonus uniformly distributed PRN as a side-effect of the
// Box-Muller transform used to generate normally distributed PRNs; u is later
// used as the permeation decision variable in collDetect.
float u;
float3 brnMot;
// Note the argument order: y, x, z.
myRandn(seed2, brnMot.y, brnMot.x, brnMot.z, u);
float3 oPos = pos; // Store a copy of the old position before we update it
// Scale the unit-normal step by the compartment-specific SD.
pos.x += brnMot.x * m_dStdDevs[compartment];
pos.y += brnMot.y * m_dStdDevs[compartment];
pos.z += brnMot.z * m_dStdDevs[compartment];
// Debug hook for spin 0: all statements inside are disabled.
if (index == 0){
}
// Do a collision detection for the path the particle is trying to take;
// this may also update compartment/fiberInside (membrane permeation).
pos = collDetect(oPos,pos,u,compartment,fiberInside,trianglesInCubes,cubeCounter);
// Don't let the spin leave the [-1,1]^3 volume.
if (pos.x > 1.0f) { pos.x = 1.0f; }
else if (pos.x < -1.0f) { pos.x = -1.0f; }
if (pos.y > 1.0f) { pos.y = 1.0f; }
else if (pos.y < -1.0f) { pos.y = -1.0f; }
if (pos.z > 1.0f) { pos.z = 1.0f; }
else if (pos.z < -1.0f) { pos.z = -1.0f; }
// Update MR signal magnitude: first-order (Euler) T2 decay,
// dM = -M/T2 * dt, with the compartment-specific T2.
spinInfo[index].signalMagnitude += -spinInfo[index].signalMagnitude/m_dT2Values[compartment]*k_deltaTime;
// Update MR signal phase: accumulate gradient-position dot product.
spinInfo[index].signalPhase += (gradX * pos.x + gradY * pos.y + gradZ * pos.z) * phaseConstant;
}
// Store new position
oldPos[index] = pos;
// Store new seed values
oldSeed[index].x = seed2.x;
oldSeed[index].y = seed2.y;
// Store new values of compartment and fiber assignment (magnitude and
// phase were updated in place above).
spinInfo[index].compartmentType = compartment;
spinInfo[index].insideFiber = fiberInside;
}
#endif
/////////////////////////////////////////////////////////////////////////////////////////
// File name: spinSystem.cu
// Description: Definition of all CUDA functions that are not used inside the
// kernel.
/////////////////////////////////////////////////////////////////////////////////////////
// Fetch (and clear) the most recent CUDA runtime error; print its code and
// message if it is anything other than cudaSuccess.
void print_last_CUDA_error()
{
    cudaError_t status = cudaGetLastError();
    if (status == cudaSuccess)
    {
        return;
    }
    printf(" cudaGetLastError() returned %d: %s\n", status, cudaGetErrorString(status));
}
extern "C"
{
// Placeholder for CUDA sanity/initialization checks. Intentionally a no-op:
// the classic "cudaFree(0)" context-warmup call is left disabled below.
void checkCUDA()
{
// cuda(Free(0));
}
///////////////////////////////////////////////////////////////////////
// Function name: allocateArray
// Description: Allocate memory on device for an array pointed to
// by devPtr of size size.
///////////////////////////////////////////////////////////////////////
// Allocate `size` bytes of device memory; *devPtr receives the pointer.
// Improvement: the cudaMalloc status was previously ignored, so an
// out-of-memory failure left *devPtr unusable and only surfaced at some
// later, unrelated API call. Report it here instead.
void allocateArray(void **devPtr, size_t size)
{
    cudaError_t status = cudaMalloc(devPtr, size);
    if (status != cudaSuccess)
    {
        printf("allocateArray: cudaMalloc of %lu bytes failed: %s\n",
               (unsigned long)size, cudaGetErrorString(status));
    }
}
///////////////////////////////////////////////////////////////////////
// Function name: freeArray
// Description: Free up the device memory used by the array pointed
// to by devPtr
///////////////////////////////////////////////////////////////////////
// Release device memory previously obtained through allocateArray/cudaMalloc.
void freeArray(void *devPtr)
{
cudaFree(devPtr);
}
///////////////////////////////////////////////////////////////////////
// Function name: threadSync
// Description: Block until the device has completed all preceding
// requested tasks.
///////////////////////////////////////////////////////////////////////
// Block the host until the device has completed all preceding requested tasks.
void threadSync()
{
// Fix: cudaThreadSynchronize() has been deprecated for years and was removed
// in recent CUDA toolkits; cudaDeviceSynchronize() is the supported equivalent.
cudaDeviceSynchronize();
}
///////////////////////////////////////////////////////////////////////
// Function name: copyArrayFromDevice
// Description: Copy array from device (pointed to by device parameter)
// to array on host (pointed to by host parameter)
///////////////////////////////////////////////////////////////////////
// Copy 'size' bytes from device memory to host memory.
// If 'vbo' is non-zero, the source is an OpenGL buffer object: it is mapped
// first and the mapped device pointer *replaces* the 'device' argument.
void copyArrayFromDevice(void* host, const void* device, unsigned int vbo, int size)
{
if (vbo)
cudaGLMapBufferObject((void**)&device, vbo);
cudaMemcpy(host, device, size, cudaMemcpyDeviceToHost);
if (vbo)
cudaGLUnmapBufferObject(vbo);
}
////////////////////////////////////////////////////////////////////////
// Function name: copyArrayToDevice
// Description: Copy array from host (pointed to by host parameter)
// to array on device (pointed to by device parameter)
////////////////////////////////////////////////////////////////////////
// Copy 'size' bytes from host memory to device memory, starting 'offset'
// bytes into the destination device buffer.
void copyArrayToDevice(void* device, const void* host, int offset, int size)
{
cudaMemcpy((char *) device + offset, host, size, cudaMemcpyHostToDevice);
}
/////////////////////////////////////////////////////////////////////////
// Function name: copyConstantToDevice
// Description: Copy constant from host (with name host) to device
// (with name device).
/////////////////////////////////////////////////////////////////////////
// Copy 'size' bytes from host memory into the device __constant__ symbol
// 'device', starting 'offset' bytes into the symbol.
void copyConstantToDevice(void* device, const void* host, int offset, int size)
{
// Fix: forward 'offset' to cudaMemcpyToSymbol; it was previously accepted
// by this wrapper but silently ignored (the copy always landed at offset 0).
cudaMemcpyToSymbol((char *) device, host, size, offset, cudaMemcpyHostToDevice);
}
//////////////////////////////////////////////////////////////////////////
// Function name: registerGLBufferObject
// Description: Registers the buffer object of ID vbo for access by CUDA.
//////////////////////////////////////////////////////////////////////////
// Register the OpenGL buffer object 'vbo' for access by CUDA
// (required before cudaGLMapBufferObject may be called on it).
void registerGLBufferObject(uint vbo)
{
cudaGLRegisterBufferObject(vbo);
}
//////////////////////////////////////////////////////////////////////////
// Function name: unregisterGLBufferObject
// Description: Unregisters the buffer object of ID vbo for access by CUDA
// and releases any CUDA resources associated with the buffer.
//////////////////////////////////////////////////////////////////////////
// Unregister the OpenGL buffer object 'vbo' and release any CUDA
// resources associated with it.
void unregisterGLBufferObject(uint vbo)
{
cudaGLUnregisterBufferObject(vbo);
}
//////////////////////////////////////////////////////////////////////////
// The following functions bind/unbind various arrays from host to device
// texture memory.
// Note: Should combine into one function
//////////////////////////////////////////////////////////////////////////
// ---------------------------------------------------------------------
// Texture bind/unbind helpers. Each pair attaches/detaches a linear
// device buffer to one of the file-scope texture references (declared
// elsewhere in this file) so kernels can read it through the texture
// cache. Some bind helpers guard against empty (size == 0) buffers;
// the others assume a non-empty buffer -- TODO confirm callers.
// ---------------------------------------------------------------------
void bindCubeCounter(uint* ptr, int size) // per-cube triangle counts
{
cudaBindTexture(0,texCubeCounter,ptr,size*sizeof(uint));
}
void unbindCubeCounter()
{
cudaUnbindTexture(texCubeCounter);
}
void bindTrianglesInCubes(uint* ptr, int size) // cube -> triangle index lists
{
cudaBindTexture(0,texTrianglesInCubes,ptr,size*sizeof(uint));
}
void unbindTrianglesInCubes()
{
cudaUnbindTexture(texTrianglesInCubes);
}
/*
void bindTrgls(uint* ptr, int size) // Test
{
cudaBindTexture(0,texTrgls,ptr,size*sizeof(uint));
}
void unbindTrgls() // Test
{
cudaUnbindTexture(texTrgls);
}
*/
void bindVertices(float* ptr, int size) // mesh vertex coordinates
{
if (size>0){
cudaBindTexture(0,texVertices,ptr,size*sizeof(float));
}
}
void unbindVertices()
{
cudaUnbindTexture(texVertices);
}
void bindTriangleHelpers(float* ptr, int size) // precomputed per-triangle data
{
if (size>0){
cudaBindTexture(0,texTriangleHelpers,ptr,size*sizeof(float));
}
}
void unbindTriangleHelpers()
{
cudaUnbindTexture(texTriangleHelpers);
}
void bindRTreeArray(float* ptr, int size) // flattened R-tree node data
{
if (size>0){
cudaBindTexture(0,texRTreeArray,ptr,size*sizeof(float));
}
}
void unbindRTreeArray()
{
cudaUnbindTexture(texRTreeArray);
}
void bindTreeIndexArray(uint* ptr, int size) // combined tree index table
{
if (size>0){
cudaBindTexture(0,texCombinedTreeIndex,ptr,size*sizeof(uint));
}
}
void unbindTreeIndexArray()
{
cudaUnbindTexture(texCombinedTreeIndex);
}
void bindTriInfo(uint* ptr, int size) // per-triangle info words
{
if (size>0){
cudaBindTexture(0,texTriInfo,ptr,size*sizeof(uint));
}
}
void unbindTriInfo()
{
cudaUnbindTexture(texTriInfo);
}
///////////////////////////////////////////////////////////////////////////
// Function name: integrateSystem
// Description: Run the kernel for spin computations
///////////////////////////////////////////////////////////////////////////
// Run one batch of spin-dynamics kernel iterations.
// 'pos' is the OpenGL VBO id holding spin positions; the remaining pointer
// arguments are assumed to already reside where the kernel expects them
// (device memory) -- TODO confirm for spinInfo/randSeed/trianglesInCubes.
void integrateSystem(
uint pos,
uint* randSeed,
spinData* spinInfo,
float deltaTime,
float permeability,
uint numBodies,
float3 gradient,
float phaseConstant,
uint iterations, uint* trianglesInCubes, uint* cubeCounter,uint m_nMembraneTypes, uint m_nPosValues, uint m_numSpins, uint m_nSeedValues, uint m_numCompartments, float* m_hT2Values, float* m_hStdDevs, uint m_reflectionType, uint m_triSearchMethod, uint m_nFibers
){
// Print device properties once per process so the first run documents
// the hardware the simulation executes on.
static bool firstCall = true;
struct cudaDeviceProp devInfo;
cudaGetDeviceProperties(&devInfo, 0);
if (firstCall){
printf("\n\n\n\n\nCUDA device info:\n\n");
printf("Name: %s\n", devInfo.name);
printf("totalGlobalMem: %u\n", devInfo.totalGlobalMem);
printf("sharedMemPerBlock: %u\n", devInfo.sharedMemPerBlock);
printf("regsPerBlock: %u\n", devInfo.regsPerBlock);
printf("warpSize: %u\n", devInfo.warpSize);
printf("memPitch: %u\n", devInfo.memPitch);
printf("maxThreadsPerBlock: %u\n", devInfo.maxThreadsPerBlock);
printf("\n\n");
firstCall = false;
}
// Map the position VBO; Pos is the device pointer backing the buffer.
float *Pos = NULL;
cudaGLMapBufferObject((void**)&Pos, pos);
// Per-compartment T2 / standard-deviation tables, host -> device.
float* m_dT2Values = NULL;
float* m_dStdDevs = NULL;
cudaMalloc((void**)&m_dT2Values, m_numCompartments*sizeof(float));
cudaMemcpy(m_dT2Values, m_hT2Values, m_numCompartments*sizeof(float), cudaMemcpyHostToDevice);
cudaMalloc((void**)&m_dStdDevs, m_numCompartments*sizeof(float));
cudaMemcpy(m_dStdDevs, m_hStdDevs, m_numCompartments*sizeof(float), cudaMemcpyHostToDevice);
// Simulation constants in device __constant__ memory.
cudaMemcpyToSymbol(k_reflectionType, &m_reflectionType, sizeof(uint));
cudaMemcpyToSymbol(k_triSearchMethod, &m_triSearchMethod, sizeof(uint));
cudaMemcpyToSymbol(k_nFibers, &m_nFibers, sizeof(uint));
cudaMemcpyToSymbol(k_nCompartments, &m_numCompartments, sizeof(uint));
cudaMemcpyToSymbol(k_permeability, &permeability, sizeof(float));
cudaMemcpyToSymbol(k_deltaTime, &deltaTime, sizeof(float));
// Fix: copy the per-compartment tables themselves (m_numCompartments floats),
// not the address of the host pointer with sizeof(float) as before.
cudaMemcpyToSymbol(k_T2Values, m_hT2Values, m_numCompartments*sizeof(float));
cudaMemcpyToSymbol(k_stdDevs, m_hStdDevs, m_numCompartments*sizeof(float));
// Launch configuration: ceil-division so a partial final block still
// covers the remaining bodies (the old numBodies/numThreads truncated).
int numThreads = min(90, numBodies);
int numBlocks = (numBodies + numThreads - 1) / numThreads;
cudaDeviceSynchronize();
// Execute the kernel.
// Fix: pass the mapped device pointer Pos; the original cast the GL buffer
// *id* ('pos', a uint handle) to float3*, which is not a device address.
integrate<<<numBlocks, numThreads>>>((float3*) Pos, (uint2*) randSeed, spinInfo, deltaTime, permeability, numBodies, gradient.x, gradient.y, gradient.z, phaseConstant, iterations, trianglesInCubes, cubeCounter, m_dStdDevs, m_dT2Values);
cudaError_t cudaerr = cudaDeviceSynchronize();
if (cudaerr != cudaSuccess)
printf("kernel launch failed with error \"%s\".\n",
cudaGetErrorString(cudaerr));
// Release resources acquired in this call (the original leaked the device
// tables and never unmapped the VBO).
cudaGLUnmapBufferObject(pos);
cudaFree(m_dT2Values);
cudaFree(m_dStdDevs);
}
//////////////////////////////////////////////////////////////////////////////////////
// Function name: integrateSystemVBO
// Description: Register the vertex buffer object for access by CUDA, perform
// the GPU computation using integrateSystem, then unregister
// the VBO.
//////////////////////////////////////////////////////////////////////////////////////
// Map the position VBO, stage all inputs into device memory, run the spin
// kernel, and copy results back (positions into the VBO, seeds and spin
// state into the caller's host buffers).
void integrateSystemVBO(
float* vboPos,
uint* randSeed,
spinData* spinInfo,
float deltaTime,
float permeability,
uint numBodies,
float3 gradient,
float phaseConstant,
uint itr, uint* trianglesInCubes, uint* cubeCounter,uint m_nMembraneTypes, uint m_nPosValues, uint m_numSpins, uint m_nSeedValues, uint m_numCompartments, float* m_hT2Values, float* m_hStdDevs, uint m_reflectionType, uint m_triSearchMethod, uint m_nFibers, uint m_nSpinValues, uint m_totalNumCubes, uint m_maxTrianglesPerCube, uint m_numCubes, uint m_posVbo
){
// Print device properties once per process.
static bool firstCall = true;
struct cudaDeviceProp devInfo;
cudaGetDeviceProperties(&devInfo, 0);
if (firstCall){
printf("\n\n\n\n\nCUDA device info:\n\n");
printf("Name: %s\n", devInfo.name);
printf("totalGlobalMem: %u\n", devInfo.totalGlobalMem);
printf("sharedMemPerBlock: %u\n", devInfo.sharedMemPerBlock);
printf("regsPerBlock: %u\n", devInfo.regsPerBlock);
printf("warpSize: %u\n", devInfo.warpSize);
printf("memPitch: %u\n", devInfo.memPitch);
printf("maxThreadsPerBlock: %u\n", devInfo.maxThreadsPerBlock);
printf("\n\n");
firstCall = false;
}
// Map the position VBO; 'pos' is a *device* pointer into the buffer.
float* pos1 = NULL;
cudaGLMapBufferObject((void**)&pos1, m_posVbo);
float3* pos = (float3*) pos1;
// Host seed buffer viewed as uint2 pairs (m_nSeedValues values per spin).
uint2* m_dseed = (uint2*) randSeed;
// Device-side working copies of the inputs.
float3* t_pos = NULL;
spinData* m_dSpins = NULL;
uint2* m_dSeed = NULL;
uint* m_dTrianglesInCubes = NULL;
uint* m_dCubeCounter = NULL;
float* m_dStdDevs = NULL;
float* m_dT2Values = NULL;
// NOTE(review): the position buffer is sized as m_nPosValues float3s per
// spin, matching the original code -- confirm against the VBO layout.
cudaMalloc(&t_pos, sizeof(float3) * m_nPosValues * m_numSpins);
// Fix: 'pos' is a mapped device pointer, so this copy is device-to-device
// (the original used cudaMemcpyHostToDevice with a device source).
cudaMemcpy(t_pos, pos, sizeof(float3) * m_nPosValues * m_numSpins, cudaMemcpyDeviceToDevice);
cudaMalloc((void **)&m_dSpins, sizeof(spinData) * m_numSpins);
cudaMemcpy(m_dSpins, spinInfo, sizeof(spinData) * m_numSpins, cudaMemcpyHostToDevice);
cudaMalloc((void **)&m_dSeed, sizeof(uint2) * m_nSeedValues * m_numSpins);
cudaMemcpy(m_dSeed, m_dseed, m_numSpins * m_nSeedValues * sizeof(uint), cudaMemcpyHostToDevice);
cudaMalloc((void**)&m_dTrianglesInCubes, m_totalNumCubes*m_maxTrianglesPerCube*sizeof(uint));
cudaMalloc((void**)&m_dCubeCounter, m_totalNumCubes*sizeof(uint));
cudaMemcpy(m_dTrianglesInCubes, trianglesInCubes, m_totalNumCubes*m_maxTrianglesPerCube*sizeof(uint), cudaMemcpyHostToDevice);
cudaMemcpy(m_dCubeCounter, cubeCounter, m_totalNumCubes*sizeof(uint), cudaMemcpyHostToDevice);
cudaMalloc((void**)&m_dT2Values, m_numCompartments*sizeof(float));
cudaMemcpy(m_dT2Values, m_hT2Values, m_numCompartments*sizeof(float), cudaMemcpyHostToDevice);
cudaMalloc((void**)&m_dStdDevs, m_numCompartments*sizeof(float));
cudaMemcpy(m_dStdDevs, m_hStdDevs, m_numCompartments*sizeof(float), cudaMemcpyHostToDevice);
// Simulation constants in device __constant__ memory.
cudaMemcpyToSymbol(k_reflectionType, &m_reflectionType, sizeof(uint));
cudaMemcpyToSymbol(k_triSearchMethod, &m_triSearchMethod, sizeof(uint));
cudaMemcpyToSymbol(k_nFibers, &m_nFibers, sizeof(uint));
cudaMemcpyToSymbol(k_nCompartments, &m_numCompartments, sizeof(uint));
cudaMemcpyToSymbol(k_permeability, &permeability, sizeof(float));
cudaMemcpyToSymbol(k_deltaTime, &deltaTime, sizeof(float));
// Fix: copy the tables from the *host* arrays; the original passed the
// device pointers with the default host-to-device kind.
cudaMemcpyToSymbol(k_T2Values, m_hT2Values, m_numCompartments*sizeof(float));
cudaMemcpyToSymbol(k_stdDevs, m_hStdDevs, m_numCompartments*sizeof(float));
float m_cubeLength = 2.0f / m_numCubes;
cudaMemcpyToSymbol(k_cubeLength, &m_cubeLength, sizeof(float));
cudaMemcpyToSymbol(m_dNumSpins, &numBodies, sizeof(uint));
cudaMemcpyToSymbol(gradientX, &(gradient.x), sizeof(float));
cudaMemcpyToSymbol(gradientY, &(gradient.y), sizeof(float));
cudaMemcpyToSymbol(gradientZ, &(gradient.z), sizeof(float));
// Fix: 'itr' is a uint; copy sizeof(uint) (the old sizeof(float) merely
// happened to be the same size).
cudaMemcpyToSymbol(interations, &(itr), sizeof(uint));
// Launch configuration: ceil-division so a partial final block still
// covers the remaining spins.
int numThreads = min(128, numBodies);
int numBlocks = (numBodies + numThreads - 1) / numThreads;
// Execute the kernel.
// Fix: pass the device copies of the cube tables; the original passed the
// host pointers while the freshly uploaded device buffers went unused.
integrate<<<numBlocks, numThreads>>>(t_pos, m_dSeed, m_dSpins, deltaTime, permeability, numBodies, gradient.x, gradient.y, gradient.z, phaseConstant, itr, m_dTrianglesInCubes, m_dCubeCounter, m_dStdDevs, m_dT2Values);
cudaError_t cudaerr = cudaDeviceSynchronize();
if (cudaerr != cudaSuccess)
printf("kernel launch failed with error \"%s\".\n",
cudaGetErrorString(cudaerr));
// Copy the results back: positions into the mapped VBO (device-to-device),
// seeds and spin state into the host buffers.
cudaMemcpy(pos, t_pos, m_numSpins*m_nPosValues*sizeof(float3), cudaMemcpyDeviceToDevice);
// Fix: read back exactly the number of bytes uploaded; the old
// sizeof(uint2)*m_nSeedValues*m_numSpins could overrun the host buffer.
cudaMemcpy(m_dseed, m_dSeed, m_numSpins * m_nSeedValues * sizeof(uint), cudaMemcpyDeviceToHost);
cudaMemcpy(spinInfo, m_dSpins, sizeof(spinData) * m_numSpins, cudaMemcpyDeviceToHost);
cudaGLUnmapBufferObject(m_posVbo);
cudaFree(t_pos);
cudaFree(m_dSeed);
cudaFree(m_dTrianglesInCubes);
cudaFree(m_dCubeCounter);
cudaFree(m_dSpins);
cudaFree(m_dStdDevs);
cudaFree(m_dT2Values);
}
} // extern "C"
|
fd8bd7c7700319fb61ccabee7c0259f7b0ac7c28.hip
|
// !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
#include <algorithm>
#include <vector>
#include "caffe/layer.hpp"
#include "caffe/vision_layers.hpp"
namespace caffe {
// CUDA kernel for forward
template <typename Dtype>
__global__ void PReLUForward(const int n, const int channels, const int dim,
const Dtype* in, Dtype* out, const Dtype* slope_data,
const int div_factor) {
CUDA_KERNEL_LOOP(index, n) {
// Channel of this element; div_factor collapses all channels onto one
// slope when the parameter is shared.
const int c = (index / dim) % channels / div_factor;
const Dtype x = in[index];
out[index] = (x > 0) ? x : x * slope_data[c];
}
}
// CUDA kernel for bottom backward
template <typename Dtype>
__global__ void PReLUBackward(const int n, const int channels, const int dim,
const Dtype* in_diff, const Dtype* in_data, Dtype* out_diff,
const Dtype* slope_data, const int div_factor) {
CUDA_KERNEL_LOOP(index, n) {
// Local gradient is 1 for positive inputs and slope_data[c] otherwise,
// combined branchlessly.
const int c = (index / dim) % channels / div_factor;
const Dtype positive = (in_data[index] > 0);
const Dtype negative = (in_data[index] <= 0);
out_diff[index] = in_diff[index] * (positive + negative * slope_data[c]);
}
}
// CUDA kernel for element-wise parameter backward
template <typename Dtype>
__global__ void PReLUParamBackward(const int n, const Dtype* in_diff,
const Dtype* in_data, Dtype* out_diff) {
CUDA_KERNEL_LOOP(index, n) {
// The slope only affects non-positive inputs, so the per-element slope
// gradient is gated to zero elsewhere.
const Dtype gate = (in_data[index] <= 0);
out_diff[index] = in_diff[index] * in_data[index] * gate;
}
}
// Forward pass: out = in > 0 ? in : slope[c] * in, launched over all elements.
template <typename Dtype>
void PReLULayer<Dtype>::Forward_gpu(const vector<Blob<Dtype>*>& bottom,
const vector<Blob<Dtype>*>& top) {
const Dtype* bottom_data = bottom[0]->gpu_data();
Dtype* top_data = top[0]->mutable_gpu_data();
const int count = bottom[0]->count();
const int dim = bottom[0]->count(2);
const int channels = bottom[0]->channels();
const Dtype* slope_data = this->blobs_[0]->gpu_data();
// With a shared slope, every channel maps to slope index 0.
const int div_factor = channel_shared_ ? channels : 1;
// For in-place computation, stash the input so Backward_gpu can still
// read the pre-activation values.
if (top[0] == bottom[0]) {
caffe_copy(count, bottom_data, bottom_memory_.mutable_gpu_data());
}
// NOLINT_NEXT_LINE(whitespace/operators)
hipLaunchKernelGGL(( PReLUForward<Dtype>), dim3(CAFFE_GET_BLOCKS(count)), dim3(CAFFE_CUDA_NUM_THREADS), 0, 0,
count, channels, dim, bottom_data, top_data, slope_data, div_factor);
CUDA_POST_KERNEL_CHECK;
}
// Backward pass: accumulates the slope-parameter gradient per batch item,
// then propagates the gradient to the bottom blob.
template <typename Dtype>
void PReLULayer<Dtype>::Backward_gpu(const vector<Blob<Dtype>*>& top,
const vector<bool>& propagate_down,
const vector<Blob<Dtype>*>& bottom) {
const Dtype* bottom_data = bottom[0]->gpu_data();
const Dtype* top_diff = top[0]->gpu_diff();
const int count = bottom[0]->count();
const int dim = bottom[0]->count(2);
const int channels = bottom[0]->channels();
// For in-place computation, read the input saved by Forward_gpu.
if (top[0] == bottom[0]) {
bottom_data = bottom_memory_.gpu_data();
}
// Propagate to param
// Since to write bottom diff will affect top diff if top and bottom blobs
// are identical (in-place computation), we first compute param backward to
// keep top_diff unchanged.
if (this->param_propagate_down_[0]) {
Dtype* slope_diff = this->blobs_[0]->mutable_gpu_diff();
// slope_diff is set as 0, then accumulated over batches
caffe_gpu_set<Dtype>(this->blobs_[0]->count(), Dtype(0), slope_diff);
int cdim = channels * dim;
Dtype dsum = 0.;
for (int n = 0; n < bottom[0]->num(); ++n) {
// compute element-wise diff
// NOLINT_NEXT_LINE(whitespace/operators)
hipLaunchKernelGGL(( PReLUParamBackward<Dtype>), dim3(CAFFE_GET_BLOCKS(cdim)),
dim3(CAFFE_CUDA_NUM_THREADS), 0, 0,
cdim, top_diff + top[0]->offset(n),
bottom_data + bottom[0]->offset(n),
backward_buff_.mutable_gpu_diff());
CUDA_POST_KERNEL_CHECK;
if (channel_shared_) {
// Shared slope: reduce over all channels and spatial positions.
Dtype d;
caffe_gpu_dot<Dtype>(channels * dim, backward_buff_.gpu_diff(),
multiplier_.gpu_data(), &d);
dsum += d;
} else {
// Per-channel slope: sum over the spatial dimension only.
caffe_gpu_gemv<Dtype>(CblasNoTrans, channels, dim, 1.,
backward_buff_.gpu_diff(), multiplier_.gpu_data(), 1.,
slope_diff);
}
}
if (channel_shared_) {
caffe_gpu_set(this->blobs_[0]->count(), Dtype(dsum), slope_diff);
}
}
// Propagate to bottom
if (propagate_down[0]) {
Dtype* bottom_diff = bottom[0]->mutable_gpu_diff();
const Dtype* slope_data = this->blobs_[0]->gpu_data();
int div_factor = channel_shared_ ? channels : 1;
// NOLINT_NEXT_LINE(whitespace/operators)
hipLaunchKernelGGL(( PReLUBackward<Dtype>), dim3(CAFFE_GET_BLOCKS(count)),
dim3(CAFFE_CUDA_NUM_THREADS), 0, 0,
count, channels, dim, top_diff, bottom_data, bottom_diff, slope_data,
div_factor);
CUDA_POST_KERNEL_CHECK;
}
}
INSTANTIATE_LAYER_GPU_FUNCS(PReLULayer);
} // namespace caffe
|
fd8bd7c7700319fb61ccabee7c0259f7b0ac7c28.cu
|
#include <algorithm>
#include <vector>
#include "caffe/layer.hpp"
#include "caffe/vision_layers.hpp"
namespace caffe {
// CUDA kernel for forward
// Element-wise PReLU: identity for positive inputs, scaled by the
// per-channel slope otherwise.
template <typename Dtype>
__global__ void PReLUForward(const int n, const int channels, const int dim,
const Dtype* in, Dtype* out, const Dtype* slope_data,
const int div_factor) {
CUDA_KERNEL_LOOP(index, n) {
// div_factor == channels collapses every channel to slope index 0
// (shared slope); div_factor == 1 keeps one slope per channel.
int c = (index / dim) % channels / div_factor;
out[index] = in[index] > 0 ? in[index] : in[index] * slope_data[c];
}
}
// CUDA kernel for bottom backward
// Bottom gradient: multiply the incoming diff by 1 for positive inputs or
// by the channel's slope for non-positive inputs (branchless form).
template <typename Dtype>
__global__ void PReLUBackward(const int n, const int channels, const int dim,
const Dtype* in_diff, const Dtype* in_data, Dtype* out_diff,
const Dtype* slope_data, const int div_factor) {
CUDA_KERNEL_LOOP(index, n) {
int c = (index / dim) % channels / div_factor;
out_diff[index] = in_diff[index] * ((in_data[index] > 0)
+ (in_data[index] <= 0) * slope_data[c]);
}
}
// CUDA kernel for element-wise parameter backward
// Per-element contribution to the slope gradient; non-zero only where the
// input was non-positive (the slope does not affect positive inputs).
template <typename Dtype>
__global__ void PReLUParamBackward(const int n, const Dtype* in_diff,
const Dtype* in_data, Dtype* out_diff) {
CUDA_KERNEL_LOOP(index, n) {
out_diff[index] = in_diff[index] * in_data[index] * (in_data[index] <= 0);
}
}
// Forward pass: out = in > 0 ? in : slope[c] * in, launched over all elements.
template <typename Dtype>
void PReLULayer<Dtype>::Forward_gpu(const vector<Blob<Dtype>*>& bottom,
const vector<Blob<Dtype>*>& top) {
const Dtype* bottom_data = bottom[0]->gpu_data();
Dtype* top_data = top[0]->mutable_gpu_data();
const int count = bottom[0]->count();
const int dim = bottom[0]->count(2);
const int channels = bottom[0]->channels();
const Dtype* slope_data = this->blobs_[0]->gpu_data();
// With a shared slope, every channel maps to slope index 0.
const int div_factor = channel_shared_ ? channels : 1;
// For in-place computation, stash the input so Backward_gpu can still
// read the pre-activation values.
if (top[0] == bottom[0]) {
caffe_copy(count, bottom_data, bottom_memory_.mutable_gpu_data());
}
// NOLINT_NEXT_LINE(whitespace/operators)
PReLUForward<Dtype><<<CAFFE_GET_BLOCKS(count), CAFFE_CUDA_NUM_THREADS>>>(
count, channels, dim, bottom_data, top_data, slope_data, div_factor);
CUDA_POST_KERNEL_CHECK;
}
// Backward pass: accumulates the slope-parameter gradient per batch item,
// then propagates the gradient to the bottom blob.
template <typename Dtype>
void PReLULayer<Dtype>::Backward_gpu(const vector<Blob<Dtype>*>& top,
const vector<bool>& propagate_down,
const vector<Blob<Dtype>*>& bottom) {
const Dtype* bottom_data = bottom[0]->gpu_data();
const Dtype* top_diff = top[0]->gpu_diff();
const int count = bottom[0]->count();
const int dim = bottom[0]->count(2);
const int channels = bottom[0]->channels();
// For in-place computation, read the input saved by Forward_gpu.
if (top[0] == bottom[0]) {
bottom_data = bottom_memory_.gpu_data();
}
// Propagate to param
// Since to write bottom diff will affect top diff if top and bottom blobs
// are identical (in-place computation), we first compute param backward to
// keep top_diff unchanged.
if (this->param_propagate_down_[0]) {
Dtype* slope_diff = this->blobs_[0]->mutable_gpu_diff();
// slope_diff is set as 0, then accumulated over batches
caffe_gpu_set<Dtype>(this->blobs_[0]->count(), Dtype(0), slope_diff);
int cdim = channels * dim;
Dtype dsum = 0.;
for (int n = 0; n < bottom[0]->num(); ++n) {
// compute element-wise diff
// NOLINT_NEXT_LINE(whitespace/operators)
PReLUParamBackward<Dtype><<<CAFFE_GET_BLOCKS(cdim),
CAFFE_CUDA_NUM_THREADS>>>(
cdim, top_diff + top[0]->offset(n),
bottom_data + bottom[0]->offset(n),
backward_buff_.mutable_gpu_diff());
CUDA_POST_KERNEL_CHECK;
if (channel_shared_) {
// Shared slope: reduce over all channels and spatial positions.
Dtype d;
caffe_gpu_dot<Dtype>(channels * dim, backward_buff_.gpu_diff(),
multiplier_.gpu_data(), &d);
dsum += d;
} else {
// Per-channel slope: sum over the spatial dimension only.
caffe_gpu_gemv<Dtype>(CblasNoTrans, channels, dim, 1.,
backward_buff_.gpu_diff(), multiplier_.gpu_data(), 1.,
slope_diff);
}
}
if (channel_shared_) {
caffe_gpu_set(this->blobs_[0]->count(), Dtype(dsum), slope_diff);
}
}
// Propagate to bottom
if (propagate_down[0]) {
Dtype* bottom_diff = bottom[0]->mutable_gpu_diff();
const Dtype* slope_data = this->blobs_[0]->gpu_data();
int div_factor = channel_shared_ ? channels : 1;
// NOLINT_NEXT_LINE(whitespace/operators)
PReLUBackward<Dtype><<<CAFFE_GET_BLOCKS(count),
CAFFE_CUDA_NUM_THREADS>>>(
count, channels, dim, top_diff, bottom_data, bottom_diff, slope_data,
div_factor);
CUDA_POST_KERNEL_CHECK;
}
}
INSTANTIATE_LAYER_GPU_FUNCS(PReLULayer);
} // namespace caffe
|
0da2fd9d9c085e36e2314c166a2e81d82d66da41.hip
|
// !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
#include "AllSynapsesDeviceFuncs.h"
#include "AllSynapses.h"
#include "AllSTDPSynapses.h"
#include "AllDynamicSTDPSynapses.h"
// a device variable to store synapse class ID.
// Runtime tag selecting which synapse subclass's PSR update the advance
// kernels apply (see the switch statements below). Presumably written from
// host code via cudaMemcpyToSymbol before launch -- confirm the host path.
__device__ enumClassSynapses classSynapses_d = undefClassSynapses;
/* --------------------------------------*\
|* # Device Functions for advanceSynapses
\* --------------------------------------*/
/*
* Update PSR (post synapse response)
*
* @param allSynapsesDevice Reference to the AllSpikingSynapsesDeviceProperties struct
* on device memory.
* @param iSyn Index of the synapse to set.
* @param simulationStep The current simulation step.
* @param deltaT Inner simulation step duration.
*/
__device__ void changeSpikingSynapsesPSRDevice(AllSpikingSynapsesDeviceProperties* allSynapsesDevice, const BGSIZE iSyn, const uint64_t simulationStep, const BGFLOAT deltaT)
{
// Accumulate the post-synaptic response for this spike: psr += W / decay.
// (simulationStep and deltaT are unused by the plain spiking model.)
allSynapsesDevice->psr[iSyn] += allSynapsesDevice->W[iSyn] / allSynapsesDevice->decay[iSyn];
}
/*
* Update PSR (post synapse response)
*
* @param allSynapsesDevice Reference to the AllDSSynapsesDeviceProperties struct
* on device memory.
* @param iSyn Index of the synapse to set.
* @param simulationStep The current simulation step.
* @param deltaT Inner simulation step duration.
*/
// Dynamic-synapse PSR update: adjusts the short-term depression (r) and
// facilitation (u) state from the inter-spike interval, then adds the
// scaled response W/decay * u * r and records the spike time.
__device__ void changeDSSynapsePSRDevice(AllDSSynapsesDeviceProperties* allSynapsesDevice, const BGSIZE iSyn, const uint64_t simulationStep, const BGFLOAT deltaT)
{
//assert( iSyn < allSynapsesDevice->maxSynapsesPerNeuron * allSynapsesDevice->count_neurons );
uint64_t &lastSpike = allSynapsesDevice->lastSpike[iSyn];
BGFLOAT &r = allSynapsesDevice->r[iSyn];
BGFLOAT &u = allSynapsesDevice->u[iSyn];
BGFLOAT D = allSynapsesDevice->D[iSyn];
BGFLOAT F = allSynapsesDevice->F[iSyn];
BGFLOAT U = allSynapsesDevice->U[iSyn];
BGFLOAT W = allSynapsesDevice->W[iSyn];
BGFLOAT &psr = allSynapsesDevice->psr[iSyn];
BGFLOAT decay = allSynapsesDevice->decay[iSyn];
// adjust synapse parameters; ULONG_MAX marks "no previous spike", in
// which case r and u keep their current values.
if (lastSpike != ULONG_MAX) {
// Inter-spike interval in seconds; r relaxes with time constant D,
// u with time constant F.
BGFLOAT isi = (simulationStep - lastSpike) * deltaT ;
r = 1 + ( r * ( 1 - u ) - 1 ) * exp( -isi / D );
u = U + u * ( 1 - U ) * exp( -isi / F );
}
psr += ( ( W / decay ) * u * r );// calculate psr
lastSpike = simulationStep; // record the time of the spike
}
/*
* Checks if there is an input spike in the queue.
*
* @param[in] allSynapsesDevice Pointer to AllSpikingSynapsesDeviceProperties structures
* on device memory.
* @param[in] iSyn Index of the Synapse to check.
* @return true if there is an input spike event.
*/
__device__ bool isSpikingSynapsesSpikeQueueDevice(AllSpikingSynapsesDeviceProperties* allSynapsesDevice, BGSIZE iSyn)
{
// Test the bit for the current delay slot, clear it, and advance the
// circular read index through the per-synapse delay queue.
uint32_t &queueBits = allSynapsesDevice->delayQueue[iSyn];
int &readIdx = allSynapsesDevice->delayIdx[iSyn];
const int queueLength = allSynapsesDevice->ldelayQueue[iSyn];
const uint32_t slotBit = (0x1 << readIdx);
const bool fired = (queueBits & slotBit) != 0;
queueBits &= ~slotBit;
readIdx = (readIdx + 1 >= queueLength) ? 0 : readIdx + 1;
return fired;
}
/*
* Adjust synapse weight according to the Spike-timing-dependent synaptic modification
* induced by natural spike trains
*
* @param allSynapsesDevice Pointer to the AllSTDPSynapsesDeviceProperties structures
* on device memory.
* @param iSyn Index of the synapse to set.
* @param delta Pre/post synaptic spike interval.
* @param epost Params for the rule given in Froemke and Dan (2002).
* @param epre Params for the rule given in Froemke and Dan (2002).
*/
// Apply one STDP weight update for the pre/post interval 'delta' (seconds),
// scaled by the Froemke-Dan efficacies epost/epre. Intervals inside the
// +/-STDPgap dead zone produce no change; the result is sign-clamped and
// magnitude-capped at Wex.
__device__ void stdpLearningDevice(AllSTDPSynapsesDeviceProperties* allSynapsesDevice, const BGSIZE iSyn, double delta, double epost, double epre)
{
BGFLOAT STDPgap = allSynapsesDevice->STDPgap[iSyn];
BGFLOAT muneg = allSynapsesDevice->muneg[iSyn];
BGFLOAT mupos = allSynapsesDevice->mupos[iSyn];
BGFLOAT tauneg = allSynapsesDevice->tauneg[iSyn];
BGFLOAT taupos = allSynapsesDevice->taupos[iSyn];
BGFLOAT Aneg = allSynapsesDevice->Aneg[iSyn];
BGFLOAT Apos = allSynapsesDevice->Apos[iSyn];
BGFLOAT Wex = allSynapsesDevice->Wex[iSyn];
BGFLOAT &W = allSynapsesDevice->W[iSyn];
BGFLOAT dw;
if (delta < -STDPgap) {
// Depression: weight-dependent (pow(W, muneg)) exponential window.
dw = pow(W, muneg) * Aneg * exp(delta / tauneg);
} else if (delta > STDPgap) {
// Potentiation: scaled by remaining headroom pow(Wex - W, mupos).
dw = pow(Wex - W, mupos) * Apos * exp(-delta / taupos);
} else {
return;
}
W += epost * epre * dw;
// check the sign: W must not cross zero away from Wex's sign
if ((Wex < 0 && W > 0) || (Wex > 0 && W < 0)) W = 0;
// check for greater Wmax: clamp the magnitude at |Wex|
if (fabs(W) > fabs(Wex)) W = Wex;
DEBUG_SYNAPSE(
printf("AllSTDPSynapses::stdpLearning:\n");
printf(" iSyn: %d\n", iSyn);
printf(" delta: %f\n", delta);
printf(" epre: %f\n", epre);
printf(" epost: %f\n", epost);
printf(" dw: %f\n", dw);
printf(" W: %f\n\n", W);
);
}
/*
* Checks if there is an input spike in the queue.
*
* @param[in] allSynapsesDevice Pointer to AllSTDPSynapsesDeviceProperties structures
* on device memory.
* @param[in] iSyn Index of the Synapse to check.
* @return true if there is an input spike event.
*/
// Post-synaptic counterpart of isSpikingSynapsesSpikeQueueDevice: test and
// clear the current slot's bit in the post-spike delay queue, then advance
// the circular read index.
__device__ bool isSTDPSynapseSpikeQueuePostDevice(AllSTDPSynapsesDeviceProperties* allSynapsesDevice, BGSIZE iSyn)
{
uint32_t &delay_queue = allSynapsesDevice->delayQueuePost[iSyn];
int &delayIdx = allSynapsesDevice->delayIdxPost[iSyn];
int ldelayQueue = allSynapsesDevice->ldelayQueuePost[iSyn];
uint32_t delayMask = (0x1 << delayIdx);
bool isFired = delay_queue & (delayMask);
// Consume the event so it fires at most once.
delay_queue &= ~(delayMask);
// Wrap the read index around the queue length.
if ( ++delayIdx >= ldelayQueue ) {
delayIdx = 0;
}
return isFired;
}
/*
* Gets the spike history of the neuron.
*
* @param allNeuronsDevice Reference to the allNeurons struct on device memory.
* @param index Index of the neuron to get spike history.
* @param offIndex Offset of the history beffer to get.
* -1 will return the last spike.
* @param max_spikes Maximum number of spikes per neuron per epoch.
* @return Spike history.
*/
// Fetch an entry from the neuron's circular spike-history buffer.
// offIndex counts backwards from the next write position (-1 = most recent
// spike); '+ max_spikes' keeps the modulo argument non-negative.
__device__ uint64_t getSTDPSynapseSpikeHistoryDevice(AllSpikingNeuronsDeviceProperties* allNeuronsDevice, int index, int offIndex, int max_spikes)
{
// offIndex is a minus offset
int idxSp = (allNeuronsDevice->spikeCount[index] + allNeuronsDevice->spikeCountOffset[index] + max_spikes + offIndex) % max_spikes;
return allNeuronsDevice->spike_history[index][idxSp];
}
/* --------------------------------------*\
|* # Global Functions for advanceSynapses
\* --------------------------------------*/
/*
* CUDA code for advancing spiking synapses.
* Perform updating synapses for one time step.
*
* @param[in] total_synapse_counts Number of synapses.
* @param synapseIndexMapDevice Reference to the SynapseIndexMap on device memory.
* @param[in] simulationStep The current simulation step.
* @param[in] deltaT Inner simulation step duration.
* @param[in] allSynapsesDevice Pointer to AllSpikingSynapsesDeviceProperties structures
* on device memory.
*/
// One thread per active synapse: consume any pending input spike, apply the
// model-specific PSR update (dispatched on classSynapses_d), then decay psr.
__global__ void advanceSpikingSynapsesDevice ( int total_synapse_counts, SynapseIndexMap* synapseIndexMapDevice, uint64_t simulationStep, const BGFLOAT deltaT, AllSpikingSynapsesDeviceProperties* allSynapsesDevice ) {
// Bounds guard: the grid may overshoot the synapse count.
int idx = blockIdx.x * blockDim.x + threadIdx.x;
if ( idx >= total_synapse_counts )
return;
// Translate the dense thread index to the sparse synapse index.
BGSIZE iSyn = synapseIndexMapDevice->activeSynapseIndex[idx];
BGFLOAT &psr = allSynapsesDevice->psr[iSyn];
BGFLOAT decay = allSynapsesDevice->decay[iSyn];
// Checks if there is an input spike in the queue.
bool isFired = isSpikingSynapsesSpikeQueueDevice(allSynapsesDevice, iSyn);
// is an input in the queue?
if (isFired) {
switch (classSynapses_d) {
case classAllSpikingSynapses:
changeSpikingSynapsesPSRDevice(static_cast<AllSpikingSynapsesDeviceProperties*>(allSynapsesDevice), iSyn, simulationStep, deltaT);
break;
case classAllDSSynapses:
changeDSSynapsePSRDevice(static_cast<AllDSSynapsesDeviceProperties*>(allSynapsesDevice), iSyn, simulationStep, deltaT);
break;
default:
// Unknown synapse class: trap in debug builds.
assert(false);
}
}
// decay the post spike response every step, spike or not
psr *= decay;
}
/*
* CUDA code for advancing STDP synapses.
* Perform updating synapses for one time step.
*
* @param[in] total_synapse_counts Number of synapses.
* @param synapseIndexMapDevice Reference to the SynapseIndexMap on device memory.
* @param[in] simulationStep The current simulation step.
* @param[in] deltaT Inner simulation step duration.
* @param[in] allSynapsesDevice Pointer to AllSTDPSynapsesDeviceProperties structures
* on device memory.
*/
__global__ void advanceSTDPSynapsesDevice ( int total_synapse_counts, SynapseIndexMap* synapseIndexMapDevice, uint64_t simulationStep, const BGFLOAT deltaT, AllSTDPSynapsesDeviceProperties* allSynapsesDevice, AllSpikingNeuronsDeviceProperties* allNeuronsDevice, int max_spikes, int width ) {
    // One thread per active synapse; threads past the active count exit early.
    int idx = blockIdx.x * blockDim.x + threadIdx.x;
    if ( idx >= total_synapse_counts )
            return;
    // Map the dense thread index to the sparse synapse index.
    BGSIZE iSyn = synapseIndexMapDevice->activeSynapseIndex[idx];
    BGFLOAT &decay = allSynapsesDevice->decay[iSyn];
    BGFLOAT &psr = allSynapsesDevice->psr[iSyn];
    // is an input in the queue?  fPre = presynaptic spike arrived this step,
    // fPost = postsynaptic spike arrived this step (both calls also consume
    // the queue bit and advance the queue index as a side effect).
    bool fPre = isSpikingSynapsesSpikeQueueDevice(allSynapsesDevice, iSyn);
    bool fPost = isSTDPSynapseSpikeQueuePostDevice(allSynapsesDevice, iSyn);
    if (fPre || fPost) {
        // STDP time constants and options for this synapse.
        BGFLOAT &tauspre = allSynapsesDevice->tauspre[iSyn];
        BGFLOAT &tauspost = allSynapsesDevice->tauspost[iSyn];
        BGFLOAT &taupos = allSynapsesDevice->taupos[iSyn];
        BGFLOAT &tauneg = allSynapsesDevice->tauneg[iSyn];
        int &total_delay = allSynapsesDevice->total_delay[iSyn];
        bool &useFroemkeDanSTDP = allSynapsesDevice->useFroemkeDanSTDP[iSyn];
        // pre and post neurons index
        int idxPre = allSynapsesDevice->sourceNeuronIndex[iSyn];
        int idxPost = allSynapsesDevice->destNeuronIndex[iSyn];
        int64_t spikeHistory, spikeHistory2;
        BGFLOAT delta;          // pre/post spike interval (seconds)
        BGFLOAT epre, epost;    // spike-efficacy factors (Froemke & Dan rule)
        if (fPre) { // preSpikeHit
            // spikeCount points to the next available position of spike_history,
            // so the getSpikeHistory w/offset = -2 will return the spike time
            // just one before the last spike.
            spikeHistory = getSTDPSynapseSpikeHistoryDevice(allNeuronsDevice, idxPre, -2, max_spikes);
            if (spikeHistory > 0 && useFroemkeDanSTDP) {
                // delta will include the transmission delay
                delta = ((int64_t)simulationStep - spikeHistory) * deltaT;
                epre = 1.0 - exp(-delta / tauspre);
            } else {
                epre = 1.0;
            }
            // call the learning function stdpLearning() for each pair of
            // pre-post spikes, walking backwards through the post neuron's
            // spike history until the spikes are too old to matter.
            int offIndex = -1; // last spike
            while (true) {
                spikeHistory = getSTDPSynapseSpikeHistoryDevice(allNeuronsDevice, idxPost, offIndex, max_spikes);
                if (spikeHistory == ULONG_MAX)
                    break;  // no more recorded spikes
                // delta is the spike interval between pre-post spikes
                // (negative: post fired before the current pre spike).
                delta = (spikeHistory - (int64_t)simulationStep) * deltaT;
                DEBUG_SYNAPSE(
                    printf("advanceSTDPSynapsesDevice: fPre\n");
                    printf("          iSyn: %d\n", iSyn);
                    printf("          idxPre: %d\n", idxPre);
                    printf("          idxPost: %d\n", idxPost);
                    printf("          spikeHistory: %d\n", spikeHistory);
                    printf("          simulationStep: %d\n", simulationStep);
                    printf("          delta: %f\n\n", delta);
                );
                // Stop once the interval exceeds ~3 depression time constants.
                if (delta <= -3.0 * tauneg)
                    break;
                if (useFroemkeDanSTDP) {
                    // Post-spike efficacy depends on the preceding post ISI.
                    spikeHistory2 = getSTDPSynapseSpikeHistoryDevice(allNeuronsDevice, idxPost, offIndex-1, max_spikes);
                    if (spikeHistory2 == ULONG_MAX)
                        break;
                    epost = 1.0 - exp(-((spikeHistory - spikeHistory2) * deltaT) / tauspost);
                } else {
                    epost = 1.0;
                }
                stdpLearningDevice(allSynapsesDevice, iSyn, delta, epost, epre);
                --offIndex;
            }
            // A presynaptic spike also drives the PSR; dispatch on the
            // runtime synapse class stored in device memory.
            switch (classSynapses_d) {
            case classAllSTDPSynapses:
                changeSpikingSynapsesPSRDevice(static_cast<AllSpikingSynapsesDeviceProperties*>(allSynapsesDevice), iSyn, simulationStep, deltaT);
                break;
            case classAllDynamicSTDPSynapses:
                // Note: we cast void * over the allSynapsesDevice, then recast it,
                // because AllDSSynapsesDeviceProperties inherited properties from
                // the AllDSSynapsesDeviceProperties and the AllSTDPSynapsesDeviceProperties.
                changeDSSynapsePSRDevice(static_cast<AllDSSynapsesDeviceProperties*>((void *)allSynapsesDevice), iSyn, simulationStep, deltaT);
                break;
            default:
                assert(false);
            }
        }
        if (fPost) { // postSpikeHit
            // spikeCount points to the next available position of spike_history,
            // so the getSpikeHistory w/offset = -2 will return the spike time
            // just one before the last spike.
            spikeHistory = getSTDPSynapseSpikeHistoryDevice(allNeuronsDevice, idxPost, -2, max_spikes);
            if (spikeHistory > 0 && useFroemkeDanSTDP) {
                // delta will include the transmission delay
                delta = ((int64_t)simulationStep - spikeHistory) * deltaT;
                epost = 1.0 - exp(-delta / tauspost);
            } else {
                epost = 1.0;
            }
            // call the learning function stdpLearning() for each pair of
            // post-pre spikes, walking backwards through the pre neuron's
            // spike history.
            int offIndex = -1; // last spike
            while (true) {
                spikeHistory = getSTDPSynapseSpikeHistoryDevice(allNeuronsDevice, idxPre, offIndex, max_spikes);
                if (spikeHistory == ULONG_MAX)
                    break;  // no more recorded spikes
                // delta is the spike interval between post-pre spikes;
                // total_delay accounts for axonal transmission time.
                delta = ((int64_t)simulationStep - spikeHistory - total_delay) * deltaT;
                DEBUG_SYNAPSE(
                    printf("advanceSTDPSynapsesDevice: fPost\n");
                    printf("          iSyn: %d\n", iSyn);
                    printf("          idxPre: %d\n", idxPre);
                    printf("          idxPost: %d\n", idxPost);
                    printf("          spikeHistory: %d\n", spikeHistory);
                    printf("          simulationStep: %d\n", simulationStep);
                    printf("          delta: %f\n\n", delta);
                );
                // Skip acausal pairs and stop past ~3 potentiation constants.
                if (delta <= 0 || delta >= 3.0 * taupos)
                    break;
                if (useFroemkeDanSTDP) {
                    // Pre-spike efficacy depends on the preceding pre ISI.
                    spikeHistory2 = getSTDPSynapseSpikeHistoryDevice(allNeuronsDevice, idxPre, offIndex-1, max_spikes);
                    if (spikeHistory2 == ULONG_MAX)
                        break;
                    epre = 1.0 - exp(-((spikeHistory - spikeHistory2) * deltaT) / tauspre);
                } else {
                    epre = 1.0;
                }
                stdpLearningDevice(allSynapsesDevice, iSyn, delta, epost, epre);
                --offIndex;
            }
        }
    }
    // decay the post spike response
    psr *= decay;
}
/* ------------------------------------*\
|* # Device Functions for createSynapse
\* ------------------------------------*/
/*
* Return 1 if originating neuron is excitatory, -1 otherwise.
*
* @param[in] t synapseType I to I, I to E, E to I, or E to E
* @return 1 or -1
*/
__device__ int synSign( synapseType t )
{
    // Inhibitory originating neuron (II, IE) contributes -1;
    // excitatory originating neuron (EI, EE) contributes +1.
    if ( t == II || t == IE )
        return -1;
    if ( t == EI || t == EE )
        return 1;
    // Unknown/undefined type: no sign.
    return 0;
}
/*
* Create a Spiking Synapse and connect it to the model.
*
* @param allSynapsesDevice Pointer to the AllSpikingSynapsesDeviceProperties structures
* on device memory.
* @param neuron_index Index of the source neuron.
* @param synapse_index Index of the Synapse to create.
 * @param source_index          Index of the source neuron.
 * @param dest_index            Index of the destination neuron.
* @param sum_point Pointer to the summation point.
* @param deltaT The time step size.
* @param type Type of the Synapse to create.
*/
__device__ void createSpikingSynapse(AllSpikingSynapsesDeviceProperties* allSynapsesDevice, const int neuron_index, const int synapse_index, int source_index, int dest_index, BGFLOAT *sum_point, const BGFLOAT deltaT, synapseType type)
{
    // Flat index of this synapse within the per-neuron synapse arrays.
    BGSIZE max_synapses = allSynapsesDevice->maxSynapsesPerNeuron;
    BGSIZE iSyn = max_synapses * neuron_index + synapse_index;

    allSynapsesDevice->in_use[iSyn] = true;
    allSynapsesDevice->summationPoint[iSyn] = sum_point;
    allSynapsesDevice->destNeuronIndex[iSyn] = dest_index;
    allSynapsesDevice->sourceNeuronIndex[iSyn] = source_index;
    // Initial weight: sign from synapse type, small default magnitude.
    allSynapsesDevice->W[iSyn] = synSign(type) * 10.0e-9;

    // Reset the spike delay queue to empty.
    allSynapsesDevice->delayQueue[iSyn] = 0;
    allSynapsesDevice->delayIdx[iSyn] = 0;
    allSynapsesDevice->ldelayQueue[iSyn] = LENGTH_OF_DELAYQUEUE;

    allSynapsesDevice->psr[iSyn] = 0.0;
    allSynapsesDevice->type[iSyn] = type;

    // Fallback values guard against an unexpected synapseType: the previous
    // code left tau and delay uninitialized on the default path, which made
    // the exp() and delay computations below undefined behavior.
    BGFLOAT tau = DEFAULT_tau;
    BGFLOAT delay = 0.8e-3;
    switch (type) {
        case II:
            tau = 6e-3;
            delay = 0.8e-3;
            break;
        case IE:
            tau = 6e-3;
            delay = 0.8e-3;
            break;
        case EI:
            tau = 3e-3;
            delay = 0.8e-3;
            break;
        case EE:
            tau = 3e-3;
            delay = 1.5e-3;
            break;
        default:
            assert(false);
            break;
    }

    allSynapsesDevice->tau[iSyn] = tau;
    allSynapsesDevice->decay[iSyn] = exp( -deltaT / tau );
    // Transmission delay expressed in simulation time steps (at least 1).
    allSynapsesDevice->total_delay[iSyn] = static_cast<int>( delay / deltaT ) + 1;

    // The delay queue must be able to hold total_delay bits.
    uint32_t size = allSynapsesDevice->total_delay[iSyn] / ( sizeof(uint8_t) * 8 ) + 1;
    assert( size <= BYTES_OF_DELAYQUEUE );
}
/*
* Create a DS Synapse and connect it to the model.
*
* @param allSynapsesDevice Pointer to the AllDSSynapsesDeviceProperties structures
* on device memory.
* @param neuron_index Index of the source neuron.
* @param synapse_index Index of the Synapse to create.
 * @param source_index          Index of the source neuron.
 * @param dest_index            Index of the destination neuron.
* @param sum_point Pointer to the summation point.
* @param deltaT The time step size.
* @param type Type of the Synapse to create.
*/
__device__ void createDSSynapse(AllDSSynapsesDeviceProperties* allSynapsesDevice, const int neuron_index, const int synapse_index, int source_index, int dest_index, BGFLOAT *sum_point, const BGFLOAT deltaT, synapseType type)
{
    // Flat index of this synapse within the per-neuron synapse arrays.
    BGSIZE max_synapses = allSynapsesDevice->maxSynapsesPerNeuron;
    BGSIZE iSyn = max_synapses * neuron_index + synapse_index;

    allSynapsesDevice->in_use[iSyn] = true;
    allSynapsesDevice->summationPoint[iSyn] = sum_point;
    allSynapsesDevice->destNeuronIndex[iSyn] = dest_index;
    allSynapsesDevice->sourceNeuronIndex[iSyn] = source_index;
    // Initial weight: sign from synapse type, small default magnitude.
    allSynapsesDevice->W[iSyn] = synSign(type) * 10.0e-9;

    // Reset the spike delay queue to empty.
    allSynapsesDevice->delayQueue[iSyn] = 0;
    allSynapsesDevice->delayIdx[iSyn] = 0;
    allSynapsesDevice->ldelayQueue[iSyn] = LENGTH_OF_DELAYQUEUE;

    // Short-term dynamics state (depression/facilitation).
    allSynapsesDevice->psr[iSyn] = 0.0;
    allSynapsesDevice->r[iSyn] = 1.0;
    allSynapsesDevice->u[iSyn] = 0.4;       // DEFAULT_U
    allSynapsesDevice->lastSpike[iSyn] = ULONG_MAX;  // "never spiked" sentinel
    allSynapsesDevice->type[iSyn] = type;

    // Fallback values guard against an unexpected synapseType: the previous
    // code left U, D, F, tau and delay uninitialized on the default path,
    // making the computations below undefined behavior.
    BGFLOAT U = DEFAULT_U;
    BGFLOAT D = 1.0;
    BGFLOAT F = 0.01;
    BGFLOAT tau = DEFAULT_tau;
    BGFLOAT delay = 0.8e-3;
    switch (type) {
        case II:
            U = 0.32;
            D = 0.144;
            F = 0.06;
            tau = 6e-3;
            delay = 0.8e-3;
            break;
        case IE:
            U = 0.25;
            D = 0.7;
            F = 0.02;
            tau = 6e-3;
            delay = 0.8e-3;
            break;
        case EI:
            U = 0.05;
            D = 0.125;
            F = 1.2;
            tau = 3e-3;
            delay = 0.8e-3;
            break;
        case EE:
            U = 0.5;
            D = 1.1;
            F = 0.05;
            tau = 3e-3;
            delay = 1.5e-3;
            break;
        default:
            assert(false);
            break;
    }

    allSynapsesDevice->U[iSyn] = U;
    allSynapsesDevice->D[iSyn] = D;
    allSynapsesDevice->F[iSyn] = F;
    allSynapsesDevice->tau[iSyn] = tau;
    allSynapsesDevice->decay[iSyn] = exp( -deltaT / tau );
    // Transmission delay expressed in simulation time steps (at least 1).
    allSynapsesDevice->total_delay[iSyn] = static_cast<int>( delay / deltaT ) + 1;

    // The delay queue must be able to hold total_delay bits.
    uint32_t size = allSynapsesDevice->total_delay[iSyn] / ( sizeof(uint8_t) * 8 ) + 1;
    assert( size <= BYTES_OF_DELAYQUEUE );
}
/*
* Create a Synapse and connect it to the model.
*
* @param allSynapsesDevice Pointer to the AllSTDPSynapsesDeviceProperties structures
* on device memory.
* @param neuron_index Index of the source neuron.
* @param synapse_index Index of the Synapse to create.
 * @param source_index          Index of the source neuron.
 * @param dest_index            Index of the destination neuron.
* @param sum_point Pointer to the summation point.
* @param deltaT The time step size.
* @param type Type of the Synapse to create.
*/
__device__ void createSTDPSynapse(AllSTDPSynapsesDeviceProperties* allSynapsesDevice, const int neuron_index, const int synapse_index, int source_index, int dest_index, BGFLOAT *sum_point, const BGFLOAT deltaT, synapseType type)
{
    // Flat index of this synapse within the per-neuron synapse arrays.
    BGSIZE max_synapses = allSynapsesDevice->maxSynapsesPerNeuron;
    BGSIZE iSyn = max_synapses * neuron_index + synapse_index;

    allSynapsesDevice->in_use[iSyn] = true;
    allSynapsesDevice->summationPoint[iSyn] = sum_point;
    allSynapsesDevice->destNeuronIndex[iSyn] = dest_index;
    allSynapsesDevice->sourceNeuronIndex[iSyn] = source_index;
    // Initial weight: sign from synapse type, small default magnitude.
    allSynapsesDevice->W[iSyn] = synSign(type) * 10.0e-9;

    // Reset the spike delay queue to empty.
    allSynapsesDevice->delayQueue[iSyn] = 0;
    allSynapsesDevice->delayIdx[iSyn] = 0;
    allSynapsesDevice->ldelayQueue[iSyn] = LENGTH_OF_DELAYQUEUE;

    allSynapsesDevice->psr[iSyn] = 0.0;
    allSynapsesDevice->type[iSyn] = type;

    // Fallback values guard against an unexpected synapseType: the previous
    // code left tau and delay uninitialized on the default path, which made
    // the exp() and delay computations below undefined behavior.
    BGFLOAT tau = DEFAULT_tau;
    BGFLOAT delay = 0.8e-3;
    switch (type) {
        case II:
            tau = 6e-3;
            delay = 0.8e-3;
            break;
        case IE:
            tau = 6e-3;
            delay = 0.8e-3;
            break;
        case EI:
            tau = 3e-3;
            delay = 0.8e-3;
            break;
        case EE:
            tau = 3e-3;
            delay = 1.5e-3;
            break;
        default:
            assert(false);
            break;
    }

    allSynapsesDevice->tau[iSyn] = tau;
    allSynapsesDevice->decay[iSyn] = exp( -deltaT / tau );
    // Transmission delay expressed in simulation time steps (at least 1).
    allSynapsesDevice->total_delay[iSyn] = static_cast<int>( delay / deltaT ) + 1;

    // The delay queue must be able to hold total_delay bits.
    uint32_t size = allSynapsesDevice->total_delay[iSyn] / ( sizeof(uint8_t) * 8 ) + 1;
    assert( size <= BYTES_OF_DELAYQUEUE );

    // STDP learning-rule parameters (defaults; Froemke-Dan extension off).
    allSynapsesDevice->Apos[iSyn] = 0.5;
    allSynapsesDevice->Aneg[iSyn] = -0.5;
    allSynapsesDevice->STDPgap[iSyn] = 2e-3;
    allSynapsesDevice->total_delayPost[iSyn] = 0;
    allSynapsesDevice->tauspost[iSyn] = 0;
    allSynapsesDevice->tauspre[iSyn] = 0;
    allSynapsesDevice->taupos[iSyn] = 15e-3;
    allSynapsesDevice->tauneg[iSyn] = 35e-3;
    allSynapsesDevice->Wex[iSyn] = 1.0;
    allSynapsesDevice->mupos[iSyn] = 0;
    allSynapsesDevice->muneg[iSyn] = 0;
    allSynapsesDevice->useFroemkeDanSTDP[iSyn] = false;
}
/*
* Create a Synapse and connect it to the model.
*
* @param allSynapsesDevice Pointer to the AllDynamicSTDPSynapsesDeviceProperties structures
* on device memory.
* @param neuron_index Index of the source neuron.
* @param synapse_index Index of the Synapse to create.
 * @param source_index          Index of the source neuron.
 * @param dest_index            Index of the destination neuron.
* @param sum_point Pointer to the summation point.
* @param deltaT The time step size.
* @param type Type of the Synapse to create.
*/
__device__ void createDynamicSTDPSynapse(AllDynamicSTDPSynapsesDeviceProperties* allSynapsesDevice, const int neuron_index, const int synapse_index, int source_index, int dest_index, BGFLOAT *sum_point, const BGFLOAT deltaT, synapseType type)
{
    // Flat index of this synapse within the per-neuron synapse arrays.
    BGSIZE max_synapses = allSynapsesDevice->maxSynapsesPerNeuron;
    BGSIZE iSyn = max_synapses * neuron_index + synapse_index;

    allSynapsesDevice->in_use[iSyn] = true;
    allSynapsesDevice->summationPoint[iSyn] = sum_point;
    allSynapsesDevice->destNeuronIndex[iSyn] = dest_index;
    allSynapsesDevice->sourceNeuronIndex[iSyn] = source_index;
    // Initial weight: sign from synapse type, small default magnitude.
    allSynapsesDevice->W[iSyn] = synSign(type) * 10.0e-9;

    // Reset the spike delay queue to empty.
    allSynapsesDevice->delayQueue[iSyn] = 0;
    allSynapsesDevice->delayIdx[iSyn] = 0;
    allSynapsesDevice->ldelayQueue[iSyn] = LENGTH_OF_DELAYQUEUE;

    // Short-term dynamics state (depression/facilitation).
    allSynapsesDevice->psr[iSyn] = 0.0;
    allSynapsesDevice->r[iSyn] = 1.0;
    allSynapsesDevice->u[iSyn] = 0.4;       // DEFAULT_U
    allSynapsesDevice->lastSpike[iSyn] = ULONG_MAX;  // "never spiked" sentinel
    allSynapsesDevice->type[iSyn] = type;

    // Fallback values guard against an unexpected synapseType: the previous
    // code left U, D, F, tau and delay uninitialized on the default path,
    // making the computations below undefined behavior.
    BGFLOAT U = DEFAULT_U;
    BGFLOAT D = 1.0;
    BGFLOAT F = 0.01;
    BGFLOAT tau = DEFAULT_tau;
    BGFLOAT delay = 0.8e-3;
    switch (type) {
        case II:
            U = 0.32;
            D = 0.144;
            F = 0.06;
            tau = 6e-3;
            delay = 0.8e-3;
            break;
        case IE:
            U = 0.25;
            D = 0.7;
            F = 0.02;
            tau = 6e-3;
            delay = 0.8e-3;
            break;
        case EI:
            U = 0.05;
            D = 0.125;
            F = 1.2;
            tau = 3e-3;
            delay = 0.8e-3;
            break;
        case EE:
            U = 0.5;
            D = 1.1;
            F = 0.05;
            tau = 3e-3;
            delay = 1.5e-3;
            break;
        default:
            assert(false);
            break;
    }

    allSynapsesDevice->U[iSyn] = U;
    allSynapsesDevice->D[iSyn] = D;
    allSynapsesDevice->F[iSyn] = F;
    allSynapsesDevice->tau[iSyn] = tau;
    allSynapsesDevice->decay[iSyn] = exp( -deltaT / tau );
    // Transmission delay expressed in simulation time steps (at least 1).
    allSynapsesDevice->total_delay[iSyn] = static_cast<int>( delay / deltaT ) + 1;

    // The delay queue must be able to hold total_delay bits.
    uint32_t size = allSynapsesDevice->total_delay[iSyn] / ( sizeof(uint8_t) * 8 ) + 1;
    assert( size <= BYTES_OF_DELAYQUEUE );

    // STDP learning-rule parameters (defaults; Froemke-Dan extension off).
    allSynapsesDevice->Apos[iSyn] = 0.5;
    allSynapsesDevice->Aneg[iSyn] = -0.5;
    allSynapsesDevice->STDPgap[iSyn] = 2e-3;
    allSynapsesDevice->total_delayPost[iSyn] = 0;
    allSynapsesDevice->tauspost[iSyn] = 0;
    allSynapsesDevice->tauspre[iSyn] = 0;
    allSynapsesDevice->taupos[iSyn] = 15e-3;
    allSynapsesDevice->tauneg[iSyn] = 35e-3;
    allSynapsesDevice->Wex[iSyn] = 1.0;
    allSynapsesDevice->mupos[iSyn] = 0;
    allSynapsesDevice->muneg[iSyn] = 0;
    allSynapsesDevice->useFroemkeDanSTDP[iSyn] = false;
}
/*
* Adds a synapse to the network. Requires the locations of the source and
* destination neurons.
*
* @param allSynapsesDevice Pointer to the AllSpikingSynapsesDeviceProperties structures
* on device memory.
* @param type Type of the Synapse to create.
* @param src_neuron Index of the source neuron.
* @param dest_neuron Index of the destination neuron.
* @param source_x X location of source.
* @param source_y Y location of source.
* @param dest_x X location of destination.
* @param dest_y Y location of destination.
* @param sum_point Pointer to the summation point.
* @param deltaT The time step size.
* @param W_d Array of synapse weight.
* @param num_neurons The number of neurons.
*/
__device__ void addSpikingSynapse(AllSpikingSynapsesDeviceProperties* allSynapsesDevice, synapseType type, const int src_neuron, const int dest_neuron, int source_index, int dest_index, BGFLOAT *sum_point, const BGFLOAT deltaT, BGFLOAT* W_d, int num_neurons)
{
    // Refuse when the source neuron is already at capacity.
    if (allSynapsesDevice->synapse_counts[src_neuron] >= allSynapsesDevice->maxSynapsesPerNeuron) {
        return; // TODO: ERROR!
    }

    // Find the first unused slot in this neuron's synapse region.
    BGSIZE synapse_index;
    BGSIZE max_synapses = allSynapsesDevice->maxSynapsesPerNeuron;
    BGSIZE iSync = max_synapses * src_neuron;
    for (synapse_index = 0; synapse_index < max_synapses; synapse_index++) {
        if (!allSynapsesDevice->in_use[iSync + synapse_index]) {
            break;
        }
    }

    // If the in_use flags disagree with synapse_counts, no free slot exists;
    // bail out instead of incrementing the count and writing one element past
    // the neuron's synapse region (the previous code did both).
    if (synapse_index == max_synapses) {
        return; // TODO: ERROR! inconsistent bookkeeping
    }

    allSynapsesDevice->synapse_counts[src_neuron]++;

    // Create a synapse of the runtime-selected class.
    switch (classSynapses_d) {
    case classAllSpikingSynapses:
        createSpikingSynapse(allSynapsesDevice, src_neuron, synapse_index, source_index, dest_index, sum_point, deltaT, type );
        break;
    case classAllDSSynapses:
        createDSSynapse(static_cast<AllDSSynapsesDeviceProperties *>(allSynapsesDevice), src_neuron, synapse_index, source_index, dest_index, sum_point, deltaT, type );
        break;
    case classAllSTDPSynapses:
        createSTDPSynapse(static_cast<AllSTDPSynapsesDeviceProperties *>(allSynapsesDevice), src_neuron, synapse_index, source_index, dest_index, sum_point, deltaT, type );
        break;
    case classAllDynamicSTDPSynapses:
        createDynamicSTDPSynapse(static_cast<AllDynamicSTDPSynapsesDeviceProperties *>(allSynapsesDevice), src_neuron, synapse_index, source_index, dest_index, sum_point, deltaT, type );
        break;
    default:
        assert(false);
    }

    // Scale the weight from the connectivity matrix, signed by synapse type.
    allSynapsesDevice->W[iSync + synapse_index] = W_d[src_neuron * num_neurons + dest_neuron] * synSign(type) * AllSynapses::SYNAPSE_STRENGTH_ADJUSTMENT;
}
/*
* Remove a synapse from the network.
*
* @param[in] allSynapsesDevice Pointer to the AllSpikingSynapsesDeviceProperties structures
* on device memory.
* @param neuron_index Index of a neuron.
* @param synapse_index Index of a synapse.
* @param[in] maxSynapses Maximum number of synapses per neuron.
*/
__device__ void eraseSpikingSynapse( AllSpikingSynapsesDeviceProperties* allSynapsesDevice, const int neuron_index, const int synapse_index, int maxSynapses )
{
    // Flat index of the synapse being removed.
    const BGSIZE flatIdx = maxSynapses * neuron_index + synapse_index;

    // Drop the neuron's synapse count, then mark the slot free and detach
    // it from its summation point.
    allSynapsesDevice->synapse_counts[neuron_index]--;
    allSynapsesDevice->in_use[flatIdx] = false;
    allSynapsesDevice->summationPoint[flatIdx] = NULL;
}
/*
* Returns the type of synapse at the given coordinates
*
* @param[in] allNeuronsDevice Pointer to the Neuron structures in device memory.
* @param src_neuron Index of the source neuron.
* @param dest_neuron Index of the destination neuron.
*/
__device__ synapseType synType( neuronType* neuron_type_map_d, const int src_neuron, const int dest_neuron )
{
    // Classify by the (source, destination) excitatory/inhibitory pairing.
    const neuronType src = neuron_type_map_d[src_neuron];
    const neuronType dst = neuron_type_map_d[dest_neuron];

    if ( src == INH ) {
        if ( dst == INH ) return II;
        if ( dst == EXC ) return IE;
    } else if ( src == EXC ) {
        if ( dst == INH ) return EI;
        if ( dst == EXC ) return EE;
    }
    // Any other combination is undefined.
    return STYPE_UNDEF;
}
/* -------------------------------------*\
|* # Global Functions for updateSynapses
\* -------------------------------------*/
/*
* Adjust the strength of the synapse or remove it from the synapse map if it has gone below
* zero.
*
* @param[in] num_neurons Number of neurons.
* @param[in] deltaT The time step size.
* @param[in] W_d Array of synapse weight.
* @param[in] maxSynapses Maximum number of synapses per neuron.
* @param[in] allNeuronsDevice Pointer to the Neuron structures in device memory.
* @param[in] allSynapsesDevice Pointer to the Synapse structures in device memory.
*/
__global__ void updateSynapsesWeightsDevice( int num_neurons, BGFLOAT deltaT, BGFLOAT* W_d, int maxSynapses, AllSpikingNeuronsDeviceProperties* allNeuronsDevice, AllSpikingSynapsesDeviceProperties* allSynapsesDevice, neuronType* neuron_type_map_d )
{
    // One thread per source neuron; tail threads exit.
    int idx = blockIdx.x * blockDim.x + threadIdx.x;
    if ( idx >= num_neurons )
        return;

    int adjusted = 0;
    //int could_have_been_removed = 0; // TODO: use this value
    int removed = 0;
    int added = 0;

    // Scale and add sign to the areas
    // visit each neuron 'a'
    int src_neuron = idx;

    // and each destination neuron 'b'
    for (int dest_neuron = 0; dest_neuron < num_neurons; dest_neuron++) {
        bool connected = false;
        synapseType type = synType(neuron_type_map_d, src_neuron, dest_neuron);

        // Scan this neuron's existing synapses for one targeting dest_neuron.
        BGSIZE existing_synapses = allSynapsesDevice->synapse_counts[src_neuron];
        BGSIZE existing_synapses_checked = 0;
        // Bound synapse_index by maxSynapses: the previous unbounded loop
        // could index past the neuron's synapse region whenever the in_use
        // flags disagreed with synapse_counts. (Counter is BGSIZE to avoid
        // a signed/unsigned comparison.)
        for (BGSIZE synapse_index = 0; (synapse_index < (BGSIZE)maxSynapses) && (existing_synapses_checked < existing_synapses) && !connected; synapse_index++) {
            BGSIZE iSyn = maxSynapses * src_neuron + synapse_index;
            if (allSynapsesDevice->in_use[iSyn] == true) {
                // if there is a synapse between a and b
                if (allSynapsesDevice->destNeuronIndex[iSyn] == dest_neuron) {
                    connected = true;
                    adjusted++;
                    // adjust the strength of the synapse or remove
                    // it from the synapse map if it has gone below
                    // zero.
                    if (W_d[src_neuron * num_neurons + dest_neuron] < 0) {
                        removed++;
                        eraseSpikingSynapse(allSynapsesDevice, src_neuron, synapse_index, maxSynapses);
                    } else {
                        // adjust
                        // g_synapseStrengthAdjustmentConstant is 1.0e-8;
                        allSynapsesDevice->W[iSyn] = W_d[src_neuron * num_neurons
                            + dest_neuron] * synSign(type) * AllSynapses::SYNAPSE_STRENGTH_ADJUSTMENT;
                    }
                }
                existing_synapses_checked++;
            }
        }

        // if not connected and weight(a,b) > 0, add a new synapse from a to b
        if (!connected && (W_d[src_neuron * num_neurons + dest_neuron] > 0)) {
            // locate summation point
            BGFLOAT* sum_point = &( allNeuronsDevice->summation_map[dest_neuron] );
            added++;
            addSpikingSynapse(allSynapsesDevice, type, src_neuron, dest_neuron, src_neuron, dest_neuron, sum_point, deltaT, W_d, num_neurons);
        }
    }
}
/*
* Adds a synapse to the network. Requires the locations of the source and
* destination neurons.
*
* @param allSynapsesDevice Pointer to the Synapse structures in device memory.
* @param pSummationMap Pointer to the summation point.
* @param width Width of neuron map (assumes square).
* @param deltaT The simulation time step size.
* @param weight Synapse weight.
*/
__global__ void initSynapsesDevice( int n, AllDSSynapsesDeviceProperties* allSynapsesDevice, BGFLOAT *pSummationMap, int width, const BGFLOAT deltaT, BGFLOAT weight )
{
    // One thread per neuron; tail threads exit.
    int idx = blockIdx.x * blockDim.x + threadIdx.x;
    if ( idx >= n )
        return;

    // create a synapse: synapse_index = 0, source_index = 0,
    // dest_index = neuron_index, type taken from the pre-filled type array.
    int neuron_index = idx;
    BGFLOAT* sum_point = &( pSummationMap[neuron_index] );
    synapseType type = allSynapsesDevice->type[neuron_index];
    createDSSynapse(allSynapsesDevice, neuron_index, 0, 0, neuron_index, sum_point, deltaT, type );
    // NOTE(review): indexing W by neuron_index (rather than
    // maxSynapsesPerNeuron * neuron_index) matches the synapse created above
    // only when maxSynapsesPerNeuron == 1 — verify against callers.
    // 'width' is currently unused by this kernel.
    allSynapsesDevice->W[neuron_index] = weight * AllSynapses::SYNAPSE_STRENGTH_ADJUSTMENT;
}
|
0da2fd9d9c085e36e2314c166a2e81d82d66da41.cu
|
#include "AllSynapsesDeviceFuncs.h"
#include "AllSynapses.h"
#include "AllSTDPSynapses.h"
#include "AllDynamicSTDPSynapses.h"
// a device variable to store synapse class ID.
__device__ enumClassSynapses classSynapses_d = undefClassSynapses;
/* --------------------------------------*\
|* # Device Functions for advanceSynapses
\* --------------------------------------*/
/*
* Update PSR (post synapse response)
*
* @param allSynapsesDevice Reference to the AllSpikingSynapsesDeviceProperties struct
* on device memory.
* @param iSyn Index of the synapse to set.
* @param simulationStep The current simulation step.
* @param deltaT Inner simulation step duration.
*/
__device__ void changeSpikingSynapsesPSRDevice(AllSpikingSynapsesDeviceProperties* allSynapsesDevice, const BGSIZE iSyn, const uint64_t simulationStep, const BGFLOAT deltaT)
{
    // Bump the post-synapse response by the decay-normalized weight.
    // (simulationStep and deltaT are unused here; the parameters keep the
    // signature uniform with the other PSR-update device functions.)
    const BGFLOAT weight = allSynapsesDevice->W[iSyn];
    const BGFLOAT decayFactor = allSynapsesDevice->decay[iSyn];
    allSynapsesDevice->psr[iSyn] += weight / decayFactor;
}
/*
* Update PSR (post synapse response)
*
* @param allSynapsesDevice Reference to the AllDSSynapsesDeviceProperties struct
* on device memory.
* @param iSyn Index of the synapse to set.
* @param simulationStep The current simulation step.
* @param deltaT Inner simulation step duration.
*/
__device__ void changeDSSynapsePSRDevice(AllDSSynapsesDeviceProperties* allSynapsesDevice, const BGSIZE iSyn, const uint64_t simulationStep, const BGFLOAT deltaT)
{
    //assert( iSyn < allSynapsesDevice->maxSynapsesPerNeuron * allSynapsesDevice->count_neurons );

    // Mutable short-term-dynamics state for this synapse.
    uint64_t &lastSpike = allSynapsesDevice->lastSpike[iSyn];
    BGFLOAT &r = allSynapsesDevice->r[iSyn];
    BGFLOAT &u = allSynapsesDevice->u[iSyn];
    // Read-only parameters.
    BGFLOAT D = allSynapsesDevice->D[iSyn];
    BGFLOAT F = allSynapsesDevice->F[iSyn];
    BGFLOAT U = allSynapsesDevice->U[iSyn];
    BGFLOAT W = allSynapsesDevice->W[iSyn];
    BGFLOAT &psr = allSynapsesDevice->psr[iSyn];
    BGFLOAT decay = allSynapsesDevice->decay[iSyn];

    // adjust synapse parameters. lastSpike == ULONG_MAX is the "never
    // spiked" sentinel set at creation, so the first spike skips this.
    // NOTE: r must be updated BEFORE u — the r update reads the old u.
    if (lastSpike != ULONG_MAX) {
        BGFLOAT isi = (simulationStep - lastSpike) * deltaT ;  // inter-spike interval (s)
        r = 1 + ( r * ( 1 - u ) - 1 ) * exp( -isi / D );
        u = U + u * ( 1 - U ) * exp( -isi / F );
    }
    psr += ( ( W / decay ) * u * r );// calculate psr
    lastSpike = simulationStep; // record the time of the spike
}
/*
* Checks if there is an input spike in the queue.
*
* @param[in] allSynapsesDevice Pointer to AllSpikingSynapsesDeviceProperties structures
* on device memory.
* @param[in] iSyn Index of the Synapse to check.
* @return true if there is an input spike event.
*/
__device__ bool isSpikingSynapsesSpikeQueueDevice(AllSpikingSynapsesDeviceProperties* allSynapsesDevice, BGSIZE iSyn)
{
    // Test-and-clear the bit for the current time slot of the circular
    // delay queue, then advance the read index (wrapping at the queue length).
    uint32_t &queue = allSynapsesDevice->delayQueue[iSyn];
    int &readIdx = allSynapsesDevice->delayIdx[iSyn];
    const int queueLength = allSynapsesDevice->ldelayQueue[iSyn];

    const uint32_t slotBit = (0x1 << readIdx);
    const bool fired = (queue & slotBit) != 0;
    queue &= ~slotBit;

    readIdx = (readIdx + 1 >= queueLength) ? 0 : readIdx + 1;
    return fired;
}
/*
* Adjust synapse weight according to the Spike-timing-dependent synaptic modification
* induced by natural spike trains
*
* @param allSynapsesDevice Pointer to the AllSTDPSynapsesDeviceProperties structures
* on device memory.
* @param iSyn Index of the synapse to set.
* @param delta Pre/post synaptic spike interval.
* @param epost Params for the rule given in Froemke and Dan (2002).
* @param epre Params for the rule given in Froemke and Dan (2002).
*/
__device__ void stdpLearningDevice(AllSTDPSynapsesDeviceProperties* allSynapsesDevice, const BGSIZE iSyn, double delta, double epost, double epre)
{
    // Learning-rule parameters for this synapse.
    BGFLOAT STDPgap = allSynapsesDevice->STDPgap[iSyn];   // dead zone around delta == 0
    BGFLOAT muneg = allSynapsesDevice->muneg[iSyn];       // weight-dependence exponent (depression)
    BGFLOAT mupos = allSynapsesDevice->mupos[iSyn];       // weight-dependence exponent (potentiation)
    BGFLOAT tauneg = allSynapsesDevice->tauneg[iSyn];     // depression time constant
    BGFLOAT taupos = allSynapsesDevice->taupos[iSyn];     // potentiation time constant
    BGFLOAT Aneg = allSynapsesDevice->Aneg[iSyn];         // depression amplitude
    BGFLOAT Apos = allSynapsesDevice->Apos[iSyn];         // potentiation amplitude
    BGFLOAT Wex = allSynapsesDevice->Wex[iSyn];           // weight bound (sign gives allowed sign of W)
    BGFLOAT &W = allSynapsesDevice->W[iSyn];
    BGFLOAT dw;

    // Pre/post interval outside the dead zone picks depression (negative
    // delta: post fired before pre) or potentiation (positive delta).
    if (delta < -STDPgap) {
        // Depression
        dw = pow(W, muneg) * Aneg * exp(delta / tauneg);
    } else if (delta > STDPgap) {
        // Potentiation
        dw = pow(Wex - W, mupos) * Apos * exp(-delta / taupos);
    } else {
        // Inside the gap: no weight change.
        return;
    }
    // Scale the raw change by the spike-efficacy factors.
    W += epost * epre * dw;

    // check the sign: W may not cross zero away from Wex's sign.
    if ((Wex < 0 && W > 0) || (Wex > 0 && W < 0)) W = 0;
    // check for greater Wmax: clamp |W| at |Wex|.
    if (fabs(W) > fabs(Wex)) W = Wex;

    DEBUG_SYNAPSE(
        printf("AllSTDPSynapses::stdpLearning:\n");
        printf("          iSyn: %d\n", iSyn);
        printf("          delta: %f\n", delta);
        printf("          epre: %f\n", epre);
        printf("          epost: %f\n", epost);
        printf("          dw: %f\n", dw);
        printf("          W: %f\n\n", W);
    );
}
/*
* Checks if there is an input spike in the queue.
*
* @param[in] allSynapsesDevice Pointer to AllSTDPSynapsesDeviceProperties structures
* on device memory.
* @param[in] iSyn Index of the Synapse to check.
* @return true if there is an input spike event.
*/
__device__ bool isSTDPSynapseSpikeQueuePostDevice(AllSTDPSynapsesDeviceProperties* allSynapsesDevice, BGSIZE iSyn)
{
    // Test-and-clear the bit for the current time slot of the postsynaptic
    // circular delay queue, then advance the read index (wrapping at length).
    uint32_t &queue = allSynapsesDevice->delayQueuePost[iSyn];
    int &readIdx = allSynapsesDevice->delayIdxPost[iSyn];
    const int queueLength = allSynapsesDevice->ldelayQueuePost[iSyn];

    const uint32_t slotBit = (0x1 << readIdx);
    const bool fired = (queue & slotBit) != 0;
    queue &= ~slotBit;

    readIdx = (readIdx + 1 >= queueLength) ? 0 : readIdx + 1;
    return fired;
}
/*
* Gets the spike history of the neuron.
*
* @param allNeuronsDevice Reference to the allNeurons struct on device memory.
* @param index Index of the neuron to get spike history.
 * @param offIndex               Offset of the history buffer to get.
* -1 will return the last spike.
* @param max_spikes Maximum number of spikes per neuron per epoch.
* @return Spike history.
*/
__device__ uint64_t getSTDPSynapseSpikeHistoryDevice(AllSpikingNeuronsDeviceProperties* allNeuronsDevice, int index, int offIndex, int max_spikes)
{
    // spikeCount + spikeCountOffset addresses one past the most recent entry
    // of the neuron's ring buffer; offIndex is a negative offset from there
    // (-1 == last spike). Adding max_spikes before the modulo keeps the
    // ring-buffer index non-negative.
    int head = allNeuronsDevice->spikeCount[index] + allNeuronsDevice->spikeCountOffset[index];
    int idxSp = (head + max_spikes + offIndex) % max_spikes;
    return allNeuronsDevice->spike_history[index][idxSp];
}
/* --------------------------------------*\
|* # Global Functions for advanceSynapses
\* --------------------------------------*/
/*
* CUDA code for advancing spiking synapses.
* Perform updating synapses for one time step.
*
* @param[in] total_synapse_counts Number of synapses.
* @param synapseIndexMapDevice Reference to the SynapseIndexMap on device memory.
* @param[in] simulationStep The current simulation step.
* @param[in] deltaT Inner simulation step duration.
* @param[in] allSynapsesDevice Pointer to AllSpikingSynapsesDeviceProperties structures
* on device memory.
*/
__global__ void advanceSpikingSynapsesDevice ( int total_synapse_counts, SynapseIndexMap* synapseIndexMapDevice, uint64_t simulationStep, const BGFLOAT deltaT, AllSpikingSynapsesDeviceProperties* allSynapsesDevice ) {
    // One thread per active synapse; tail threads exit.
    int idx = blockIdx.x * blockDim.x + threadIdx.x;
    if ( idx >= total_synapse_counts )
        return;

    // Map the dense thread index to the sparse synapse index.
    BGSIZE iSyn = synapseIndexMapDevice->activeSynapseIndex[idx];
    BGFLOAT &psr = allSynapsesDevice->psr[iSyn];
    BGFLOAT decay = allSynapsesDevice->decay[iSyn];

    // Consume (and clear) any spike scheduled for this time slot; if one
    // arrived, bump the PSR via the runtime-selected synapse class.
    if (isSpikingSynapsesSpikeQueueDevice(allSynapsesDevice, iSyn)) {
        if (classSynapses_d == classAllSpikingSynapses) {
            changeSpikingSynapsesPSRDevice(static_cast<AllSpikingSynapsesDeviceProperties*>(allSynapsesDevice), iSyn, simulationStep, deltaT);
        } else if (classSynapses_d == classAllDSSynapses) {
            changeDSSynapsePSRDevice(static_cast<AllDSSynapsesDeviceProperties*>(allSynapsesDevice), iSyn, simulationStep, deltaT);
        } else {
            assert(false);
        }
    }

    // Exponentially decay the post-spike response every step.
    psr *= decay;
}
/*
* CUDA code for advancing STDP synapses.
* Perform updating synapses for one time step.
*
* @param[in] total_synapse_counts Number of synapses.
* @param synapseIndexMapDevice Reference to the SynapseIndexMap on device memory.
* @param[in] simulationStep The current simulation step.
* @param[in] deltaT Inner simulation step duration.
* @param[in] allSynapsesDevice Pointer to AllSTDPSynapsesDeviceProperties structures
* on device memory.
*/
/*
 * CUDA kernel: advance every active STDP synapse by one simulation step.
 *
 * Launch layout: 1-D grid, one thread per entry of
 * synapseIndexMapDevice->activeSynapseIndex; threads with idx >=
 * total_synapse_counts exit immediately.
 *
 * For each synapse: if a pre- or post-synaptic spike arrived this step,
 * walk the partner neuron's spike history backwards and apply the STDP
 * weight update (stdpLearningDevice) for each pre/post spike pair, then
 * update the PSR according to the concrete synapse class. Finally the
 * post-spike response is decayed by the per-synapse decay factor.
 */
__global__ void advanceSTDPSynapsesDevice ( int total_synapse_counts, SynapseIndexMap* synapseIndexMapDevice, uint64_t simulationStep, const BGFLOAT deltaT, AllSTDPSynapsesDeviceProperties* allSynapsesDevice, AllSpikingNeuronsDeviceProperties* allNeuronsDevice, int max_spikes, int width ) {
int idx = blockIdx.x * blockDim.x + threadIdx.x;
if ( idx >= total_synapse_counts )
return;
// Map the dense thread index to the sparse index of an in-use synapse.
BGSIZE iSyn = synapseIndexMapDevice->activeSynapseIndex[idx];
BGSIZE &decay = allSynapsesDevice->decay[iSyn];
BGFLOAT &psr = allSynapsesDevice->psr[iSyn];
// is an input in the queue?
bool fPre = isSpikingSynapsesSpikeQueueDevice(allSynapsesDevice, iSyn);
bool fPost = isSTDPSynapseSpikeQueuePostDevice(allSynapsesDevice, iSyn);
if (fPre || fPost) {
// Per-synapse STDP time constants and options.
BGFLOAT &tauspre = allSynapsesDevice->tauspre[iSyn];
BGFLOAT &tauspost = allSynapsesDevice->tauspost[iSyn];
BGFLOAT &taupos = allSynapsesDevice->taupos[iSyn];
BGFLOAT &tauneg = allSynapsesDevice->tauneg[iSyn];
int &total_delay = allSynapsesDevice->total_delay[iSyn];
bool &useFroemkeDanSTDP = allSynapsesDevice->useFroemkeDanSTDP[iSyn];
// pre and post neurons index
int idxPre = allSynapsesDevice->sourceNeuronIndex[iSyn];
int idxPost = allSynapsesDevice->destNeuronIndex[iSyn];
int64_t spikeHistory, spikeHistory2;
BGFLOAT delta;
BGFLOAT epre, epost;
if (fPre) { // preSpikeHit
// spikeCount points to the next available position of spike_history,
// so the getSpikeHistory w/offset = -2 will return the spike time
// just one before the last spike.
spikeHistory = getSTDPSynapseSpikeHistoryDevice(allNeuronsDevice, idxPre, -2, max_spikes);
if (spikeHistory > 0 && useFroemkeDanSTDP) {
// delta will include the transmission delay
delta = ((int64_t)simulationStep - spikeHistory) * deltaT;
// Froemke-Dan pre-spike suppression factor.
epre = 1.0 - exp(-delta / tauspre);
} else {
epre = 1.0;
}
// call the learning function stdpLearning() for each pair of
// pre-post spikes
int offIndex = -1; // last spike
while (true) {
spikeHistory = getSTDPSynapseSpikeHistoryDevice(allNeuronsDevice, idxPost, offIndex, max_spikes);
if (spikeHistory == ULONG_MAX)
break;
// delta is the spike interval between pre-post spikes
// (post spike is in the past, so delta <= 0 here).
delta = (spikeHistory - (int64_t)simulationStep) * deltaT;
// NOTE(review): %d is used below for 64-bit values
// (spikeHistory/simulationStep) -- confirm the format specifiers.
DEBUG_SYNAPSE(
printf("advanceSTDPSynapsesDevice: fPre\n");
printf(" iSyn: %d\n", iSyn);
printf(" idxPre: %d\n", idxPre);
printf(" idxPost: %d\n", idxPost);
printf(" spikeHistory: %d\n", spikeHistory);
printf(" simulationStep: %d\n", simulationStep);
printf(" delta: %f\n\n", delta);
);
// Spikes farther apart than 3*tauneg contribute negligibly; stop.
if (delta <= -3.0 * tauneg)
break;
if (useFroemkeDanSTDP) {
// Efficacy depends on the interval between this post spike
// and the one before it.
spikeHistory2 = getSTDPSynapseSpikeHistoryDevice(allNeuronsDevice, idxPost, offIndex-1, max_spikes);
if (spikeHistory2 == ULONG_MAX)
break;
epost = 1.0 - exp(-((spikeHistory - spikeHistory2) * deltaT) / tauspost);
} else {
epost = 1.0;
}
stdpLearningDevice(allSynapsesDevice, iSyn, delta, epost, epre);
--offIndex;
}
// Update the PSR through the concrete class's routine, selected by
// the device-global classSynapses_d tag.
switch (classSynapses_d) {
case classAllSTDPSynapses:
changeSpikingSynapsesPSRDevice(static_cast<AllSpikingSynapsesDeviceProperties*>(allSynapsesDevice), iSyn, simulationStep, deltaT);
break;
case classAllDynamicSTDPSynapses:
// Note: we cast void * over the allSynapsesDevice, then recast it,
// because AllDSSynapsesDeviceProperties inherited properties from
// the AllDSSynapsesDeviceProperties and the AllSTDPSynapsesDeviceProperties.
changeDSSynapsePSRDevice(static_cast<AllDSSynapsesDeviceProperties*>((void *)allSynapsesDevice), iSyn, simulationStep, deltaT);
break;
default:
assert(false);
}
}
if (fPost) { // postSpikeHit
// spikeCount points to the next available position of spike_history,
// so the getSpikeHistory w/offset = -2 will return the spike time
// just one before the last spike.
spikeHistory = getSTDPSynapseSpikeHistoryDevice(allNeuronsDevice, idxPost, -2, max_spikes);
if (spikeHistory > 0 && useFroemkeDanSTDP) {
// delta will include the transmission delay
delta = ((int64_t)simulationStep - spikeHistory) * deltaT;
epost = 1.0 - exp(-delta / tauspost);
} else {
epost = 1.0;
}
// call the learning function stdpLearning() for each pair of
// post-pre spikes
int offIndex = -1; // last spike
while (true) {
spikeHistory = getSTDPSynapseSpikeHistoryDevice(allNeuronsDevice, idxPre, offIndex, max_spikes);
if (spikeHistory == ULONG_MAX)
break;
// delta is the spike interval between post-pre spikes
// (positive; the transmission delay is subtracted out).
delta = ((int64_t)simulationStep - spikeHistory - total_delay) * deltaT;
DEBUG_SYNAPSE(
printf("advanceSTDPSynapsesDevice: fPost\n");
printf(" iSyn: %d\n", iSyn);
printf(" idxPre: %d\n", idxPre);
printf(" idxPost: %d\n", idxPost);
printf(" spikeHistory: %d\n", spikeHistory);
printf(" simulationStep: %d\n", simulationStep);
printf(" delta: %f\n\n", delta);
);
// Outside the potentiation window (0, 3*taupos); stop scanning.
if (delta <= 0 || delta >= 3.0 * taupos)
break;
if (useFroemkeDanSTDP) {
spikeHistory2 = getSTDPSynapseSpikeHistoryDevice(allNeuronsDevice, idxPre, offIndex-1, max_spikes);
if (spikeHistory2 == ULONG_MAX)
break;
epre = 1.0 - exp(-((spikeHistory - spikeHistory2) * deltaT) / tauspre);
} else {
epre = 1.0;
}
stdpLearningDevice(allSynapsesDevice, iSyn, delta, epost, epre);
--offIndex;
}
}
}
// decay the post spike response
psr *= decay;
}
/* ------------------------------------*\
|* # Device Functions for createSynapse
\* ------------------------------------*/
/*
* Return 1 if originating neuron is excitatory, -1 otherwise.
*
* @param[in] t synapseType I to I, I to E, E to I, or E to E
* @return 1 or -1
*/
/*
 * Sign contributed by the originating neuron of a synapse.
 *
 * @param[in] t  synapse type (II, IE, EI, or EE)
 * @return -1 for an inhibitory source (II/IE), 1 for an excitatory
 *         source (EI/EE), 0 for any other value.
 */
__device__ int synSign( synapseType t )
{
    if ( t == II || t == IE )
        return -1;
    if ( t == EI || t == EE )
        return 1;
    return 0;
}
/*
* Create a Spiking Synapse and connect it to the model.
*
* @param allSynapsesDevice Pointer to the AllSpikingSynapsesDeviceProperties structures
* on device memory.
* @param neuron_index Index of the source neuron.
* @param synapse_index Index of the Synapse to create.
* @param source_x X location of source.
* @param source_y Y location of source.
* @param dest_x X location of destination.
* @param dest_y Y location of destination.
* @param sum_point Pointer to the summation point.
* @param deltaT The time step size.
* @param type Type of the Synapse to create.
*/
/*
 * Create a Spiking Synapse and connect it to the model.
 *
 * Writes every per-synapse field of slot `synapse_index` in
 * `neuron_index`'s row of the flat synapse table.
 *
 * @param allSynapsesDevice  Pointer to the AllSpikingSynapsesDeviceProperties
 *                           structures on device memory.
 * @param neuron_index       Index of the source neuron (selects the row).
 * @param synapse_index      Slot within that neuron's row.
 * @param source_index       Index of the source neuron stored on the synapse.
 * @param dest_index         Index of the destination neuron.
 * @param sum_point          Pointer to the destination's summation point.
 * @param deltaT             The simulation time step size.
 * @param type               Type of the Synapse to create.
 */
__device__ void createSpikingSynapse(AllSpikingSynapsesDeviceProperties* allSynapsesDevice, const int neuron_index, const int synapse_index, int source_index, int dest_index, BGFLOAT *sum_point, const BGFLOAT deltaT, synapseType type)
{
    BGSIZE max_synapses = allSynapsesDevice->maxSynapsesPerNeuron;
    BGSIZE iSyn = max_synapses * neuron_index + synapse_index;

    allSynapsesDevice->in_use[iSyn] = true;
    allSynapsesDevice->summationPoint[iSyn] = sum_point;
    allSynapsesDevice->destNeuronIndex[iSyn] = dest_index;
    allSynapsesDevice->sourceNeuronIndex[iSyn] = source_index;
    allSynapsesDevice->W[iSyn] = synSign(type) * 10.0e-9;
    allSynapsesDevice->delayQueue[iSyn] = 0;
    allSynapsesDevice->delayIdx[iSyn] = 0;
    allSynapsesDevice->ldelayQueue[iSyn] = LENGTH_OF_DELAYQUEUE;
    allSynapsesDevice->psr[iSyn] = 0.0;
    allSynapsesDevice->type[iSyn] = type;

    // Bug fix: previously `tau` and `delay` were left uninitialized (and then
    // read) when `type` matched none of the four named cases. Initialize them
    // to safe defaults; every valid type overwrites both below.
    BGFLOAT tau = DEFAULT_tau;
    BGFLOAT delay = 0.8e-3;
    switch (type) {
        case II:            // II and IE share the same constants
        case IE:
            tau = 6e-3;
            delay = 0.8e-3;
            break;
        case EI:
            tau = 3e-3;
            delay = 0.8e-3;
            break;
        case EE:
            tau = 3e-3;
            delay = 1.5e-3;
            break;
        default:
            break;
    }
    allSynapsesDevice->tau[iSyn] = tau;
    allSynapsesDevice->decay[iSyn] = exp( -deltaT / tau );
    // Convert the delay (seconds) into whole simulation steps.
    allSynapsesDevice->total_delay[iSyn] = static_cast<int>( delay / deltaT ) + 1;
    // The delay, expressed in bits, must fit in the fixed-size delay queue.
    uint32_t size = allSynapsesDevice->total_delay[iSyn] / ( sizeof(uint8_t) * 8 ) + 1;
    assert( size <= BYTES_OF_DELAYQUEUE );
}
/*
* Create a DS Synapse and connect it to the model.
*
* @param allSynapsesDevice Pointer to the AllDSSynapsesDeviceProperties structures
* on device memory.
* @param neuron_index Index of the source neuron.
* @param synapse_index Index of the Synapse to create.
* @param source_x X location of source.
* @param source_y Y location of source.
* @param dest_x X location of destination.
* @param dest_y Y location of destination.
* @param sum_point Pointer to the summation point.
* @param deltaT The time step size.
* @param type Type of the Synapse to create.
*/
/*
 * Create a DS (dynamic spiking) Synapse and connect it to the model.
 *
 * Writes every per-synapse field of slot `synapse_index` in
 * `neuron_index`'s row, including the depression/facilitation dynamics
 * parameters (U, D, F).
 *
 * @param allSynapsesDevice  Pointer to the AllDSSynapsesDeviceProperties
 *                           structures on device memory.
 * @param neuron_index       Index of the source neuron (selects the row).
 * @param synapse_index      Slot within that neuron's row.
 * @param source_index       Index of the source neuron stored on the synapse.
 * @param dest_index         Index of the destination neuron.
 * @param sum_point          Pointer to the destination's summation point.
 * @param deltaT             The simulation time step size.
 * @param type               Type of the Synapse to create.
 */
__device__ void createDSSynapse(AllDSSynapsesDeviceProperties* allSynapsesDevice, const int neuron_index, const int synapse_index, int source_index, int dest_index, BGFLOAT *sum_point, const BGFLOAT deltaT, synapseType type)
{
    BGSIZE max_synapses = allSynapsesDevice->maxSynapsesPerNeuron;
    BGSIZE iSyn = max_synapses * neuron_index + synapse_index;

    allSynapsesDevice->in_use[iSyn] = true;
    allSynapsesDevice->summationPoint[iSyn] = sum_point;
    allSynapsesDevice->destNeuronIndex[iSyn] = dest_index;
    allSynapsesDevice->sourceNeuronIndex[iSyn] = source_index;
    allSynapsesDevice->W[iSyn] = synSign(type) * 10.0e-9;
    allSynapsesDevice->delayQueue[iSyn] = 0;
    allSynapsesDevice->delayIdx[iSyn] = 0;
    allSynapsesDevice->ldelayQueue[iSyn] = LENGTH_OF_DELAYQUEUE;
    allSynapsesDevice->psr[iSyn] = 0.0;
    allSynapsesDevice->r[iSyn] = 1.0;
    allSynapsesDevice->u[iSyn] = 0.4; // DEFAULT_U
    allSynapsesDevice->lastSpike[iSyn] = ULONG_MAX;
    allSynapsesDevice->type[iSyn] = type;

    // Bug fix: previously `U`, `D`, `F`, `tau` and `delay` were left
    // uninitialized (and then read) when `type` matched none of the four
    // named cases. Initialize them to safe defaults; every valid type
    // overwrites all five below.
    BGFLOAT U = DEFAULT_U;
    BGFLOAT D = 0.0;
    BGFLOAT F = 0.0;
    BGFLOAT tau = DEFAULT_tau;
    BGFLOAT delay = 0.8e-3;
    switch (type) {
        case II:
            U = 0.32;
            D = 0.144;
            F = 0.06;
            tau = 6e-3;
            delay = 0.8e-3;
            break;
        case IE:
            U = 0.25;
            D = 0.7;
            F = 0.02;
            tau = 6e-3;
            delay = 0.8e-3;
            break;
        case EI:
            U = 0.05;
            D = 0.125;
            F = 1.2;
            tau = 3e-3;
            delay = 0.8e-3;
            break;
        case EE:
            U = 0.5;
            D = 1.1;
            F = 0.05;
            tau = 3e-3;
            delay = 1.5e-3;
            break;
        default:
            break;
    }
    allSynapsesDevice->U[iSyn] = U;
    allSynapsesDevice->D[iSyn] = D;
    allSynapsesDevice->F[iSyn] = F;
    allSynapsesDevice->tau[iSyn] = tau;
    allSynapsesDevice->decay[iSyn] = exp( -deltaT / tau );
    // Convert the delay (seconds) into whole simulation steps.
    allSynapsesDevice->total_delay[iSyn] = static_cast<int>( delay / deltaT ) + 1;
    // The delay, expressed in bits, must fit in the fixed-size delay queue.
    uint32_t size = allSynapsesDevice->total_delay[iSyn] / ( sizeof(uint8_t) * 8 ) + 1;
    assert( size <= BYTES_OF_DELAYQUEUE );
}
/*
* Create a Synapse and connect it to the model.
*
* @param allSynapsesDevice Pointer to the AllSTDPSynapsesDeviceProperties structures
* on device memory.
* @param neuron_index Index of the source neuron.
* @param synapse_index Index of the Synapse to create.
* @param source_x X location of source.
* @param source_y Y location of source.
* @param dest_x X location of destination.
* @param dest_y Y location of destination.
* @param sum_point Pointer to the summation point.
* @param deltaT The time step size.
* @param type Type of the Synapse to create.
*/
/*
 * Create an STDP Synapse and connect it to the model.
 *
 * Writes every per-synapse field of slot `synapse_index` in
 * `neuron_index`'s row, including the STDP learning parameters.
 *
 * @param allSynapsesDevice  Pointer to the AllSTDPSynapsesDeviceProperties
 *                           structures on device memory.
 * @param neuron_index       Index of the source neuron (selects the row).
 * @param synapse_index      Slot within that neuron's row.
 * @param source_index       Index of the source neuron stored on the synapse.
 * @param dest_index         Index of the destination neuron.
 * @param sum_point          Pointer to the destination's summation point.
 * @param deltaT             The simulation time step size.
 * @param type               Type of the Synapse to create.
 */
__device__ void createSTDPSynapse(AllSTDPSynapsesDeviceProperties* allSynapsesDevice, const int neuron_index, const int synapse_index, int source_index, int dest_index, BGFLOAT *sum_point, const BGFLOAT deltaT, synapseType type)
{
    BGSIZE max_synapses = allSynapsesDevice->maxSynapsesPerNeuron;
    BGSIZE iSyn = max_synapses * neuron_index + synapse_index;

    allSynapsesDevice->in_use[iSyn] = true;
    allSynapsesDevice->summationPoint[iSyn] = sum_point;
    allSynapsesDevice->destNeuronIndex[iSyn] = dest_index;
    allSynapsesDevice->sourceNeuronIndex[iSyn] = source_index;
    allSynapsesDevice->W[iSyn] = synSign(type) * 10.0e-9;
    allSynapsesDevice->delayQueue[iSyn] = 0;
    allSynapsesDevice->delayIdx[iSyn] = 0;
    allSynapsesDevice->ldelayQueue[iSyn] = LENGTH_OF_DELAYQUEUE;
    allSynapsesDevice->psr[iSyn] = 0.0;
    allSynapsesDevice->type[iSyn] = type;

    // Bug fix: previously `tau` and `delay` were left uninitialized (and then
    // read) when `type` matched none of the four named cases. Initialize them
    // to safe defaults; every valid type overwrites both below.
    BGFLOAT tau = DEFAULT_tau;
    BGFLOAT delay = 0.8e-3;
    switch (type) {
        case II:            // II and IE share the same constants
        case IE:
            tau = 6e-3;
            delay = 0.8e-3;
            break;
        case EI:
            tau = 3e-3;
            delay = 0.8e-3;
            break;
        case EE:
            tau = 3e-3;
            delay = 1.5e-3;
            break;
        default:
            break;
    }
    allSynapsesDevice->tau[iSyn] = tau;
    allSynapsesDevice->decay[iSyn] = exp( -deltaT / tau );
    // Convert the delay (seconds) into whole simulation steps.
    allSynapsesDevice->total_delay[iSyn] = static_cast<int>( delay / deltaT ) + 1;
    // The delay, expressed in bits, must fit in the fixed-size delay queue.
    uint32_t size = allSynapsesDevice->total_delay[iSyn] / ( sizeof(uint8_t) * 8 ) + 1;
    assert( size <= BYTES_OF_DELAYQUEUE );

    // STDP learning parameters (defaults; Froemke-Dan modulation disabled).
    allSynapsesDevice->Apos[iSyn] = 0.5;
    allSynapsesDevice->Aneg[iSyn] = -0.5;
    allSynapsesDevice->STDPgap[iSyn] = 2e-3;
    allSynapsesDevice->total_delayPost[iSyn] = 0;
    allSynapsesDevice->tauspost[iSyn] = 0;
    allSynapsesDevice->tauspre[iSyn] = 0;
    allSynapsesDevice->taupos[iSyn] = 15e-3;
    allSynapsesDevice->tauneg[iSyn] = 35e-3;
    allSynapsesDevice->Wex[iSyn] = 1.0;
    allSynapsesDevice->mupos[iSyn] = 0;
    allSynapsesDevice->muneg[iSyn] = 0;
    allSynapsesDevice->useFroemkeDanSTDP[iSyn] = false;
}
/*
* Create a Synapse and connect it to the model.
*
* @param allSynapsesDevice Pointer to the AllDynamicSTDPSynapsesDeviceProperties structures
* on device memory.
* @param neuron_index Index of the source neuron.
* @param synapse_index Index of the Synapse to create.
* @param source_x X location of source.
* @param source_y Y location of source.
* @param dest_x X location of destination.
* @param dest_y Y location of destination.
* @param sum_point Pointer to the summation point.
* @param deltaT The time step size.
* @param type Type of the Synapse to create.
*/
/*
 * Create a Dynamic STDP Synapse and connect it to the model.
 *
 * Writes every per-synapse field of slot `synapse_index` in
 * `neuron_index`'s row: dynamic-synapse parameters (U, D, F) plus the
 * STDP learning parameters.
 *
 * @param allSynapsesDevice  Pointer to the
 *                           AllDynamicSTDPSynapsesDeviceProperties
 *                           structures on device memory.
 * @param neuron_index       Index of the source neuron (selects the row).
 * @param synapse_index      Slot within that neuron's row.
 * @param source_index       Index of the source neuron stored on the synapse.
 * @param dest_index         Index of the destination neuron.
 * @param sum_point          Pointer to the destination's summation point.
 * @param deltaT             The simulation time step size.
 * @param type               Type of the Synapse to create.
 */
__device__ void createDynamicSTDPSynapse(AllDynamicSTDPSynapsesDeviceProperties* allSynapsesDevice, const int neuron_index, const int synapse_index, int source_index, int dest_index, BGFLOAT *sum_point, const BGFLOAT deltaT, synapseType type)
{
    BGSIZE max_synapses = allSynapsesDevice->maxSynapsesPerNeuron;
    BGSIZE iSyn = max_synapses * neuron_index + synapse_index;

    allSynapsesDevice->in_use[iSyn] = true;
    allSynapsesDevice->summationPoint[iSyn] = sum_point;
    allSynapsesDevice->destNeuronIndex[iSyn] = dest_index;
    allSynapsesDevice->sourceNeuronIndex[iSyn] = source_index;
    allSynapsesDevice->W[iSyn] = synSign(type) * 10.0e-9;
    allSynapsesDevice->delayQueue[iSyn] = 0;
    allSynapsesDevice->delayIdx[iSyn] = 0;
    allSynapsesDevice->ldelayQueue[iSyn] = LENGTH_OF_DELAYQUEUE;
    allSynapsesDevice->psr[iSyn] = 0.0;
    allSynapsesDevice->r[iSyn] = 1.0;
    allSynapsesDevice->u[iSyn] = 0.4; // DEFAULT_U
    allSynapsesDevice->lastSpike[iSyn] = ULONG_MAX;
    allSynapsesDevice->type[iSyn] = type;

    // Bug fix: previously `U`, `D`, `F`, `tau` and `delay` were left
    // uninitialized (and then read) when `type` matched none of the four
    // named cases. Initialize them to safe defaults; every valid type
    // overwrites all five below.
    BGFLOAT U = DEFAULT_U;
    BGFLOAT D = 0.0;
    BGFLOAT F = 0.0;
    BGFLOAT tau = DEFAULT_tau;
    BGFLOAT delay = 0.8e-3;
    switch (type) {
        case II:
            U = 0.32;
            D = 0.144;
            F = 0.06;
            tau = 6e-3;
            delay = 0.8e-3;
            break;
        case IE:
            U = 0.25;
            D = 0.7;
            F = 0.02;
            tau = 6e-3;
            delay = 0.8e-3;
            break;
        case EI:
            U = 0.05;
            D = 0.125;
            F = 1.2;
            tau = 3e-3;
            delay = 0.8e-3;
            break;
        case EE:
            U = 0.5;
            D = 1.1;
            F = 0.05;
            tau = 3e-3;
            delay = 1.5e-3;
            break;
        default:
            break;
    }
    allSynapsesDevice->U[iSyn] = U;
    allSynapsesDevice->D[iSyn] = D;
    allSynapsesDevice->F[iSyn] = F;
    allSynapsesDevice->tau[iSyn] = tau;
    allSynapsesDevice->decay[iSyn] = exp( -deltaT / tau );
    // Convert the delay (seconds) into whole simulation steps.
    allSynapsesDevice->total_delay[iSyn] = static_cast<int>( delay / deltaT ) + 1;
    // The delay, expressed in bits, must fit in the fixed-size delay queue.
    uint32_t size = allSynapsesDevice->total_delay[iSyn] / ( sizeof(uint8_t) * 8 ) + 1;
    assert( size <= BYTES_OF_DELAYQUEUE );

    // STDP learning parameters (defaults; Froemke-Dan modulation disabled).
    allSynapsesDevice->Apos[iSyn] = 0.5;
    allSynapsesDevice->Aneg[iSyn] = -0.5;
    allSynapsesDevice->STDPgap[iSyn] = 2e-3;
    allSynapsesDevice->total_delayPost[iSyn] = 0;
    allSynapsesDevice->tauspost[iSyn] = 0;
    allSynapsesDevice->tauspre[iSyn] = 0;
    allSynapsesDevice->taupos[iSyn] = 15e-3;
    allSynapsesDevice->tauneg[iSyn] = 35e-3;
    allSynapsesDevice->Wex[iSyn] = 1.0;
    allSynapsesDevice->mupos[iSyn] = 0;
    allSynapsesDevice->muneg[iSyn] = 0;
    allSynapsesDevice->useFroemkeDanSTDP[iSyn] = false;
}
/*
* Adds a synapse to the network. Requires the locations of the source and
* destination neurons.
*
* @param allSynapsesDevice Pointer to the AllSpikingSynapsesDeviceProperties structures
* on device memory.
* @param type Type of the Synapse to create.
* @param src_neuron Index of the source neuron.
* @param dest_neuron Index of the destination neuron.
* @param source_x X location of source.
* @param source_y Y location of source.
* @param dest_x X location of destination.
* @param dest_y Y location of destination.
* @param sum_point Pointer to the summation point.
* @param deltaT The time step size.
* @param W_d Array of synapse weight.
* @param num_neurons The number of neurons.
*/
/*
 * Adds a synapse to the network: finds the first free slot in the source
 * neuron's synapse row, creates the synapse there via the concrete class's
 * constructor (selected by the device-global classSynapses_d), and sets its
 * weight from the weight matrix W_d.
 *
 * @param allSynapsesDevice Pointer to the AllSpikingSynapsesDeviceProperties
 *                          structures on device memory.
 * @param type              Type of the Synapse to create.
 * @param src_neuron        Index of the source neuron.
 * @param dest_neuron       Index of the destination neuron.
 * @param source_index      Source index stored on the synapse.
 * @param dest_index        Destination index stored on the synapse.
 * @param sum_point         Pointer to the summation point.
 * @param deltaT            The time step size.
 * @param W_d               Array of synapse weight (num_neurons x num_neurons).
 * @param num_neurons       The number of neurons.
 */
__device__ void addSpikingSynapse(AllSpikingSynapsesDeviceProperties* allSynapsesDevice, synapseType type, const int src_neuron, const int dest_neuron, int source_index, int dest_index, BGFLOAT *sum_point, const BGFLOAT deltaT, BGFLOAT* W_d, int num_neurons)
{
// Row full: silently drop the request (no error reported to the host).
if (allSynapsesDevice->synapse_counts[src_neuron] >= allSynapsesDevice->maxSynapsesPerNeuron) {
return; // TODO: ERROR!
}
// add it to the list
// Linear scan for the first unused slot in this neuron's row.
// NOTE(review): if the count and the in_use flags ever disagree, this loop
// can terminate with synapse_index == max_synapses and the writes below go
// one row over -- relies on synapse_counts being kept consistent.
BGSIZE synapse_index;
BGSIZE max_synapses = allSynapsesDevice->maxSynapsesPerNeuron;
BGSIZE iSync = max_synapses * src_neuron;
for (synapse_index = 0; synapse_index < max_synapses; synapse_index++) {
if (!allSynapsesDevice->in_use[iSync + synapse_index]) {
break;
}
}
allSynapsesDevice->synapse_counts[src_neuron]++;
// create a synapse
// Dispatch on the concrete synapse class tag stored in device memory.
switch (classSynapses_d) {
case classAllSpikingSynapses:
createSpikingSynapse(allSynapsesDevice, src_neuron, synapse_index, source_index, dest_index, sum_point, deltaT, type );
break;
case classAllDSSynapses:
createDSSynapse(static_cast<AllDSSynapsesDeviceProperties *>(allSynapsesDevice), src_neuron, synapse_index, source_index, dest_index, sum_point, deltaT, type );
break;
case classAllSTDPSynapses:
createSTDPSynapse(static_cast<AllSTDPSynapsesDeviceProperties *>(allSynapsesDevice), src_neuron, synapse_index, source_index, dest_index, sum_point, deltaT, type );
break;
case classAllDynamicSTDPSynapses:
createDynamicSTDPSynapse(static_cast<AllDynamicSTDPSynapsesDeviceProperties *>(allSynapsesDevice), src_neuron, synapse_index, source_index, dest_index, sum_point, deltaT, type );
break;
default:
assert(false);
}
// Override the constructor's default weight with the matrix entry, signed
// by synapse type and scaled by the global strength adjustment.
allSynapsesDevice->W[iSync + synapse_index] = W_d[src_neuron * num_neurons + dest_neuron] * synSign(type) * AllSynapses::SYNAPSE_STRENGTH_ADJUSTMENT;
}
/*
* Remove a synapse from the network.
*
* @param[in] allSynapsesDevice Pointer to the AllSpikingSynapsesDeviceProperties structures
* on device memory.
* @param neuron_index Index of a neuron.
* @param synapse_index Index of a synapse.
* @param[in] maxSynapses Maximum number of synapses per neuron.
*/
/*
 * Remove a synapse from the network: mark its slot unused, detach it from
 * the summation point, and decrement the owning neuron's synapse count.
 *
 * @param[in] allSynapsesDevice Pointer to the AllSpikingSynapsesDeviceProperties
 *                              structures on device memory.
 * @param neuron_index          Index of the owning neuron.
 * @param synapse_index         Slot within that neuron's row.
 * @param[in] maxSynapses       Maximum number of synapses per neuron.
 */
__device__ void eraseSpikingSynapse( AllSpikingSynapsesDeviceProperties* allSynapsesDevice, const int neuron_index, const int synapse_index, int maxSynapses )
{
    // Flat index of the synapse within the per-neuron table.
    const BGSIZE slot = maxSynapses * neuron_index + synapse_index;
    allSynapsesDevice->synapse_counts[neuron_index]--;
    allSynapsesDevice->in_use[slot] = false;
    allSynapsesDevice->summationPoint[slot] = NULL;
}
/*
* Returns the type of synapse at the given coordinates
*
* @param[in] allNeuronsDevice Pointer to the Neuron structures in device memory.
* @param src_neuron Index of the source neuron.
* @param dest_neuron Index of the destination neuron.
*/
/*
 * Returns the type of synapse between two neurons, derived from whether
 * each endpoint is inhibitory (INH) or excitatory (EXC).
 *
 * @param[in] neuron_type_map_d Per-neuron INH/EXC map in device memory.
 * @param src_neuron            Index of the source neuron.
 * @param dest_neuron           Index of the destination neuron.
 * @return II/IE/EI/EE, or STYPE_UNDEF if either endpoint is neither.
 */
__device__ synapseType synType( neuronType* neuron_type_map_d, const int src_neuron, const int dest_neuron )
{
    const neuronType src = neuron_type_map_d[src_neuron];
    const neuronType dst = neuron_type_map_d[dest_neuron];
    if ( src == INH )
    {
        if ( dst == INH ) return II;
        if ( dst == EXC ) return IE;
    }
    else if ( src == EXC )
    {
        if ( dst == INH ) return EI;
        if ( dst == EXC ) return EE;
    }
    return STYPE_UNDEF;
}
/* -------------------------------------*\
|* # Global Functions for updateSynapses
\* -------------------------------------*/
/*
* Adjust the strength of the synapse or remove it from the synapse map if it has gone below
* zero.
*
* @param[in] num_neurons Number of neurons.
* @param[in] deltaT The time step size.
* @param[in] W_d Array of synapse weight.
* @param[in] maxSynapses Maximum number of synapses per neuron.
* @param[in] allNeuronsDevice Pointer to the Neuron structures in device memory.
* @param[in] allSynapsesDevice Pointer to the Synapse structures in device memory.
*/
/*
 * Adjust the strength of the synapse or remove it from the synapse map if it
 * has gone below zero.
 *
 * Launch layout: 1-D grid, one thread per source neuron. Each thread scans
 * its neuron's row against every possible destination: existing synapses are
 * re-weighted from W_d (or erased when the weight went negative), and a new
 * synapse is added where W_d is positive but no connection exists yet.
 *
 * @param[in] num_neurons       Number of neurons.
 * @param[in] deltaT            The time step size.
 * @param[in] W_d               Weight matrix (num_neurons x num_neurons).
 * @param[in] maxSynapses       Maximum number of synapses per neuron.
 * @param[in] allNeuronsDevice  Pointer to the Neuron structures in device memory.
 * @param[in] allSynapsesDevice Pointer to the Synapse structures in device memory.
 * @param[in] neuron_type_map_d Per-neuron INH/EXC map in device memory.
 */
__global__ void updateSynapsesWeightsDevice( int num_neurons, BGFLOAT deltaT, BGFLOAT* W_d, int maxSynapses, AllSpikingNeuronsDeviceProperties* allNeuronsDevice, AllSpikingSynapsesDeviceProperties* allSynapsesDevice, neuronType* neuron_type_map_d )
{
int idx = blockIdx.x * blockDim.x + threadIdx.x;
if ( idx >= num_neurons )
return;
// Per-thread bookkeeping counters (currently only written, never reported).
int adjusted = 0;
//int could_have_been_removed = 0; // TODO: use this value
int removed = 0;
int added = 0;
// Scale and add sign to the areas
// visit each neuron 'a'
int src_neuron = idx;
// and each destination neuron 'b'
for (int dest_neuron = 0; dest_neuron < num_neurons; dest_neuron++) {
// visit each synapse at (xa,ya)
bool connected = false;
synapseType type = synType(neuron_type_map_d, src_neuron, dest_neuron);
// for each existing synapse
// Snapshot the count before the scan: eraseSpikingSynapse below
// decrements synapse_counts, but this loop bound is already fixed.
// NOTE(review): `existing_synapses_checked` is int while
// `existing_synapses` is BGSIZE -- confirm the signed/unsigned compare.
BGSIZE existing_synapses = allSynapsesDevice->synapse_counts[src_neuron];
int existing_synapses_checked = 0;
for (BGSIZE synapse_index = 0; (existing_synapses_checked < existing_synapses) && !connected; synapse_index++) {
BGSIZE iSyn = maxSynapses * src_neuron + synapse_index;
if (allSynapsesDevice->in_use[iSyn] == true) {
// if there is a synapse between a and b
if (allSynapsesDevice->destNeuronIndex[iSyn] == dest_neuron) {
connected = true;
adjusted++;
// adjust the strength of the synapse or remove
// it from the synapse map if it has gone below
// zero.
if (W_d[src_neuron * num_neurons + dest_neuron] < 0) {
removed++;
eraseSpikingSynapse(allSynapsesDevice, src_neuron, synapse_index, maxSynapses);
} else {
// adjust
// g_synapseStrengthAdjustmentConstant is 1.0e-8;
allSynapsesDevice->W[iSyn] = W_d[src_neuron * num_neurons
+ dest_neuron] * synSign(type) * AllSynapses::SYNAPSE_STRENGTH_ADJUSTMENT;
}
}
// Only in-use slots count toward the checked total; free slots
// are skipped without consuming the budget.
existing_synapses_checked++;
}
}
// if not connected and weight(a,b) > 0, add a new synapse from a to b
if (!connected && (W_d[src_neuron * num_neurons + dest_neuron] > 0)) {
// locate summation point
BGFLOAT* sum_point = &( allNeuronsDevice->summation_map[dest_neuron] );
added++;
addSpikingSynapse(allSynapsesDevice, type, src_neuron, dest_neuron, src_neuron, dest_neuron, sum_point, deltaT, W_d, num_neurons);
}
}
}
/*
* Adds a synapse to the network. Requires the locations of the source and
* destination neurons.
*
* @param allSynapsesDevice Pointer to the Synapse structures in device memory.
* @param pSummationMap Pointer to the summation point.
* @param width Width of neuron map (assumes square).
* @param deltaT The simulation time step size.
* @param weight Synapse weight.
*/
/*
 * Kernel: build one DS synapse per neuron and set its weight.
 *
 * Launch layout: 1-D grid, one thread per neuron (threads past n exit).
 * Each neuron gets a synapse in slot 0 of its row, with source index 0 and
 * destination equal to the neuron itself, summing into pSummationMap.
 *
 * @param n                 Number of neurons/synapses to initialize.
 * @param allSynapsesDevice Pointer to the Synapse structures in device memory.
 * @param pSummationMap     Pointer to the summation point array.
 * @param width             Width of neuron map (assumes square).
 * @param deltaT            The simulation time step size.
 * @param weight            Synapse weight.
 */
__global__ void initSynapsesDevice( int n, AllDSSynapsesDeviceProperties* allSynapsesDevice, BGFLOAT *pSummationMap, int width, const BGFLOAT deltaT, BGFLOAT weight )
{
    const int neuron_index = blockIdx.x * blockDim.x + threadIdx.x;
    if ( neuron_index >= n )
        return;

    // Create one synapse for this neuron, reusing its pre-set type.
    synapseType type = allSynapsesDevice->type[neuron_index];
    BGFLOAT* sum_point = &( pSummationMap[neuron_index] );
    createDSSynapse(allSynapsesDevice, neuron_index, 0, 0, neuron_index, sum_point, deltaT, type );

    // Override the constructor's default weight with the caller's value.
    allSynapsesDevice->W[neuron_index] = weight * AllSynapses::SYNAPSE_STRENGTH_ADJUSTMENT;
}
|
6dfdc6c165f46eb2c93baf2a6b9e5459c982ea6a.hip
|
// !!! This is a file automatically generated by hipify!!!
/**
* File: NetworkForward.cu
* Author: akirby
*
* Created on April 23, 2020, 12:37 PM
*/
/* header files */
#include "Network.h"
#include <unistd.h>
#define MIN(x,y) (x)<(y) ? (x):(y)
#define MAX(x,y) (x)>(y) ? (x):(y)
#define BLOCK_LOOP(level) \
for (int i = 0; i < blocks[(level)].size(); ++i)
#define ROOT if(global_rank == 0)
/*
 * Synchronous forward pass through this rank's stage of the model pipeline.
 *
 * Pipeline pattern: every model rank except the first receives the previous
 * stage's activations over MPI (staged through a pinned host buffer, then
 * copied to the GPU); all local blocks at `level` are evaluated in order;
 * every rank except the last sends its output to the next stage. Wall-clock
 * time is split into communication and computation, and the computation time
 * is summed across all ranks via MPI_Allreduce.
 *
 * @param time_data  Accumulators for comm/comp timing (updated in place).
 * @param data       Input activations for the first block (replaced by the
 *                   received ghost buffer on non-first ranks).
 * @param nsamples   Number of samples in the batch.
 * @param level      Multigrid level selecting which block list to run.
 * @param add_source Flag forwarded to each block's fwd().
 */
void Network::fwd_sync(timerdata_t *time_data,data_t *data,const int nsamples,int level,char add_source){
compute_ctx_t *model = &dlmg.ctx.model_compute_ctx;
data_t *output;
// Drain any outstanding GPU work so the timers measure only this pass.
checkCudaErrors(hipDeviceSynchronize());
double t1 = mpi_timer();
/* receive last layer output from previous model rank */
if(model_rank != 0){
MPI_Status status;
/* receive data on CPU */
data = leftGhost[level]->getOutDevice();
data_t *host_ghost_data = leftGhost[level]->getOutHost();
MPI_Recv(host_ghost_data->ptr,
getInSize(level)*nsamples,
MPI_DLREAL,
model_rank-1,
0,
model->mpi_comm,
&status);
/* copy data from CPU to GPU */
checkCudaErrors(hipMemcpy(data->ptr,
host_ghost_data->ptr,
sizeof(Real)*getInSize(level)*nsamples,
hipMemcpyHostToDevice));
}
double t2 = mpi_timer();
time_data->comm_time += t2-t1;
/* solve */
t1 = mpi_timer();
// Chain the blocks: each block's output feeds the next block's input.
BLOCK_LOOP(level){
output = blocks[level][i]->fwd(data,nsamples,add_source);
data = output;
}
t2 = mpi_timer();
double comp_time = t2-t1;
/* send last layer output to next model rank */
t1 = mpi_timer();
if(model_rank != model_nranks - 1){
/* copy data from GPU to CPU */
Real *host_out = getOutHost(level);
checkCudaErrors(hipMemcpy(host_out,
output->ptr,
sizeof(Real)*getOutSize(level)*nsamples,
hipMemcpyDeviceToHost));
/* send data on CPU */
MPI_Send(host_out,
getOutSize(level)*nsamples,
MPI_DLREAL,
model_rank+1,
0,
model->mpi_comm);
}
t2 = mpi_timer();
time_data->comm_time += t2-t1;
checkCudaErrors(hipDeviceSynchronize());
// Sum per-rank computation time over the whole job; every rank gets the
// total (collective call -- all ranks must reach it).
double comp_time_total = 0.0;
MPI_Allreduce(&comp_time, &comp_time_total, 1, MPI_DOUBLE, MPI_SUM, MPI_COMM_WORLD);
time_data->comp_time += comp_time_total;
double total_loc = time_data->comm_time + time_data->comp_time;
// printf(" TIME BREAKDOWN rank[%d]: total = %f, computation = %f (%.1f\%), communication = %f (%.1f\%)\n",global_rank,
// total_loc,
// time_data->comp_time,time_data->comp_time/total_loc*100.0,
// time_data->comm_time,time_data->comm_time/total_loc*100.0);
}
|
6dfdc6c165f46eb2c93baf2a6b9e5459c982ea6a.cu
|
/**
* File: NetworkForward.cu
* Author: akirby
*
* Created on April 23, 2020, 12:37 PM
*/
/* header files */
#include "Network.h"
#include <unistd.h>
#define MIN(x,y) (x)<(y) ? (x):(y)
#define MAX(x,y) (x)>(y) ? (x):(y)
#define BLOCK_LOOP(level) \
for (int i = 0; i < blocks[(level)].size(); ++i)
#define ROOT if(global_rank == 0)
/*
 * Synchronous forward pass through this rank's stage of the model pipeline.
 *
 * Pipeline pattern: every model rank except the first receives the previous
 * stage's activations over MPI (staged through a host buffer, then copied to
 * the GPU); all local blocks at `level` are evaluated in order; every rank
 * except the last sends its output to the next stage. Wall-clock time is
 * split into communication and computation, and the computation time is
 * summed across all ranks via MPI_Allreduce.
 *
 * @param time_data  Accumulators for comm/comp timing (updated in place).
 * @param data       Input activations for the first block (replaced by the
 *                   received ghost buffer on non-first ranks).
 * @param nsamples   Number of samples in the batch.
 * @param level      Multigrid level selecting which block list to run.
 * @param add_source Flag forwarded to each block's fwd().
 */
void Network::fwd_sync(timerdata_t *time_data,data_t *data,const int nsamples,int level,char add_source){
compute_ctx_t *model = &dlmg.ctx.model_compute_ctx;
data_t *output;
// Drain any outstanding GPU work so the timers measure only this pass.
checkCudaErrors(cudaDeviceSynchronize());
double t1 = mpi_timer();
/* receive last layer output from previous model rank */
if(model_rank != 0){
MPI_Status status;
/* receive data on CPU */
data = leftGhost[level]->getOutDevice();
data_t *host_ghost_data = leftGhost[level]->getOutHost();
MPI_Recv(host_ghost_data->ptr,
getInSize(level)*nsamples,
MPI_DLREAL,
model_rank-1,
0,
model->mpi_comm,
&status);
/* copy data from CPU to GPU */
checkCudaErrors(cudaMemcpy(data->ptr,
host_ghost_data->ptr,
sizeof(Real)*getInSize(level)*nsamples,
cudaMemcpyHostToDevice));
}
double t2 = mpi_timer();
time_data->comm_time += t2-t1;
/* solve */
t1 = mpi_timer();
// Chain the blocks: each block's output feeds the next block's input.
BLOCK_LOOP(level){
output = blocks[level][i]->fwd(data,nsamples,add_source);
data = output;
}
t2 = mpi_timer();
double comp_time = t2-t1;
/* send last layer output to next model rank */
t1 = mpi_timer();
if(model_rank != model_nranks - 1){
/* copy data from GPU to CPU */
Real *host_out = getOutHost(level);
checkCudaErrors(cudaMemcpy(host_out,
output->ptr,
sizeof(Real)*getOutSize(level)*nsamples,
cudaMemcpyDeviceToHost));
/* send data on CPU */
MPI_Send(host_out,
getOutSize(level)*nsamples,
MPI_DLREAL,
model_rank+1,
0,
model->mpi_comm);
}
t2 = mpi_timer();
time_data->comm_time += t2-t1;
checkCudaErrors(cudaDeviceSynchronize());
// Sum per-rank computation time over the whole job; every rank gets the
// total (collective call -- all ranks must reach it).
double comp_time_total = 0.0;
MPI_Allreduce(&comp_time, &comp_time_total, 1, MPI_DOUBLE, MPI_SUM, MPI_COMM_WORLD);
time_data->comp_time += comp_time_total;
double total_loc = time_data->comm_time + time_data->comp_time;
// printf(" TIME BREAKDOWN rank[%d]: total = %f, computation = %f (%.1f\%), communication = %f (%.1f\%)\n",global_rank,
// total_loc,
// time_data->comp_time,time_data->comp_time/total_loc*100.0,
// time_data->comm_time,time_data->comm_time/total_loc*100.0);
}
|
26bcd7851f299dfc8b00a4ee7464223c0757abfa.hip
|
// !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
//
// auto-generated by op2.py
//
//user function
/*
 * Per-cell area/timestep measure for a quadrilateral cell.
 *
 * x1..x4 are the cell's four vertex coordinate pairs, q is the
 * conservative flow state. Accumulates |u*dy - v*dx| + c*|edge| over the
 * four edges (closing the loop back at x1) and divides by the CFL number
 * (cfl_cuda); gam_cuda/gm1_cuda are device-resident constants.
 */
__device__ void adt_calc_gpu( const double *x1, const double *x2, const double *x3,
                              const double *x4, const double *q, double *adt) {
  const double ri = 1.0f / q[0];      // reciprocal density
  const double u  = ri * q[1];
  const double v  = ri * q[2];
  // Local speed of sound from the energy component.
  const double c  = sqrt(gam_cuda * gm1_cuda * (ri * q[3] - 0.5f * (u * u + v * v)));

  // Walk the four edges in vertex order, wrapping back to the first vertex.
  const double *vert[5] = { x1, x2, x3, x4, x1 };
  double acc = 0.0;
  for (int e = 0; e < 4; ++e) {
    const double dx = vert[e + 1][0] - vert[e][0];
    const double dy = vert[e + 1][1] - vert[e][1];
    acc += fabs(u * dy - v * dx) + c * sqrt(dx * dx + dy * dy);
  }
  *adt = acc / cfl_cuda;
}
// CUDA kernel function
/*
 * CUDA kernel wrapper for adt_calc over set elements [start, end).
 *
 * One thread per element. ind_arg0 holds the indirectly-accessed vertex
 * coordinates (2 doubles each), opDat0Map supplies the four vertex indices
 * per element (stored column-wise with stride set_size), arg4 is the direct
 * flow state (4 doubles per element) and arg5 the per-element output.
 */
__global__ void op_cuda_adt_calc(
  const double *__restrict ind_arg0,
  const int *__restrict opDat0Map,
  const double *__restrict arg4,
  double *arg5,
  int start,
  int end,
  int set_size) {
  const int tid = threadIdx.x + blockIdx.x * blockDim.x;
  if (tid + start >= end)
    return;

  const int n = tid + start;
  // Gather the element's four vertex indices from the mapping table.
  const int map0idx = opDat0Map[n + set_size * 0];
  const int map1idx = opDat0Map[n + set_size * 1];
  const int map2idx = opDat0Map[n + set_size * 2];
  const int map3idx = opDat0Map[n + set_size * 3];

  // user-supplied kernel call
  adt_calc_gpu(ind_arg0 + 2 * map0idx,
               ind_arg0 + 2 * map1idx,
               ind_arg0 + 2 * map2idx,
               ind_arg0 + 2 * map3idx,
               arg4 + 4 * n,
               arg5 + n);
}
//host stub function
/*
 * Host stub for the adt_calc parallel loop (OP2 auto-generated).
 *
 * Starts the halo exchange, then runs the kernel in two rounds: round 0
 * covers the core elements (no halo data needed), round 1 waits for the
 * exchange to finish and covers the remainder including the exec halo.
 * Timing for the loop is accumulated into OP_kernels[1].
 */
void op_par_loop_adt_calc(char const *name, op_set set,
op_arg arg0,
op_arg arg1,
op_arg arg2,
op_arg arg3,
op_arg arg4,
op_arg arg5){
int nargs = 6;
op_arg args[6];
args[0] = arg0;
args[1] = arg1;
args[2] = arg2;
args[3] = arg3;
args[4] = arg4;
args[5] = arg5;
// initialise timers
double cpu_t1, cpu_t2, wall_t1, wall_t2;
op_timing_realloc(1);
op_timers_core(&cpu_t1, &wall_t1);
OP_kernels[1].name = name;
OP_kernels[1].count += 1;
// args 0-3 share indirection dataset 0; args 4-5 are accessed directly.
int ninds = 1;
int inds[6] = {0,0,0,0,-1,-1};
if (OP_diags>2) {
printf(" kernel routine with indirection: adt_calc\n");
}
// Kick off the MPI halo exchange for the indirect data (non-blocking).
int set_size = op_mpi_halo_exchanges_cuda(set, nargs, args);
if (set->size > 0) {
//set CUDA execution parameters
#ifdef OP_BLOCK_SIZE_1
int nthread = OP_BLOCK_SIZE_1;
#else
int nthread = OP_block_size;
#endif
for ( int round=0; round<2; round++ ){
// Round 1 needs halo data: block until the exchange completes.
if (round==1) {
op_mpi_wait_all_cuda(nargs, args);
}
int start = round==0 ? 0 : set->core_size;
int end = round==0 ? set->core_size : set->size + set->exec_size;
if (end-start>0) {
int nblocks = (end-start-1)/nthread+1;
hipLaunchKernelGGL(( op_cuda_adt_calc), dim3(nblocks),dim3(nthread), 0, 0,
(double *)arg0.data_d,
arg0.map_data_d,
(double*)arg4.data_d,
(double*)arg5.data_d,
start,end,set->size+set->exec_size);
}
}
}
op_mpi_set_dirtybit_cuda(nargs, args);
// Ensure the kernel has finished before recording the end time.
cutilSafeCall(hipDeviceSynchronize());
//update kernel record
op_timers_core(&cpu_t2, &wall_t2);
OP_kernels[1].time += wall_t2 - wall_t1;
}
|
26bcd7851f299dfc8b00a4ee7464223c0757abfa.cu
|
//
// auto-generated by op2.py
//
//user function
/*
 * Per-cell area/timestep measure for a quadrilateral cell.
 *
 * x1..x4 are the cell's four vertex coordinate pairs, q is the
 * conservative flow state. Accumulates |u*dy - v*dx| + c*|edge| over the
 * four edges (closing the loop back at x1) and divides by the CFL number
 * (cfl_cuda); gam_cuda/gm1_cuda are device-resident constants.
 */
__device__ void adt_calc_gpu( const double *x1, const double *x2, const double *x3,
                              const double *x4, const double *q, double *adt) {
  const double ri = 1.0f / q[0];      // reciprocal density
  const double u  = ri * q[1];
  const double v  = ri * q[2];
  // Local speed of sound from the energy component.
  const double c  = sqrt(gam_cuda * gm1_cuda * (ri * q[3] - 0.5f * (u * u + v * v)));

  // Walk the four edges in vertex order, wrapping back to the first vertex.
  const double *vert[5] = { x1, x2, x3, x4, x1 };
  double acc = 0.0;
  for (int e = 0; e < 4; ++e) {
    const double dx = vert[e + 1][0] - vert[e][0];
    const double dy = vert[e + 1][1] - vert[e][1];
    acc += fabs(u * dy - v * dx) + c * sqrt(dx * dx + dy * dy);
  }
  *adt = acc / cfl_cuda;
}
// CUDA kernel function
/*
 * CUDA kernel wrapper for adt_calc over set elements [start, end).
 *
 * One thread per element. ind_arg0 holds the indirectly-accessed vertex
 * coordinates (2 doubles each), opDat0Map supplies the four vertex indices
 * per element (stored column-wise with stride set_size), arg4 is the direct
 * flow state (4 doubles per element) and arg5 the per-element output.
 */
__global__ void op_cuda_adt_calc(
  const double *__restrict ind_arg0,
  const int *__restrict opDat0Map,
  const double *__restrict arg4,
  double *arg5,
  int start,
  int end,
  int set_size) {
  const int tid = threadIdx.x + blockIdx.x * blockDim.x;
  if (tid + start >= end)
    return;

  const int n = tid + start;
  // Gather the element's four vertex indices from the mapping table.
  const int map0idx = opDat0Map[n + set_size * 0];
  const int map1idx = opDat0Map[n + set_size * 1];
  const int map2idx = opDat0Map[n + set_size * 2];
  const int map3idx = opDat0Map[n + set_size * 3];

  // user-supplied kernel call
  adt_calc_gpu(ind_arg0 + 2 * map0idx,
               ind_arg0 + 2 * map1idx,
               ind_arg0 + 2 * map2idx,
               ind_arg0 + 2 * map3idx,
               arg4 + 4 * n,
               arg5 + n);
}
//host stub function
/*
 * Host stub for the adt_calc parallel loop (OP2 auto-generated).
 *
 * Starts the halo exchange, then runs the kernel in two rounds: round 0
 * covers the core elements (no halo data needed), round 1 waits for the
 * exchange to finish and covers the remainder including the exec halo.
 * Timing for the loop is accumulated into OP_kernels[1].
 */
void op_par_loop_adt_calc(char const *name, op_set set,
op_arg arg0,
op_arg arg1,
op_arg arg2,
op_arg arg3,
op_arg arg4,
op_arg arg5){
int nargs = 6;
op_arg args[6];
args[0] = arg0;
args[1] = arg1;
args[2] = arg2;
args[3] = arg3;
args[4] = arg4;
args[5] = arg5;
// initialise timers
double cpu_t1, cpu_t2, wall_t1, wall_t2;
op_timing_realloc(1);
op_timers_core(&cpu_t1, &wall_t1);
OP_kernels[1].name = name;
OP_kernels[1].count += 1;
// args 0-3 share indirection dataset 0; args 4-5 are accessed directly.
int ninds = 1;
int inds[6] = {0,0,0,0,-1,-1};
if (OP_diags>2) {
printf(" kernel routine with indirection: adt_calc\n");
}
// Kick off the MPI halo exchange for the indirect data (non-blocking).
int set_size = op_mpi_halo_exchanges_cuda(set, nargs, args);
if (set->size > 0) {
//set CUDA execution parameters
#ifdef OP_BLOCK_SIZE_1
int nthread = OP_BLOCK_SIZE_1;
#else
int nthread = OP_block_size;
#endif
for ( int round=0; round<2; round++ ){
// Round 1 needs halo data: block until the exchange completes.
if (round==1) {
op_mpi_wait_all_cuda(nargs, args);
}
int start = round==0 ? 0 : set->core_size;
int end = round==0 ? set->core_size : set->size + set->exec_size;
if (end-start>0) {
int nblocks = (end-start-1)/nthread+1;
op_cuda_adt_calc<<<nblocks,nthread>>>(
(double *)arg0.data_d,
arg0.map_data_d,
(double*)arg4.data_d,
(double*)arg5.data_d,
start,end,set->size+set->exec_size);
}
}
}
op_mpi_set_dirtybit_cuda(nargs, args);
// Ensure the kernel has finished before recording the end time.
cutilSafeCall(cudaDeviceSynchronize());
//update kernel record
op_timers_core(&cpu_t2, &wall_t2);
OP_kernels[1].time += wall_t2 - wall_t1;
}
|
7ab3bcd060a3c4124f27ef1a665ccf347b288bff.hip
|
// !!! This is a file automatically generated by hipify!!!
#include <stdio.h>
#include <stdlib.h>
#include <assert.h>
#include <hip/hip_runtime.h>
#define N 2//64
// Forward declaration marked always_inline so the body is inlined at the
// call site in foo() below.
__device__ int bar() __attribute__((always_inline));
// Trivial device helper; its fixed return value is what foo()'s assertion
// is checked against.
__device__ int bar()
{
return 5;
}
// Kernel under verification: asserts bar() does not return 5, which fails
// because bar() always returns 5 -- presumably the intended target the
// model checker is expected to flag.
__global__ void foo()
{
int x = bar();
assert(x != 5);
//printf("%d ", x);
}
// Harness: ESBMC_verify_kernel stands in for the commented-out launch,
// handing foo (1 block of N threads) to the verifier instead of the GPU.
int main(){
//foo<<<1, N>>>();
ESBMC_verify_kernel(foo,1,N);
hipDeviceSynchronize();
return 0;
}
|
7ab3bcd060a3c4124f27ef1a665ccf347b288bff.cu
|
#include <stdio.h>
#include <stdlib.h>
#include <assert.h>
#include <cuda.h>
#define N 2//64
// Forward declaration marked always_inline so the body is inlined at the
// call site in foo() below.
__device__ int bar() __attribute__((always_inline));
// Trivial device helper; its fixed return value is what foo()'s assertion
// is checked against.
__device__ int bar()
{
return 5;
}
// Kernel under verification: asserts bar() does not return 5, which fails
// because bar() always returns 5 -- presumably the intended target the
// model checker is expected to flag.
__global__ void foo()
{
int x = bar();
assert(x != 5);
//printf("%d ", x);
}
int main(){
    //foo<<<1, N>>>();
    // Hand the kernel to the ESBMC model checker instead of launching it
    // on real hardware (grid = 1 block, N threads).
    ESBMC_verify_kernel(foo,1,N);
    // Fix: cudaThreadSynchronize() has been deprecated since CUDA 4.0;
    // cudaDeviceSynchronize() is the supported equivalent.
    cudaDeviceSynchronize();
    return 0;
}
|
a8284dfa7951c81b7ab75bc521f22e860695b6cb.hip
|
// !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
#include "device_launch_parameters.h"
#include <stdio.h>
#include <stdlib.h>
#include <time.h>
#include <iostream>
#include <cstdio>
#include <ctime>
#include "TimingCPU.h"
#include <iostream>
#include "TimingGPU.cuh"
#include <chrono>
typedef std::chrono::high_resolution_clock Clock;
/*Device Function - Offloading task to GPU*/
// Element-wise vector addition: c[i] = a[i] + b[i].
// Expects a 1-D grid of 1-D blocks with at least `size` total threads;
// the bounds check guards the ragged tail of the last block.
__global__ void gpu_vectorAddition(const int *a, const int *b, int *c, int size) {
int i = blockIdx.x * blockDim.x + threadIdx.x;
if (i < size) {
c[i] = a[i] + b[i];
}
}
// Sequential reference implementation: c[i] = a[i] + b[i] for i in [0, size).
void cpu_vectorAddittion(const int* a, const int* b, int* c, int size) {
    int idx = 0;
    while (idx < size) {
        c[idx] = a[idx] + b[idx];
        ++idx;
    }
}
// Seeds rand() with the current time and fills both arrays with values in
// [0, 255], then prints them.  Not reproducible across runs (time-based seed).
void randomIntialize(int * a, int * b, int size) {
time_t t;
srand((unsigned)time(&t));
// NOTE(review): loop index is size_t while `size` is int — fine for the
// positive sizes used here, but a negative size would never terminate.
for (size_t i = 0; i < size; i++) {
a[i] = (int)(rand() & 0xFF); // Between 0 - 255;
}
for (size_t i = 0; i < size; i++) {
b[i] = (int)(rand() & 0xFF); // Between 0 - 255;
}
printf(" Array A : ");
for (int i = 0; i < size; i++) {
printf(" %d", a[i]);
}
printf(" \n\n........................................................................ \n\n");
printf(" Array B : ");
for (int i = 0; i < size; i++) {
printf(" %d", b[i]);
}
printf(" \n\n........................................................................ \n\n");
}
// Prints the CPU and GPU result vectors so they can be compared by eye
// (no programmatic verification is performed).
void printResults(int * cpu_result, int * gpu_result, int size) {
    const char* separator = " \n\n........................................................................ \n\n";
    printf(" CPU Result : ");
    for (int idx = 0; idx < size; idx++) {
        printf(" %d", cpu_result[idx]);
    }
    printf("%s", separator);
    printf(" GPU Result : ");
    for (int idx = 0; idx < size; idx++) {
        printf(" %d", gpu_result[idx]);
    }
    printf("%s", separator);
}
/*HOST = CPU functionality , invokes kernel */
// Drives a CPU-vs-GPU vector-addition comparison: fills two random int
// arrays, adds them on the CPU (timed with TimingCPU), then on the GPU
// (timed with HIP events), prints both result vectors and the timings.
int main()
{
    const int arr_Size = 256;               // number of elements
    const int block_size = 32;              // threads per block
    const int bytes = arr_Size * sizeof(int);

    // Host buffers: two inputs, the CPU result, and the GPU result copy.
    int* host_a, * host_b, * host_c, * gpu_result;
    host_a = (int*)malloc(bytes);
    host_b = (int*)malloc(bytes);
    host_c = (int*)malloc(bytes);
    gpu_result = (int*)malloc(bytes);
    randomIntialize(host_a, host_b, arr_Size);

    // Time the sequential CPU reference.  (The unused clock()/chrono and
    // TimingGPU locals from the original were dead code and are removed.)
    TimingCPU timer_CPU;
    timer_CPU.StartCounter();
    cpu_vectorAddittion(host_a, host_b, host_c, arr_Size);
    std::cout << "CPU Timing = " << timer_CPU.GetCounter() << " ms" << std::endl;

    /*...................................................... $$ DEVICE $$ .............................................................*/
    int* d_a, * d_b, * d_c;
    hipMalloc((void**)&d_a, bytes);
    hipMalloc((void**)&d_b, bytes);
    hipMalloc((void**)&d_c, bytes);
    // Copy data from host to device;
    hipMemcpy(d_a, host_a, bytes, hipMemcpyHostToDevice);
    hipMemcpy(d_b, host_b, bytes, hipMemcpyHostToDevice);

    dim3 block(block_size);                 // 32 threads per block
    dim3 grid((arr_Size / block.x) + 1);    // enough blocks to cover arr_Size

    hipEvent_t start, stop;
    hipEventCreate(&start);
    hipEventCreate(&stop);
    hipEventRecord(start);
    gpu_vectorAddition<<<grid, block>>>(d_a, d_b, d_c, arr_Size);
    hipEventRecord(stop);
    // Fix: kernel launches do not return an error directly — surface
    // launch-configuration failures explicitly.
    hipError_t launch_err = hipGetLastError();
    if (launch_err != hipSuccess) {
        fprintf(stderr, "kernel launch failed: %s\n", hipGetErrorString(launch_err));
    }
    hipEventSynchronize(stop);              // wait until `stop` has been recorded

    hipMemcpy(gpu_result, d_c, bytes, hipMemcpyDeviceToHost);
    float milliseconds = 0;
    hipEventElapsedTime(&milliseconds, start, stop);
    printResults(host_c, gpu_result, arr_Size);
    std::cout << "GPU Execution time = " << milliseconds << " ms" << std::endl;

    // Fix: the HIP events were previously leaked — destroy them along with
    // the device and host buffers.
    hipEventDestroy(start);
    hipEventDestroy(stop);
    hipFree(d_a);
    hipFree(d_b);
    hipFree(d_c);
    free(host_a);
    free(host_b);
    free(host_c);
    free(gpu_result);
    return 0;
}
|
a8284dfa7951c81b7ab75bc521f22e860695b6cb.cu
|
#include "cuda_runtime.h"
#include "device_launch_parameters.h"
#include <stdio.h>
#include <stdlib.h>
#include <time.h>
#include <iostream>
#include <cstdio>
#include <ctime>
#include "TimingCPU.h"
#include <iostream>
#include "TimingGPU.cuh"
#include <chrono>
typedef std::chrono::high_resolution_clock Clock;
/*Device Function - Offloading task to GPU*/
// One thread per element: c[idx] = a[idx] + b[idx]; threads past the end
// of the array (the ragged tail of the last block) do nothing.
__global__ void gpu_vectorAddition(const int *a, const int *b, int *c, int size) {
    const int idx = blockDim.x * blockIdx.x + threadIdx.x;
    if (idx >= size) {
        return;
    }
    c[idx] = a[idx] + b[idx];
}
// Sequential reference implementation of the GPU kernel: element-wise sum.
void cpu_vectorAddittion(const int* a, const int* b, int* c, int size) {
    for (int* end = c + size; c != end; ++a, ++b, ++c) {
        *c = *a + *b;
    }
}
// Fills both arrays with time-seeded pseudo-random values in [0, 255] and
// echoes them to stdout.  Output differs on every run (seed = current time).
void randomIntialize(int * a, int * b, int size) {
time_t t;
srand((unsigned)time(&t));
for (size_t i = 0; i < size; i++) {
a[i] = (int)(rand() & 0xFF); // Between 0 - 255;
}
for (size_t i = 0; i < size; i++) {
b[i] = (int)(rand() & 0xFF); // Between 0 - 255;
}
printf(" Array A : ");
for (int i = 0; i < size; i++) {
printf(" %d", a[i]);
}
printf(" \n\n........................................................................ \n\n");
printf(" Array B : ");
for (int i = 0; i < size; i++) {
printf(" %d", b[i]);
}
printf(" \n\n........................................................................ \n\n");
}
// Prints the CPU and GPU result vectors side by side for manual comparison
// (no programmatic verification is performed).
void printResults(int * cpu_result, int * gpu_result, int size) {
printf(" CPU Result : ");
for (int i = 0; i < size; i++) {
printf(" %d", cpu_result[i]);
}
printf(" \n\n........................................................................ \n\n");
printf(" GPU Result : ");
for (int i = 0; i < size; i++) {
printf(" %d", gpu_result[i]);
}
printf(" \n\n........................................................................ \n\n");
}
/*HOST = CPU functionality , invokes kernel */
// Drives a CPU-vs-GPU vector-addition comparison: fills two random int
// arrays, adds them on the CPU (timed with TimingCPU), then on the GPU
// (timed with CUDA events), prints both result vectors and the timings.
int main()
{
    const int arr_Size = 256;               // number of elements
    const int block_size = 32;              // threads per block
    const int bytes = arr_Size * sizeof(int);

    // Host buffers: two inputs, the CPU result, and the GPU result copy.
    int* host_a, * host_b, * host_c, * gpu_result;
    host_a = (int*)malloc(bytes);
    host_b = (int*)malloc(bytes);
    host_c = (int*)malloc(bytes);
    gpu_result = (int*)malloc(bytes);
    randomIntialize(host_a, host_b, arr_Size);

    // Time the sequential CPU reference.  (The unused clock()/chrono and
    // TimingGPU locals from the original were dead code and are removed.)
    TimingCPU timer_CPU;
    timer_CPU.StartCounter();
    cpu_vectorAddittion(host_a, host_b, host_c, arr_Size);
    std::cout << "CPU Timing = " << timer_CPU.GetCounter() << " ms" << std::endl;

    /*...................................................... $$ DEVICE $$ .............................................................*/
    int* d_a, * d_b, * d_c;
    cudaMalloc((void**)&d_a, bytes);
    cudaMalloc((void**)&d_b, bytes);
    cudaMalloc((void**)&d_c, bytes);
    // Copy data from host to device;
    cudaMemcpy(d_a, host_a, bytes, cudaMemcpyHostToDevice);
    cudaMemcpy(d_b, host_b, bytes, cudaMemcpyHostToDevice);

    dim3 block(block_size);                 // 32 threads per block
    dim3 grid((arr_Size / block.x) + 1);    // enough blocks to cover arr_Size

    cudaEvent_t start, stop;
    cudaEventCreate(&start);
    cudaEventCreate(&stop);
    cudaEventRecord(start);
    gpu_vectorAddition<<<grid, block>>>(d_a, d_b, d_c, arr_Size);
    cudaEventRecord(stop);
    // Fix: kernel launches do not return an error directly — surface
    // launch-configuration failures explicitly.
    cudaError_t launch_err = cudaGetLastError();
    if (launch_err != cudaSuccess) {
        fprintf(stderr, "kernel launch failed: %s\n", cudaGetErrorString(launch_err));
    }
    cudaEventSynchronize(stop);             // wait until `stop` has been recorded

    cudaMemcpy(gpu_result, d_c, bytes, cudaMemcpyDeviceToHost);
    float milliseconds = 0;
    cudaEventElapsedTime(&milliseconds, start, stop);
    printResults(host_c, gpu_result, arr_Size);
    std::cout << "GPU Execution time = " << milliseconds << " ms" << std::endl;

    // Fix: the CUDA events were previously leaked — destroy them along with
    // the device and host buffers.
    cudaEventDestroy(start);
    cudaEventDestroy(stop);
    cudaFree(d_a);
    cudaFree(d_b);
    cudaFree(d_c);
    free(host_a);
    free(host_b);
    free(host_c);
    free(gpu_result);
    return 0;
}
|
24fa66502e181b8f2c3ab674e3309528402703b0.hip
|
// !!! This is a file automatically generated by hipify!!!
/*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
#include <cctype>
#include <algorithm>
#include <functional>
#include <numeric>
#include <ctime>
#include <time.h>
#include "cm.h"
#include "atof.h"
#include "compress.cu"
#include "sorts.hip"
#include "filter.h"
#include "callbacks.h"
#include "zone_map.h"
#ifdef _WIN64
#define atoll(S) _atoi64(S)
#define fseek(S, S1, S2) _fseeki64(S, S1, S2)
#include <windows.h>
#else
#include <unistd.h>
#endif
using namespace std;
using namespace thrust::placeholders;
size_t total_count = 0, total_max;
clock_t tot;
unsigned int total_segments = 0, old_segments;
size_t process_count;
size_t alloced_sz = 0;
bool fact_file_loaded = 1;
bool verbose;
bool interactive, ssd, delta, star;
void* d_v = nullptr;
void* s_v = nullptr;
queue<string> op_sort;
queue<string> op_presort;
queue<string> op_type;
bool op_case = 0;
string grp_val;
queue<string> op_value;
queue<int_type> op_nums;
queue<float_type> op_nums_f;
queue<unsigned int> op_nums_precision;
queue<string> col_aliases;
map<string, map<string, col_data> > data_dict;
map<unsigned int, map<unsigned long long int, size_t> > char_hash;
map<string, char*> index_buffers;
map<string, char*> buffers;
map<string, size_t> buffer_sizes;
size_t total_buffer_size;
queue<string> buffer_names;
void* alloced_tmp;
bool alloced_switch = 0;
map<string,CudaSet*> varNames; // STL map to manage CudaSet variables
map<string, unsigned int> cpy_bits;
map<string, long long int> cpy_init_val;
char* readbuff = nullptr;
thrust::device_vector<unsigned int> rcol_matches;
thrust::device_vector<int_type> rcol_dev;
// Approximate float equality: true when |x - y| < EPSILON.
struct f_equal_to
{
    __host__ __device__
    bool operator()(const float_type x, const float_type y)
    {
        const float_type diff = x - y;
        return (diff < EPSILON) && (diff > -EPSILON);
    }
};
// Epsilon-tolerant x < y: true only when y exceeds x by more than EPSILON.
struct f_less
{
__host__ __device__
bool operator()(const float_type x, const float_type y)
{
return ((y-x) > EPSILON);
}
};
// Epsilon-tolerant x > y: true only when x exceeds y by more than EPSILON.
struct f_greater
{
__host__ __device__
bool operator()(const float_type x, const float_type y)
{
return ((x-y) > EPSILON);
}
};
// Epsilon-tolerant x >= y: strictly greater by EPSILON, or within EPSILON.
struct f_greater_equal_to
{
__host__ __device__
bool operator()(const float_type x, const float_type y)
{
return (((x-y) > EPSILON) || (((x-y) < EPSILON) && ((x-y) > -EPSILON)));
}
};
// Epsilon-tolerant x <= y: strictly less by EPSILON, or within EPSILON.
struct f_less_equal
{
__host__ __device__
bool operator()(const float_type x, const float_type y)
{
return (((y-x) > EPSILON) || (((x-y) < EPSILON) && ((x-y) > -EPSILON)));
}
};
// Approximate float inequality: true when |x - y| > EPSILON.
struct f_not_equal_to
{
    __host__ __device__
    bool operator()(const float_type x, const float_type y)
    {
        const float_type diff = x - y;
        return (diff > EPSILON) || (diff < -EPSILON);
    }
};
// Widening conversion functor: int_type -> float_type.
struct long_to_float_type
{
__host__ __device__
float_type operator()(const int_type x)
{
return (float_type)x;
}
};
// Multiplies x by 10^a (a fixed at construction) — presumably used to shift
// decimal values into scaled-integer form.
// NOTE(review): 10^a goes through double pow() and is truncated to
// unsigned int, so it only behaves for a small enough that 10^a fits in
// 32 bits (a <= 9) — confirm callers respect that.
template <typename T>
struct power_functor : public thrust::unary_function<T,T>
{
unsigned int a;
__host__ __device__
power_functor(unsigned int a_) { a = a_; }
__host__ __device__
T operator()(T x)
{
return x*(unsigned int)pow((double)10,(double)a);
}
};
// Predicate: true when the int is exactly zero.
struct is_zero
{
__host__ __device__
bool operator()(const int &x)
{
return x == 0;
}
};
// Offset (in hours) of the local time zone from UTC, derived by asking
// localtime() about a fixed instant exactly one day after the epoch
// (Jan 2, 1970 00:00 UTC).
int get_utc_offset() {
    time_t one_day = 24*60*60L;
    struct tm* local = localtime(&one_day);
    int hours = local->tm_hour;
    /* local time fell on the "day before" UTC -> the offset is negative,
       so pull the hour count back by a full day */
    if (local->tm_mday < 2)
        hours -= 24;
    return hours;
}
/*
the utc analogue of mktime,
(much like timegm on some systems)
*/
time_t tm_to_time_t_utc( struct tm * timeptr ) {
/* gets the epoch time relative to the local time zone,
and then adds the appropriate number of seconds to make it UTC */
// NOTE(review): get_utc_offset() samples the zone offset at a fixed January
// date, so DST-observing zones may be off by an hour for summer timestamps
// — confirm whether that matters to callers.
return mktime( timeptr ) + get_utc_offset() * 3600;
}
/*class power_functor {
unsigned int a;
public:
power_functor(unsigned int a_) { a = a_; }
__host__ __device__ int_type operator()(int_type x) const
{
return x*(unsigned int)pow((double)10,(double)a);
}
};
*/
void allocColumns(CudaSet* a, queue<string> fields);
void copyColumns(CudaSet* a, queue<string> fields, unsigned int segment, size_t& count, bool rsz, bool flt);
void mygather(unsigned int tindex, unsigned int idx, CudaSet* a, CudaSet* t, size_t count, size_t g_size);
void mycopy(unsigned int tindex, unsigned int idx, CudaSet* a, CudaSet* t, size_t count, size_t g_size);
void write_compressed_char(string file_name, unsigned int index, size_t mCount);
size_t getFreeMem();
size_t getTotalSystemMemory();
void process_error(int severity, string err);
// File-backed set whose data arrives as text: schema comes from the parsed
// name/type/size/position queues; Recs is the expected record count.
CudaSet::CudaSet(queue<string> &nameRef, queue<string> &typeRef, queue<int> &sizeRef, queue<int> &colsRef, size_t Recs)
: mColumnCount(0), mRecCount(0)
{
initialize(nameRef, typeRef, sizeRef, colsRef, Recs);
source = 1;       // data comes from an external source
text_source = 1;  // ...in text form
fil_f = nullptr;
fil_s = nullptr;
};
// File-backed set in binary (already-converted) form; `max` becomes maxRecs,
// the per-segment record capacity used when copying segments to the GPU.
CudaSet::CudaSet(queue<string> &nameRef, queue<string> &typeRef, queue<int> &sizeRef, queue<int> &colsRef, size_t Recs, string file_name, unsigned int max)
: mColumnCount(0), mRecCount(0)
{
maxRecs = max;
initialize(nameRef, typeRef, sizeRef, colsRef, Recs, file_name);
source = 1;
text_source = 0;  // binary, not text
fil_f = nullptr;
fil_s = nullptr;
};
// In-memory intermediate/result set of the given dimensions; not backed by
// any source file and not retained (keep = false).
CudaSet::CudaSet(const size_t RecordCount, const unsigned int ColumnCount)
{
initialize(RecordCount, ColumnCount);
keep = false;
source = 0;
text_source = 0;
fil_f = nullptr;
fil_s = nullptr;
};
// In-memory set described by a select list and its aliases (op_sel /
// op_sel_as) — presumably for projection results; verify against initialize().
CudaSet::CudaSet(queue<string> op_sel, const queue<string> op_sel_as)
{
initialize(op_sel, op_sel_as);
keep = false;
source = 0;
text_source = 0;
fil_f = nullptr;
fil_s = nullptr;
};
// In-memory set combining two existing sets a and b under a select list —
// presumably the shape of a join result; verify against initialize().
CudaSet::CudaSet(CudaSet* a, CudaSet* b, queue<string> op_sel, queue<string> op_sel_as)
{
initialize(a,b, op_sel, op_sel_as);
keep = false;
source = 0;
text_source = 0;
fil_f = nullptr;
fil_s = nullptr;
};
// Releases all resources via the member free() helper.
CudaSet::~CudaSet()
{
free();
};
// Ensure the device-side vector for `colname` holds RecordCount elements.
// Columns with type != 1 live in d_columns_int; type == 1 (float) columns
// live in d_columns_float.
void CudaSet::allocColumnOnDevice(string colname, size_t RecordCount)
{
if (type[colname] != 1 ) {
d_columns_int[colname].resize(RecordCount);
}
else
d_columns_float[colname].resize(RecordCount);
};
// Grow every host column by addRecs records (used when materializing join
// output). NOTE(review): body is identical to resize() — candidates to merge.
void CudaSet::resize_join(size_t addRecs)
{
mRecCount = mRecCount + addRecs;
for(unsigned int i=0; i < columnNames.size(); i++) {
if(type[columnNames[i]] != 1) {
h_columns_int[columnNames[i]].resize(mRecCount);
}
else
h_columns_float[columnNames[i]].resize(mRecCount);
};
};
// Grow every host column by addRecs records and bump the record count.
void CudaSet::resize(size_t addRecs)
{
mRecCount = mRecCount + addRecs;
for(unsigned int i=0; i < columnNames.size(); i++) {
if(type[columnNames[i]] != 1) {
h_columns_int[columnNames[i]].resize(mRecCount);
}
else {
h_columns_float[columnNames[i]].resize(mRecCount);
}
};
};
// Release `colname`'s device memory for real: resize(0) alone keeps the
// capacity, so shrink_to_fit() is called to return it to the allocator.
void CudaSet::deAllocColumnOnDevice(string colname)
{
if (type[colname] != 1 && !d_columns_int.empty() && d_columns_int.find(colname) != d_columns_int.end()) {
if(d_columns_int[colname].size() > 0) {
d_columns_int[colname].resize(0);
d_columns_int[colname].shrink_to_fit();
};
}
else if (type[colname] == 1 && !d_columns_float.empty()) {
if (d_columns_float[colname].size() > 0) {
d_columns_float[colname].resize(0);
d_columns_float[colname].shrink_to_fit();
};
};
};
// Reserve device-side storage of RecordCount elements for every column.
void CudaSet::allocOnDevice(size_t RecordCount)
{
    for (unsigned int col = 0; col < columnNames.size(); col++) {
        allocColumnOnDevice(columnNames[col], RecordCount);
    }
};
// Free all GPU memory owned by this set: every named column, the prm_d
// permutation vector, any stray vectors in the maps, and — for filtered
// views — the source set's device data as well.
void CudaSet::deAllocOnDevice()
{
for(unsigned int i=0; i < columnNames.size(); i++) {
deAllocColumnOnDevice(columnNames[i]);
};
if(prm_d.size()) {
prm_d.resize(0);
prm_d.shrink_to_fit();
};
// Sweep any remaining vectors not listed in columnNames.
for (auto it=d_columns_int.begin(); it != d_columns_int.end(); ++it ) {
if(it->second.size() > 0) {
it->second.resize(0);
it->second.shrink_to_fit();
};
};
for (auto it=d_columns_float.begin(); it != d_columns_float.end(); ++it ) {
if(it->second.size() > 0) {
it->second.resize(0);
it->second.shrink_to_fit();
};
};
if(filtered) { // dealloc the source
if(varNames.find(source_name) != varNames.end()) {
varNames[source_name]->deAllocOnDevice();
};
};
};
// Resize one column's device vector to RecCount records (int-like columns
// in d_columns_int, float columns in d_columns_float).
void CudaSet::resizeDeviceColumn(size_t RecCount, string colname)
{
if (type[colname] != 1) {
d_columns_int[colname].resize(RecCount);
}
else
d_columns_float[colname].resize(RecCount);
};
// Resize every column's device vector to RecCount records.
void CudaSet::resizeDevice(size_t RecCount)
{
    for (unsigned int col = 0; col < columnNames.size(); col++) {
        resizeDeviceColumn(RecCount, columnNames[col]);
    }
};
// True when `colname`'s data is currently resident on the GPU, i.e. its
// device vector exists and is non-empty.
bool CudaSet::onDevice(string colname)
{
if (type[colname] != 1) {
if (!d_columns_int.empty() && d_columns_int[colname].size())
return 1;
}
else
if (!d_columns_float.empty() && d_columns_float[colname].size())
return 1;
return 0;
}
// Clone this set's metadata (schema, segment layout, compression flags)
// into a fresh, empty CudaSet.  No row data is copied; the clone starts
// with mRecCount = 0.  Caller owns the returned pointer.
CudaSet* CudaSet::copyDeviceStruct()
{
CudaSet* a = new CudaSet(mRecCount, mColumnCount);
a->not_compressed = not_compressed;
a->segCount = segCount;
a->maxRecs = maxRecs;
a->columnNames = columnNames;
a->ts_cols = ts_cols;
a->cols = cols;
a->type = type;
a->char_size = char_size;
a->decimal = decimal;
a->decimal_zeroes = decimal_zeroes;
// Pre-create empty vectors per column: type 0 = int, 1 = float,
// anything else = char (raw pointers, left null).
for(unsigned int i=0; i < columnNames.size(); i++) {
if(a->type[columnNames[i]] == 0) {
a->d_columns_int[columnNames[i]] = thrust::device_vector<int_type>();
a->h_columns_int[columnNames[i]] = thrust::host_vector<int_type, uninitialized_host_allocator<int_type> >();
}
else if(a->type[columnNames[i]] == 1) {
a->d_columns_float[columnNames[i]] = thrust::device_vector<float_type>();
a->h_columns_float[columnNames[i]] = thrust::host_vector<float_type, uninitialized_host_allocator<float_type> >();
}
else {
a->h_columns_char[columnNames[i]] = nullptr;
a->d_columns_char[columnNames[i]] = nullptr;
};
};
a->load_file_name = load_file_name;
a->mRecCount = 0;
return a;
}
// Random-access read of selected rows (indexes in prm_vh) of one compressed
// column segment straight from its on-disk file, storing the raw (still
// encoded) values into dest->h_columns_* at `offset`.
// Returns the segment's stored base value (presumably the PFOR frame base)
// so the caller can finish decoding.
int_type CudaSet::readSsdSegmentsFromFile(unsigned int segNum, string colname, size_t offset, thrust::host_vector<unsigned int>& prm_vh, CudaSet* dest)
{
    string f1 = load_file_name + "." + colname + "." + to_string(segNum);
    FILE* f = fopen(f1.c_str(), "rb" );
    if(!f) {
        cout << "Error opening " << f1 << " file " << endl;
        exit(0);
    };
    unsigned int cnt, bits;
    int_type lower_val;
    // 4 KB staging buffers — one view per supported element width.
    unsigned short int val_s_r[4096/2];
    char val_c_r[4096];
    unsigned int val_i_r[4096/4];
    unsigned long long int val_l_r[4096/8];
    unsigned int idx;
    bool idx_set = 0;
    // Segment header: payload byte count, base value, then (after the
    // payload) the per-element bit width.
    fread(&cnt, 4, 1, f);
    fread(&lower_val, 8, 1, f);
    fseek(f, cnt - (8+4) + 32, SEEK_CUR);
    fread(&bits, 4, 1, f);
    if(type[colname] == 0) {
        for(unsigned int i = 0; i < prm_vh.size(); i++) {
            // Refill the 4 KB window when the wanted row falls outside it
            // (assumes prm_vh is sorted ascending — TODO confirm).
            if(!idx_set || prm_vh[i] >= idx + 4096/(bits/8)) {
                fseek(f, 24 + prm_vh[i]*(bits/8), SEEK_SET);
                idx = prm_vh[i];
                idx_set = 1;
                if(bits == 8) {
                    fread(&val_c_r[0], 4096, 1, f);
                    dest->h_columns_int[colname][i + offset] = val_c_r[0];
                }
                else if(bits == 16) {
                    fread(&val_s_r, 4096, 1, f);
                    dest->h_columns_int[colname][i + offset] = val_s_r[0];
                }
                if(bits == 32) {
                    fread(&val_i_r, 4096, 1, f);
                    dest->h_columns_int[colname][i + offset] = val_i_r[0];
                }
                if(bits == 64) {   // fix: was `bits == 84`, so 64-bit segments were never read
                    fread(&val_l_r, 4096, 1, f);
                    dest->h_columns_int[colname][i + offset] = val_l_r[0];
                }
            }
            else {
                if(bits == 8) {
                    dest->h_columns_int[colname][i + offset] = val_c_r[prm_vh[i]-idx];
                }
                else if(bits == 16) {
                    dest->h_columns_int[colname][i + offset] = val_s_r[prm_vh[i]-idx];
                }
                if(bits == 32) {
                    dest->h_columns_int[colname][i + offset] = val_i_r[prm_vh[i]-idx];
                }
                if(bits == 64) {   // fix: was `bits == 84`
                    dest->h_columns_int[colname][i + offset] = val_l_r[prm_vh[i]-idx];
                }
            };
        };
    }
    else if(type[colname] == 1) {
        // Float columns: copy bits/8 raw bytes per row out of the window.
        for(unsigned int i = 0; i < prm_vh.size(); i++) {
            if(!idx_set || prm_vh[i] >= idx + 4096/(bits/8)) {
                fseek(f, 24 + prm_vh[i]*(bits/8), SEEK_SET);
                idx = prm_vh[i];
                idx_set = 1;
                fread(val_c_r, 4096, 1, f);
                memcpy(&dest->h_columns_float[colname][i + offset], &val_c_r[0], bits/8);
            }
            else {
                memcpy(&dest->h_columns_float[colname][i + offset], &val_c_r[(prm_vh[i]-idx)*(bits/8)], bits/8);
            };
        };
    }
    else {
        //no strings in fact tables
    };
    fclose(f);
    return lower_val;
}
// Variant of readSsdSegmentsFromFile that writes the raw encoded values of
// the selected rows (prm_vh) into the caller-supplied vector `dest` instead
// of into a CudaSet column.  Returns the segment's stored base value.
int_type CudaSet::readSsdSegmentsFromFileR(unsigned int segNum, string colname, thrust::host_vector<unsigned int>& prm_vh, thrust::host_vector<unsigned int>& dest)
{
    string f1 = load_file_name + "." + colname + "." + to_string(segNum);
    FILE* f = fopen(f1.c_str(), "rb" );
    if(!f) {
        cout << "Error opening " << f1 << " file " << endl;
        exit(0);
    };
    unsigned int cnt, bits;
    int_type lower_val;
    // Segment header: payload byte count, base value, per-element bit width.
    fread(&cnt, 4, 1, f);
    fread(&lower_val, 8, 1, f);
    fseek(f, cnt - (8+4) + 32, SEEK_CUR);
    fread(&bits, 4, 1, f);
    // 4 KB staging buffers — one view per supported element width.
    unsigned short int val_s_r[4096/2];
    char val_c_r[4096];
    unsigned int val_i_r[4096/4];
    unsigned long long int val_l_r[4096/8];
    unsigned int idx;
    bool idx_set = 0;
    for(unsigned int i = 0; i < prm_vh.size(); i++) {
        // Refill the 4 KB window when the wanted row falls outside it
        // (assumes prm_vh is sorted ascending — TODO confirm).
        if(!idx_set || prm_vh[i] >= idx + 4096/(bits/8)) {
            fseek(f, 24 + prm_vh[i]*(bits/8), SEEK_SET);
            idx = prm_vh[i];
            idx_set = 1;
            if(bits == 8) {
                fread(val_c_r, 4096, 1, f);
                dest[i] = val_c_r[0];
            }
            else if(bits == 16) {
                fread(val_s_r, 4096, 1, f);
                dest[i] = val_s_r[0];
            }
            if(bits == 32) {
                fread(val_i_r, 4096, 1, f);
                dest[i] = val_i_r[0];
            }
            if(bits == 64) {   // fix: was `bits == 84`, so 64-bit segments were never read
                fread(val_l_r, 4096, 1, f);
                dest[i] = val_l_r[0];
            }
        }
        else {
            if(bits == 8) {
                dest[i] = val_c_r[prm_vh[i]-idx];
            }
            else if(bits == 16) {
                dest[i] = val_s_r[prm_vh[i]-idx];
            }
            if(bits == 32) {
                dest[i] = val_i_r[prm_vh[i]-idx];
            }
            if(bits == 64) {   // fix: was `bits == 84`
                dest[i] = val_l_r[prm_vh[i]-idx];
            }
        };
    };
    fclose(f);
    return lower_val;
}
std::clock_t tot_disk;
// Load one compressed column segment from disk into the column's host
// buffer, or — in interactive mode — into the pinned in-memory buffer
// cache keyed by file name.  Accumulates elapsed wall time into tot_disk.
void CudaSet::readSegmentsFromFile(unsigned int segNum, string colname)
{
    string f1 = load_file_name + "." + colname + "." + to_string(segNum);
    if(type[colname] == 2)
        f1 = f1 + ".idx";       // char columns read their index file
    std::clock_t start1 = std::clock();
    if(interactive) { //check if data are in buffers
        if(buffers.find(f1) == buffers.end()) { // add data to buffers
            FILE* f = fopen(f1.c_str(), "rb" );
            if(!f) {
                process_error(3, "Error opening " + string(f1) +" file " );
            };
            fseek(f, 0, SEEK_END);
            long fileSize = ftell(f);
            // Evict oldest cached buffers until the new file fits.
            while(total_buffer_size + fileSize > getTotalSystemMemory() && !buffer_names.empty()) {
                hipHostFree(buffers[buffer_names.front()]);
                total_buffer_size = total_buffer_size - buffer_sizes[buffer_names.front()];
                buffer_sizes.erase(buffer_names.front());
                buffers.erase(buffer_names.front());
                buffer_names.pop();
            };
            fseek(f, 0, SEEK_SET);
            char* buff;
            hipHostMalloc((void**) &buff, fileSize,hipHostMallocDefault);
            fread(buff, fileSize, 1, f);
            fclose(f);
            buffers[f1] = buff;
            buffer_sizes[f1] = fileSize;
            // Fix: f1 was previously pushed onto buffer_names twice, so a
            // later eviction pass would process the same (already freed and
            // erased) buffer a second time and corrupt the accounting.
            buffer_names.push(f1);
            total_buffer_size = total_buffer_size + fileSize;
            cout << "added buffer " << f1 << " " << fileSize << endl;
        };
        // get data from buffers: make sure the host column has room for the
        // decompressed payload (first 4 bytes of the buffer = payload count).
        if(type[colname] != 1) {
            unsigned int cnt = ((unsigned int*)buffers[f1])[0];
            if(cnt > h_columns_int[colname].size()/8 + 10)
                h_columns_int[colname].resize(cnt/8 + 10);
        }
        else {
            unsigned int cnt = ((unsigned int*)buffers[f1])[0];
            if(cnt > h_columns_float[colname].size()/8 + 10)
                h_columns_float[colname].resize(cnt/8 + 10);
        }
    }
    else {
        // Non-interactive: read the compressed segment straight into the
        // host column vector (count word first, then cnt+52 payload bytes).
        FILE* f = fopen(f1.c_str(), "rb" );
        if(!f) {
            cout << "Error opening " << f1 << " file " << endl;
            exit(0);
        };
        if(type[colname] != 1) {
            if(1 > h_columns_int[colname].size())
                h_columns_int[colname].resize(1);
            fread(h_columns_int[colname].data(), 4, 1, f);
            unsigned int cnt = ((unsigned int*)(h_columns_int[colname].data()))[0];
            if(cnt/8+10 > h_columns_int[colname].size()) {
                h_columns_int[colname].resize(cnt + 10);
            };
            size_t rr = fread((unsigned int*)(h_columns_int[colname].data()) + 1, 1, cnt+52, f);
            if(rr != cnt+52) {
                char buf[1024];
                sprintf(buf, "Couldn't read %d bytes from %s ,read only", cnt+52, f1.c_str());
                process_error(3, string(buf));
            };
        }
        else {
            if(1 > h_columns_float[colname].size())
                h_columns_float[colname].resize(1);
            fread(h_columns_float[colname].data(), 4, 1, f);
            unsigned int cnt = ((unsigned int*)(h_columns_float[colname].data()))[0];
            if(cnt/8+10 > h_columns_float[colname].size())
                h_columns_float[colname].resize(cnt + 10);
            size_t rr = fread((unsigned int*)(h_columns_float[colname].data()) + 1, 1, cnt+52, f);
            if(rr != cnt+52) {
                char buf[1024];
                sprintf(buf, "Couldn't read %d bytes from %s ,read only", cnt+52, f1.c_str());
                process_error(3, string(buf));
            };
        }
        fclose(f);
    };
    tot_disk = tot_disk + (std::clock() - start1);
};
// Copy one segment of `colname` to the GPU, decompressing (pfor) if the set
// is compressed.  Destination is the column's device vector at `offset`,
// or the shared `alloced_tmp` scratch buffer when `alloced_switch` is set.
// Side effect: sets mRecCount to the number of records in this segment.
void CudaSet::CopyColumnToGpu(string colname, unsigned int segment, size_t offset)
{
if(not_compressed) {
// calculate how many records we need to copy
if(segment < segCount-1) {
mRecCount = maxRecs;
}
else {
mRecCount = hostRecCount - maxRecs*(segCount-1);
};
if(type[colname] != 1) {
if(!alloced_switch) {
thrust::copy(h_columns_int[colname].begin() + maxRecs*segment, h_columns_int[colname].begin() + maxRecs*segment + mRecCount, d_columns_int[colname].begin() + offset);
}
else {
thrust::device_ptr<int_type> d_col((int_type*)alloced_tmp);
thrust::copy(h_columns_int[colname].begin() + maxRecs*segment, h_columns_int[colname].begin() + maxRecs*segment + mRecCount, d_col);
};
}
else {
if(!alloced_switch) {
thrust::copy(h_columns_float[colname].begin() + maxRecs*segment, h_columns_float[colname].begin() + maxRecs*segment + mRecCount, d_columns_float[colname].begin() + offset);
}
else {
thrust::device_ptr<float_type> d_col((float_type*)alloced_tmp);
thrust::copy(h_columns_float[colname].begin() + maxRecs*segment, h_columns_float[colname].begin() + maxRecs*segment + mRecCount, d_col);
};
}
}
else {
// Compressed path: pull the segment from disk/cache, then decompress on
// the GPU (d_v/s_v are small lazily-allocated device scratch buffers).
readSegmentsFromFile(segment,colname);
if(!d_v)
CUDA_SAFE_CALL(hipMalloc((void **) &d_v, 12));
if(!s_v)
CUDA_SAFE_CALL(hipMalloc((void **) &s_v, 8));
string f1;
if(type[colname] == 2) {
f1 = load_file_name + "." + colname + "." + to_string(segment) + ".idx";
}
else {
f1 = load_file_name + "." + colname + "." + to_string(segment);
};
if(type[colname] != 1) {
if(!alloced_switch) {
if(buffers.find(f1) == buffers.end()) {
mRecCount = pfor_decompress(thrust::raw_pointer_cast(d_columns_int[colname].data() + offset), h_columns_int[colname].data(), d_v, s_v, colname);
}
else {
mRecCount = pfor_decompress(thrust::raw_pointer_cast(d_columns_int[colname].data() + offset), buffers[f1], d_v, s_v, colname);
};
}
else {
if(buffers.find(f1) == buffers.end()) {
mRecCount = pfor_decompress(alloced_tmp, h_columns_int[colname].data(), d_v, s_v, colname);
}
else {
mRecCount = pfor_decompress(alloced_tmp, buffers[f1], d_v, s_v, colname);
};
};
}
else {
// Decimal floats are stored as scaled integers; after decompression
// they are converted in place to float_type (unless phase_copy defers it).
if(decimal[colname]) {
if(!alloced_switch) {
if(buffers.find(f1) == buffers.end()) {
mRecCount = pfor_decompress( thrust::raw_pointer_cast(d_columns_float[colname].data() + offset) , h_columns_float[colname].data(), d_v, s_v, colname);
}
else {
mRecCount = pfor_decompress( thrust::raw_pointer_cast(d_columns_float[colname].data() + offset) , buffers[f1], d_v, s_v, colname);
};
if(!phase_copy) {
thrust::device_ptr<long long int> d_col_int((long long int*)thrust::raw_pointer_cast(d_columns_float[colname].data() + offset));
thrust::transform(d_col_int,d_col_int+mRecCount,d_columns_float[colname].begin(), long_to_float());
};
}
else {
if(buffers.find(f1) == buffers.end()) {
mRecCount = pfor_decompress(alloced_tmp, h_columns_float[colname].data(), d_v, s_v, colname);
}
else {
mRecCount = pfor_decompress(alloced_tmp, buffers[f1], d_v, s_v, colname);
};
if(!phase_copy) {
thrust::device_ptr<long long int> d_col_int((long long int*)alloced_tmp);
thrust::device_ptr<float_type> d_col_float((float_type*)alloced_tmp);
thrust::transform(d_col_int,d_col_int+mRecCount, d_col_float, long_to_float());
};
//for(int i = 0; i < mRecCount;i++)
//cout << "DECOMP " << (float_type)(d_col_int[i]) << " " << d_col_float[i] << endl;
};
}
//else // uncompressed float
// will have to fix it later so uncompressed data will be written by segments too
}
};
}
// Copy ALL segments of `colname` to the GPU, decompressing each in turn and
// appending at increasing device offsets.  Sets mRecCount to the total
// number of records copied.
void CudaSet::CopyColumnToGpu(string colname) // copy all segments
{
if(not_compressed) {
if(type[colname] != 1)
thrust::copy(h_columns_int[colname].begin(), h_columns_int[colname].begin() + mRecCount, d_columns_int[colname].begin());
else
thrust::copy(h_columns_float[colname].begin(), h_columns_float[colname].begin() + mRecCount, d_columns_float[colname].begin());
}
else {
// Lazily allocate the small device scratch buffers used by pfor_decompress.
if(!d_v)
CUDA_SAFE_CALL(hipMalloc((void **) &d_v, 12));
if(!s_v)
CUDA_SAFE_CALL(hipMalloc((void **) &s_v, 8));
size_t cnt = 0;
string f1;
for(unsigned int i = 0; i < segCount; i++) {
readSegmentsFromFile(i,colname);
if(type[colname] == 2) {
f1 = load_file_name + "." + colname + "." + to_string(i) + ".idx";
}
else {
f1 = load_file_name + "." + colname + "." + to_string(i);
};
if(type[colname] == 0) {
if(buffers.find(f1) == buffers.end()) {
mRecCount = pfor_decompress(thrust::raw_pointer_cast(d_columns_int[colname].data() + cnt), h_columns_int[colname].data(), d_v, s_v, colname);
}
else {
mRecCount = pfor_decompress(thrust::raw_pointer_cast(d_columns_int[colname].data() + cnt), buffers[f1], d_v, s_v, colname);
};
}
else if(type[colname] == 1) {
// Decimal floats decompress to scaled integers, then convert in place.
if(decimal[colname]) {
if(buffers.find(f1) == buffers.end()) {
mRecCount = pfor_decompress( thrust::raw_pointer_cast(d_columns_float[colname].data() + cnt) , h_columns_float[colname].data(), d_v, s_v, colname);
}
else {
mRecCount = pfor_decompress( thrust::raw_pointer_cast(d_columns_float[colname].data() + cnt) , buffers[f1], d_v, s_v, colname);
};
if(!phase_copy) {
thrust::device_ptr<long long int> d_col_int((long long int*)thrust::raw_pointer_cast(d_columns_float[colname].data() + cnt));
thrust::transform(d_col_int,d_col_int+mRecCount,d_columns_float[colname].begin() + cnt, long_to_float());
};
}
// else uncompressed float
// will have to fix it later so uncompressed data will be written by segments too
};
cnt = cnt + mRecCount;
//totalRecs = totals + mRecCount;
};
mRecCount = cnt;
};
}
// Copy the first RecCount device elements of `colname` into the host-side
// vector, starting at index `offset`.
void CudaSet::CopyColumnToHost(string colname, size_t offset, size_t RecCount)
{
    if (type[colname] == 1) {
        thrust::copy(d_columns_float[colname].begin(),
                     d_columns_float[colname].begin() + RecCount,
                     h_columns_float[colname].begin() + offset);
    }
    else {
        thrust::copy(d_columns_int[colname].begin(),
                     d_columns_int[colname].begin() + RecCount,
                     h_columns_int[colname].begin() + offset);
    }
}
// Convenience overload: copy the whole column (mRecCount records) to host.
void CudaSet::CopyColumnToHost(string colname)
{
CopyColumnToHost(colname, 0, mRecCount);
}
// Copy `count` records of every column from device to host at `offset`.
void CudaSet::CopyToHost(size_t offset, size_t count)
{
    for (unsigned int col = 0; col < columnNames.size(); col++) {
        CopyColumnToHost(columnNames[col], offset, count);
    }
}
// Raw device pointer to the float column `name` (invalidated by a resize).
float_type* CudaSet::get_float_type_by_name(string name)
{
return thrust::raw_pointer_cast(d_columns_float[name].data());
}
// Raw device pointer to the int column `name` (invalidated by a resize).
int_type* CudaSet::get_int_by_name(string name)
{
return thrust::raw_pointer_cast(d_columns_int[name].data());
}
// Raw host pointer to the float column `name` (invalidated by a resize).
float_type* CudaSet::get_host_float_by_name(string name)
{
return thrust::raw_pointer_cast(h_columns_float[name].data());
}
// Raw host pointer to the int column `name` (invalidated by a resize).
int_type* CudaSet::get_host_int_by_name(string name)
{
return thrust::raw_pointer_cast(h_columns_int[name].data());
}
void CudaSet::GroupBy(stack<string> columnRef)
{
if(grp.size() < mRecCount)
grp.resize(mRecCount);
thrust::fill(grp.begin(), grp.begin()+mRecCount,0);
if(scratch.size() < mRecCount)
scratch.resize(mRecCount*sizeof(bool));
thrust::device_ptr<bool> d_group((bool*)thrust::raw_pointer_cast(scratch.data()));
d_group[mRecCount-1] = 1;
for(int i = 0; i < columnRef.size(); columnRef.pop()) {
if(ts_cols[columnRef.top()]) {
queue<string> fields;
fields.push(columnRef.top());
copyFinalize(this, fields,1);
time_t start_t;
std::vector<time_t> rcol;
thrust::device_vector<int_type> unq(mRecCount);
thrust::copy(d_columns_int[columnRef.top()].begin(), d_columns_int[columnRef.top()].begin() + mRecCount, unq.begin());
auto result_end = thrust::unique(unq.begin(), unq.end());
if(unq[0] != 0 || mRecCount == 1)
start_t = unq[0]/1000;
else {
start_t = unq[1]/1000;
};
time_t end_t = unq[(result_end-unq.begin())-1]/1000;
cout << "start end " << start_t << " " << end_t << endl;
//int year_start, year_end, month_start, month_end, day_start, day_end, hour_start, hour_end, minute_start, minute_end, second_start, second_end;
//struct tm my_tm, my_tm1;
auto my_tm = *gmtime (&start_t);
auto my_tm1 = *gmtime (&end_t );
//cout << my_tm.tm_year << " " << my_tm1.tm_year << " " << my_tm.tm_min << " " << my_tm1.tm_min << " " << my_tm.tm_hour << " " << my_tm1.tm_hour << endl;
rcol.push_back(0);//1970/01/01
auto pos = grp_val.find("YEAR");
int grp_num;
if(pos != string::npos) {
grp_num = stoi(grp_val.substr(0, pos));
my_tm.tm_mon = 0;
my_tm.tm_mday = 1;
my_tm.tm_hour = 0;
my_tm.tm_min = 0;
my_tm.tm_sec = 0;
start_t = tm_to_time_t_utc(&my_tm);
rcol.push_back(start_t*1000);
while(start_t <= end_t) {
start_t = add_interval(start_t, grp_num, 0, 0, 0, 0, 0);
rcol.push_back(start_t*1000);
};
}
else {
pos = grp_val.find("MONTH");
int grp_num;
if(pos != string::npos) {
grp_num = stoi(grp_val.substr(0, pos));
my_tm.tm_mday = 1;
my_tm.tm_hour = 0;
my_tm.tm_min = 0;
my_tm.tm_sec = 0;
start_t = tm_to_time_t_utc(&my_tm);
cout << "interval " << start_t << endl;
rcol.push_back(start_t*1000);
while(start_t <= end_t) {
start_t = add_interval(start_t, 0, grp_num, 0, 0, 0, 0);
cout << "interval " << start_t << endl;
rcol.push_back(start_t*1000);
};
}
else {
pos = grp_val.find("DAY");
int grp_num;
if(pos != string::npos) {
grp_num = stoi(grp_val.substr(0, pos));
my_tm.tm_hour = 0;
my_tm.tm_min = 0;
my_tm.tm_sec = 0;
start_t = tm_to_time_t_utc(&my_tm);
rcol.push_back(start_t*1000);
while(start_t <= end_t) {
start_t = add_interval(start_t, 0, 0, grp_num, 0, 0, 0);
rcol.push_back(start_t*1000);
};
}
else {
pos = grp_val.find("HOUR");
int grp_num;
if(pos != string::npos) {
grp_num = stoi(grp_val.substr(0, pos));
my_tm.tm_min = 0;
my_tm.tm_sec = 0;
start_t = tm_to_time_t_utc(&my_tm);
rcol.push_back(start_t*1000);
while(start_t <= end_t) {
start_t = add_interval(start_t, 0, 0, 0, grp_num, 0, 0);
rcol.push_back(start_t*1000);
};
}
else {
pos = grp_val.find("MINUTE");
int grp_num;
if(pos != string::npos) {
grp_num = stoi(grp_val.substr(0, pos));
my_tm.tm_sec = 0;
start_t = tm_to_time_t_utc(&my_tm);
rcol.push_back(start_t*1000);
while(start_t <= end_t) {
start_t = add_interval(start_t, 0, 0, 0, 0, grp_num, 0);
rcol.push_back(start_t*1000);
};
}
else {
pos = grp_val.find("SECOND");
int grp_num;
if(pos != string::npos) {
grp_num = stoi(grp_val.substr(0, pos));
start_t = tm_to_time_t_utc(&my_tm);
rcol.push_back(start_t*1000);
while(start_t <= end_t) {
start_t = add_interval(start_t, 0, 0, 0, 0, 0, grp_num);
rcol.push_back(start_t*1000);
};
}
}
}
}
}
};
//thrust::device_vector<unsigned int> output(mRecCount);
rcol_matches.resize(mRecCount);
rcol_dev.resize(rcol.size());
thrust::copy(rcol.data(), rcol.data() + rcol.size(), rcol_dev.begin());
thrust::lower_bound(rcol_dev.begin(), rcol_dev.end(), d_columns_int[columnRef.top()].begin(), d_columns_int[columnRef.top()].begin() + mRecCount, rcol_matches.begin());
thrust::transform(rcol_matches.begin(), rcol_matches.begin() + mRecCount - 1, rcol_matches.begin()+1, d_group, thrust::not_equal_to<unsigned int>());
thrust::transform(rcol_matches.begin(), rcol_matches.begin() + mRecCount, rcol_matches.begin(), decrease());
d_group[mRecCount-1] = 1;
}
else {
unsigned int bits;
if(cpy_bits.empty())
bits = 0;
else
bits = cpy_bits[columnRef.top()];
if(bits == 8) {
if (type[columnRef.top()] != 1) { // int_type
thrust::device_ptr<unsigned char> src((unsigned char*)thrust::raw_pointer_cast(d_columns_int[columnRef.top()].data()));
thrust::transform(src, src + mRecCount - 1, src+1, d_group, thrust::not_equal_to<unsigned char>());
}
else {
thrust::device_ptr<unsigned char> src((unsigned char*)thrust::raw_pointer_cast(d_columns_float[columnRef.top()].data()));
thrust::transform(src, src + mRecCount - 1, src+1, d_group, thrust::not_equal_to<unsigned char>());
};
}
else if(bits == 16) {
if (type[columnRef.top()] != 1) { // int_type
thrust::device_ptr<unsigned short int> src((unsigned short int*)thrust::raw_pointer_cast(d_columns_int[columnRef.top()].data()));
thrust::transform(src, src + mRecCount - 1, src+1, d_group, thrust::not_equal_to<unsigned short int>());
}
else {
thrust::device_ptr<unsigned short int> src((unsigned short int*)thrust::raw_pointer_cast(d_columns_float[columnRef.top()].data()));
thrust::transform(src, src + mRecCount - 1, src+1, d_group, thrust::not_equal_to<unsigned short int>());
};
}
else if(bits == 32) {
if (type[columnRef.top()] != 1) { // int_type
thrust::device_ptr<unsigned int> src((unsigned int*)thrust::raw_pointer_cast(d_columns_int[columnRef.top()].data()));
thrust::transform(src, src + mRecCount - 1, src+1, d_group, thrust::not_equal_to<unsigned int>());
}
else {
thrust::device_ptr<unsigned int> src((unsigned int*)thrust::raw_pointer_cast(d_columns_float[columnRef.top()].data()));
thrust::transform(src, src + mRecCount - 1, src+1, d_group, thrust::not_equal_to<unsigned int>());
};
}
else {
if (type[columnRef.top()] != 1) { // int_type
thrust::transform(d_columns_int[columnRef.top()].begin(), d_columns_int[columnRef.top()].begin() + mRecCount - 1,
d_columns_int[columnRef.top()].begin()+1, d_group, thrust::not_equal_to<int_type>());
}
else {
thrust::transform(d_columns_float[columnRef.top()].begin(), d_columns_float[columnRef.top()].begin() + mRecCount - 1,
d_columns_float[columnRef.top()].begin()+1, d_group, f_not_equal_to());
};
}
};
thrust::transform(d_group, d_group+mRecCount, grp.begin(), grp.begin(), thrust::logical_or<bool>());
};
grp_count = thrust::count(grp.begin(), grp.begin()+mRecCount, 1);
cout << "grp count " << grp_count << endl;
};
void CudaSet::addDeviceColumn(int_type* col, string colname, size_t recCount)
{
if (std::find(columnNames.begin(), columnNames.end(), colname) == columnNames.end()) {
columnNames.push_back(colname);
type[colname] = 0;
d_columns_int[colname] = thrust::device_vector<int_type>(recCount);
h_columns_int[colname] = thrust::host_vector<int_type, uninitialized_host_allocator<int_type> >(recCount);
}
else { // already exists, my need to resize it
if(d_columns_int[colname].size() < recCount) {
d_columns_int[colname].resize(recCount);
};
if(h_columns_int[colname].size() < recCount) {
h_columns_int[colname].resize(recCount);
};
};
// copy data to d columns
thrust::device_ptr<int_type> d_col((int_type*)col);
thrust::copy(d_col, d_col+recCount, d_columns_int[colname].begin());
thrust::copy(d_columns_int[colname].begin(), d_columns_int[colname].begin()+recCount, h_columns_int[colname].begin());
};
void CudaSet::addDeviceColumn(float_type* col, string colname, size_t recCount, bool is_decimal)
{
if (std::find(columnNames.begin(), columnNames.end(), colname) == columnNames.end()) {
columnNames.push_back(colname);
type[colname] = 1;
d_columns_float[colname] = thrust::device_vector<float_type>(recCount);
h_columns_float[colname] = thrust::host_vector<float_type, uninitialized_host_allocator<float_type> >(recCount);
}
else { // already exists, my need to resize it
if(d_columns_float[colname].size() < recCount)
d_columns_float[colname].resize(recCount);
if(h_columns_float[colname].size() < recCount)
h_columns_float[colname].resize(recCount);
};
decimal[colname] = is_decimal;
thrust::device_ptr<float_type> d_col((float_type*)col);
thrust::copy(d_col, d_col+recCount, d_columns_float[colname].begin());
};
void CudaSet::gpu_perm(queue<string> sf, thrust::device_vector<unsigned int>& permutation) {
permutation.resize(mRecCount);
thrust::sequence(permutation.begin(), permutation.begin() + mRecCount,0,1);
unsigned int* raw_ptr = thrust::raw_pointer_cast(permutation.data());
void* temp;
CUDA_SAFE_CALL(hipMalloc((void **) &temp, mRecCount*8));
string sort_type = "ASC";
while(!sf.empty()) {
if (type[sf.front()] == 0) {
update_permutation(d_columns_int[sf.front()], raw_ptr, mRecCount, sort_type, (int_type*)temp, 64);
}
else if (type[sf.front()] == 1) {
update_permutation(d_columns_float[sf.front()], raw_ptr, mRecCount, sort_type, (float_type*)temp, 64);
}
else {
thrust::host_vector<unsigned int> permutation_h = permutation;
char* temp1 = new char[char_size[sf.front()]*mRecCount];
update_permutation_char_host(h_columns_char[sf.front()], permutation_h.data(), mRecCount, sort_type, temp1, char_size[sf.front()]);
delete [] temp1;
permutation = permutation_h;
};
sf.pop();
};
hipFree(temp);
}
void CudaSet::compress(string file_name, size_t offset, unsigned int check_type, unsigned int check_val, size_t mCount, const bool append)
{
string str(file_name);
thrust::device_vector<unsigned int> permutation;
long long int oldCount;
bool int_check = 0;
void* d;
CUDA_SAFE_CALL(hipMalloc((void **) &d, mCount*float_size));
total_count = total_count + mCount;
if (mCount > total_max && op_sort.empty()) {
total_max = mCount;
};
if(!total_segments && append) {
string s= file_name + "." + columnNames[0] + ".header";
ifstream binary_file(s.c_str(),ios::binary);
if(binary_file) {
binary_file.read((char *)&oldCount, 8);
binary_file.read((char *)&total_segments, 4);
binary_file.read((char *)&maxRecs, 4);
if(total_max < maxRecs)
total_max = maxRecs;
binary_file.close();
total_count = oldCount + mCount;
};
};
string s = file_name + ".interval";
ifstream f(s.c_str());
if (f.good()) {
f.seekg (0, f.end);
int length = f.tellg();
f.seekg (0, f.beg);
char* buff = new char[length];
f.read(buff, length);
f.close();
char* p = strtok(buff, "|");
string s1(p);
p = strtok(NULL, "|");
string s2(p);
delete [] buff;
s = file_name + ".key";
ifstream f1(s.c_str());
if (f1.good()) {
f1.seekg (0, f1.end);
length = f1.tellg();
f1.seekg (0, f1.beg);
buff = new char[length+1];
buff[length] = 0;
f1.read(buff, length);
f1.close();
string s3(buff);
delete [] buff;
load_file_name = file_name;
calc_intervals(s1, s2, s3, total_segments, append);
int_check = 1;
};
};
if(!op_sort.empty()) { //sort the segment
gpu_perm(op_sort, permutation);
};
// here we need to check for partitions and if partition_count > 0 -> create partitions
if(mCount < partition_count || partition_count == 0)
partition_count = 1;
unsigned int partition_recs = mCount/partition_count;
if(!op_sort.empty()) {
if(total_max < partition_recs)
total_max = partition_recs;
};
total_segments++;
old_segments = total_segments;
size_t new_offset;
for(unsigned int i = 0; i < columnNames.size(); i++) {
std::clock_t start1 = std::clock();
string colname = columnNames[i];
str = file_name + "." + colname;
curr_file = str;
str += "." + to_string(total_segments-1);
new_offset = 0;
if(type[colname] == 0) {
thrust::device_ptr<int_type> d_col((int_type*)d);
if(!op_sort.empty()) {
thrust::gather(permutation.begin(), permutation.end(), d_columns_int[colname].begin(), d_col);
for(unsigned int p = 0; p < partition_count; p++) {
str = file_name + "." + colname;
curr_file = str;
str += "." + to_string(total_segments-1);
if (p < partition_count - 1) {
pfor_compress( (int_type*)d + new_offset, partition_recs*int_size, str, h_columns_int[colname], 0);
}
else {
pfor_compress( (int_type*)d + new_offset, (mCount - partition_recs*p)*int_size, str, h_columns_int[colname], 0);
};
new_offset = new_offset + partition_recs;
total_segments++;
};
}
else {
if(!int_check) {
thrust::copy(h_columns_int[colname].begin() + offset, h_columns_int[colname].begin() + offset + mCount, d_col);
pfor_compress( d, mCount*int_size, str, h_columns_int[colname], 0);
}
else {
pfor_compress( thrust::raw_pointer_cast(d_columns_int[colname].data()), mCount*int_size, str, h_columns_int[colname], 0);
};
};
}
else if(type[colname] == 1) {
if(decimal[colname]) {
thrust::device_ptr<float_type> d_col((float_type*)d);
if(!op_sort.empty()) {
thrust::gather(permutation.begin(), permutation.end(), d_columns_float[colname].begin(), d_col);
thrust::device_ptr<long long int> d_col_dec((long long int*)d);
thrust::transform(d_col,d_col+mCount,d_col_dec, float_to_long());
for(unsigned int p = 0; p < partition_count; p++) {
str = file_name + "." + colname;
curr_file = str;
str += "." + to_string(total_segments-1);
if (p < partition_count - 1)
pfor_compress( (int_type*)d + new_offset, partition_recs*float_size, str, h_columns_float[colname], 1);
else
pfor_compress( (int_type*)d + new_offset, (mCount - partition_recs*p)*float_size, str, h_columns_float[colname], 1);
new_offset = new_offset + partition_recs;
total_segments++;
};
}
else {
thrust::copy(h_columns_float[colname].begin() + offset, h_columns_float[colname].begin() + offset + mCount, d_col);
thrust::device_ptr<long long int> d_col_dec((long long int*)d);
thrust::transform(d_col,d_col+mCount,d_col_dec, float_to_long());
pfor_compress( d, mCount*float_size, str, h_columns_float[colname], 1);
};
}
else { // do not compress -- float
thrust::device_ptr<float_type> d_col((float_type*)d);
if(!op_sort.empty()) {
thrust::gather(permutation.begin(), permutation.end(), d_columns_float[colname].begin(), d_col);
thrust::copy(d_col, d_col+mRecCount, h_columns_float[colname].begin());
for(unsigned int p = 0; p < partition_count; p++) {
str = file_name + "." + colname;
curr_file = str;
str += "." + to_string(total_segments-1);
unsigned int curr_cnt;
if (p < partition_count - 1)
curr_cnt = partition_recs;
else
curr_cnt = mCount - partition_recs*p;
fstream binary_file(str.c_str(),ios::out|ios::binary|fstream::app);
binary_file.write((char *)&curr_cnt, 4);
binary_file.write((char *)(h_columns_float[colname].data() + new_offset),curr_cnt*float_size);
new_offset = new_offset + partition_recs;
unsigned int comp_type = 3;
binary_file.write((char *)&comp_type, 4);
binary_file.close();
};
}
else {
fstream binary_file(str.c_str(),ios::out|ios::binary|fstream::app);
binary_file.write((char *)&mCount, 4);
binary_file.write((char *)(h_columns_float[colname].data() + offset),mCount*float_size);
unsigned int comp_type = 3;
binary_file.write((char *)&comp_type, 4);
binary_file.close();
};
};
}
else { //char
//populate char_hash
if(append && total_segments == 1) {
string s= file_name + "." + colname;
ifstream binary_file(s.c_str(),ios::binary);
if(binary_file) {
char* strings = new char[oldCount*char_size[colname]];
binary_file.read(strings, oldCount*char_size[colname]);
binary_file.close();
unsigned int ind = std::find(columnNames.begin(), columnNames.end(), colname) - columnNames.begin();
for (unsigned int z = 0 ; z < oldCount; z++) {
char_hash[ind][MurmurHash64A(&strings[z*char_size[colname]], char_size[colname], hash_seed)/2] = z;
};
delete [] strings;
};
};
if(!op_sort.empty()) {
unsigned int* h_permutation = new unsigned int[mRecCount];
thrust::copy(permutation.begin(), permutation.end(), h_permutation);
char* t = new char[char_size[colname]*mRecCount];
apply_permutation_char_host(h_columns_char[colname], h_permutation, mRecCount, t, char_size[colname]);
delete [] h_permutation;
thrust::copy(t, t+ char_size[colname]*mRecCount, h_columns_char[colname]);
delete [] t;
for(unsigned int p = 0; p < partition_count; p++) {
str = file_name + "." + colname;
curr_file = str;
str += "." + to_string(total_segments-1);
if (p < partition_count - 1)
compress_char(str, colname, partition_recs, new_offset, total_segments-1);
else
compress_char(str, colname, mCount - partition_recs*p, new_offset, total_segments-1);
new_offset = new_offset + partition_recs;
total_segments++;
};
}
else {
compress_char(str, colname, mCount, offset, total_segments-1);
};
};
if((check_type == 1 && fact_file_loaded) || (check_type == 1 && check_val == 0)) {
if(!op_sort.empty())
writeHeader(file_name, colname, total_segments-1);
else {
writeHeader(file_name, colname, total_segments);
};
};
total_segments = old_segments;
};
hipFree(d);
if(!op_sort.empty()) {
total_segments = (old_segments-1)+partition_count;
};
permutation.resize(0);
permutation.shrink_to_fit();
}
void CudaSet::calc_intervals(string dt1, string dt2, string index, unsigned int total_segs, bool append) {
alloced_switch = 1;
not_compressed = 1;
thrust::device_vector<unsigned int> permutation;
thrust::device_vector<int_type> stencil(maxRecs);
thrust::device_vector<int_type> d_dt2(maxRecs);
thrust::device_vector<int_type> d_index(maxRecs);
phase_copy = 0;
queue<string> sf;
sf.push(dt1);
sf.push(index);
gpu_perm(sf, permutation);
for(unsigned int i = 0; i < columnNames.size(); i++) {
if(type[columnNames[i]] == 0)
apply_permutation(d_columns_int[columnNames[i]], thrust::raw_pointer_cast(permutation.data()), mRecCount, (int_type*)thrust::raw_pointer_cast(stencil.data()), 0);
else {
unsigned int* h_permutation = new unsigned int[mRecCount];
thrust::copy(permutation.begin(), permutation.end(), h_permutation);
char* t = new char[char_size[columnNames[i]]*mRecCount];
apply_permutation_char_host(h_columns_char[columnNames[i]], h_permutation, mRecCount, t, char_size[columnNames[i]]);
delete [] h_permutation;
thrust::copy(t, t+ char_size[columnNames[i]]*mRecCount, h_columns_char[columnNames[i]]);
delete [] t;
};
};
if(type[index] == 2) {
d_columns_int[index] = thrust::device_vector<int_type>(mRecCount);
h_columns_int[index] = thrust::host_vector<int_type>(mRecCount);
for(int i = 0; i < mRecCount; i++)
h_columns_int[index][i] = MurmurHash64A(&h_columns_char[index][i*char_size[index]], char_size[index], hash_seed)/2;
d_columns_int[index] = h_columns_int[index];
};
thrust::counting_iterator<unsigned int> begin(0);
gpu_interval ff(thrust::raw_pointer_cast(d_columns_int[dt1].data()), thrust::raw_pointer_cast(d_columns_int[dt2].data()), thrust::raw_pointer_cast(d_columns_int[index].data()));
thrust::for_each(begin, begin + mRecCount - 1, ff);
auto stack_count = mRecCount;
if(append) {
not_compressed = 0;
size_t mysz = 8;
if(char_size[index] > int_size)
mysz = char_size[index];
if(mysz*maxRecs > alloced_sz) {
if(alloced_sz) {
hipFree(alloced_tmp);
};
hipMalloc((void **) &alloced_tmp, mysz*maxRecs);
alloced_sz = mysz*maxRecs;
}
thrust::device_ptr<int_type> d_col((int_type*)alloced_tmp);
d_columns_int[dt2].resize(0);
thrust::device_vector<unsigned int> output(stack_count);
for(int i = 0; i < total_segments; i++) {
CopyColumnToGpu(dt2, i, 0);
if(thrust::count(d_col, d_col+mRecCount,0)) {
thrust::copy(d_col, d_col+mRecCount, d_dt2.begin());
if(type[index] == 2) {
string f1 = load_file_name + "." + index + "." + to_string(i) + ".hash";
FILE* f = fopen(f1.c_str(), "rb" );
unsigned int cnt;
fread(&cnt, 4, 1, f);
unsigned long long int* buff = new unsigned long long int[cnt];
fread(buff, cnt*8, 1, f);
fclose(f);
thrust::copy(buff, buff + cnt, d_index.begin());
delete [] buff;
}
else {
CopyColumnToGpu(index, i, 0);
thrust::copy(d_col, d_col+mRecCount, d_index.begin());
};
thrust::lower_bound(d_columns_int[index].begin(), d_columns_int[index].begin()+stack_count, d_index.begin(), d_index.begin() + mRecCount, output.begin());
gpu_interval_set f(thrust::raw_pointer_cast(d_columns_int[dt1].data()), thrust::raw_pointer_cast(d_dt2.data()),
thrust::raw_pointer_cast(d_index.data()), thrust::raw_pointer_cast(d_columns_int[index].data()),
thrust::raw_pointer_cast(output.data()));
thrust::for_each(begin, begin + mRecCount, f);
string str = load_file_name + "." + dt2 + "." + to_string(i);;
pfor_compress( thrust::raw_pointer_cast(d_dt2.data()), mRecCount*int_size, str, h_columns_int[dt2], 0);
};
};
}
};
void CudaSet::writeHeader(string file_name, string colname, unsigned int tot_segs) {
string str = file_name + "." + colname;
string ff = str;
str += ".header";
fstream binary_file(str.c_str(),ios::out|ios::binary|ios::trunc);
binary_file.write((char *)&total_count, 8);
binary_file.write((char *)&tot_segs, 4);
binary_file.write((char *)&total_max, 4);
binary_file.write((char *)&cnt_counts[ff], 4);
//cout << "HEADER1 " << total_count << " " << tot_segs << " " << total_max << endl;
binary_file.close();
};
void CudaSet::reWriteHeader(string file_name, string colname, unsigned int tot_segs, size_t newRecs, size_t maxRecs1) {
string str = file_name + "." + colname;
string ff = str;
str += ".header";
fstream binary_file(str.c_str(),ios::out|ios::binary|ios::trunc);
binary_file.write((char *)&newRecs, 8);
binary_file.write((char *)&tot_segs, 4);
binary_file.write((char *)&maxRecs1, 4);
//cout << "HEADER2 " << newRecs << endl;
binary_file.close();
};
void CudaSet::writeSortHeader(string file_name)
{
string str(file_name);
unsigned int idx;
if(!op_sort.empty()) {
str += ".sort";
fstream binary_file(str.c_str(),ios::out|ios::binary|ios::trunc);
idx = (unsigned int)op_sort.size();
binary_file.write((char *)&idx, 4);
queue<string> os(op_sort);
while(!os.empty()) {
if(verbose)
cout << "sorted on " << idx << endl;
idx = os.front().size();
binary_file.write((char *)&idx, 4);
binary_file.write(os.front().data(), idx);
os.pop();
};
binary_file.close();
}
else {
str += ".sort";
remove(str.c_str());
};
str = file_name;
if(!op_presort.empty()) {
str += ".presort";
fstream binary_file(str.c_str(),ios::out|ios::binary|ios::trunc);
idx = (unsigned int)op_presort.size();
binary_file.write((char *)&idx, 4);
queue<string> os(op_presort);
while(!os.empty()) {
idx = os.front().size();
binary_file.write((char *)&idx, 4);
binary_file.write(os.front().data(), idx);
os.pop();
};
binary_file.close();
}
else {
str += ".presort";
remove(str.c_str());
};
}
using namespace mgpu;
void CudaSet::Display(unsigned int limit, bool binary, bool term)
{
#define MAXCOLS 128
#define MAXFIELDSIZE 1400
//-- This should/will be converted to an array holding pointers of malloced sized structures--
char bigbuf[MAXCOLS * MAXFIELDSIZE];
memset(bigbuf, 0, MAXCOLS * MAXFIELDSIZE);
char *fields[MAXCOLS];
const char *dcolumns[MAXCOLS];
size_t mCount; // num records in play
bool print_all = 0;
string ss, str;
int rows = 0;
if(limit != 0 && limit < mRecCount)
mCount = limit;
else {
mCount = mRecCount;
print_all = 1;
};
cout << "mRecCount=" << mRecCount << " mcount = " << mCount << " term " << term << " limit=" << limit << " print_all=" << print_all << endl;
unsigned int cc =0;
unordered_map<string, FILE*> file_map;
unordered_map<string, unsigned int> len_map;
for(unsigned int i = 0; i < columnNames.size(); i++)
{
fields[cc] = &(bigbuf[cc*MAXFIELDSIZE]); // a hack to avoid malloc overheads - refine later
dcolumns[cc++] = columnNames[i].c_str();
if(string_map.find(columnNames[i]) != string_map.end()) {
auto s = string_map[columnNames[i]];
auto pos = s.find_first_of(".");
auto len = data_dict[s.substr(0, pos)][s.substr(pos+1)].col_length;
FILE *f;
f = fopen(string_map[columnNames[i]].c_str(), "rb");
file_map[string_map[columnNames[i]]] = f;
len_map[string_map[columnNames[i]]] = len;
};
};
// The goal here is to loop fast and avoid any double handling of outgoing data - pointers are good.
if(not_compressed && prm_d.size() == 0) {
for(unsigned int i=0; i < mCount; i++) { // for each record
for(unsigned int j=0; j < columnNames.size(); j++) { // for each col
if (type[columnNames[j]] != 1) {
if(string_map.find(columnNames[j]) == string_map.end()) {
if(decimal_zeroes[columnNames[j]]) {
str = std::to_string(h_columns_int[columnNames[j]][i]);
//cout << "decimals " << columnNames[j] << " " << decimal_zeroes[columnNames[j]] << " " << h_columns_int[columnNames[j]][i] << endl;
while(str.length() <= decimal_zeroes[columnNames[j]])
str = '0' + str;
str.insert(str.length()- decimal_zeroes[columnNames[j]], ".");
sprintf(fields[j], "%s", str.c_str());
}
else {
if(!ts_cols[columnNames[j]])
sprintf(fields[j], "%lld", (h_columns_int[columnNames[j]])[i] );
else {
time_t ts = (h_columns_int[columnNames[j]][i])/1000;
auto ti = gmtime(&ts);
char buffer[30];
auto rem = (h_columns_int[columnNames[j]][i])%1000;
strftime(buffer,30,"%Y-%m-%d %H.%M.%S", ti);
//fprintf(file_pr, "%s", buffer);
//fprintf(file_pr, ".%d", rem);
sprintf(fields[j], "%s.%d", buffer,rem);
/*time_t tt = h_columns_int[columnNames[j]][i];
auto ti = localtime(&tt);
char buffer[10];
strftime(buffer,80,"%Y-%m-%d", ti);
sprintf(fields[j], "%s", buffer);
*/
};
};
}
else {
fseek(file_map[string_map[columnNames[j]]], h_columns_int[columnNames[j]][i] * len_map[string_map[columnNames[j]]], SEEK_SET);
fread(fields[j], 1, len_map[string_map[columnNames[j]]], file_map[string_map[columnNames[j]]]);
fields[j][len_map[string_map[columnNames[j]]]] ='\0'; // zero terminate string
};
}
else
sprintf(fields[j], "%.2f", (h_columns_float[columnNames[j]])[i] );
};
row_cb(mColumnCount, (char **)fields, (char **)dcolumns);
rows++;
};
}
else {
queue<string> op_vx;
for(unsigned int i = 0; i < columnNames.size(); i++)
op_vx.push(columnNames[i]);
if(prm_d.size() || source) {
allocColumns(this, op_vx);
};
unsigned int curr_seg = 0;
size_t cnt = 0;
size_t curr_count, sum_printed = 0;
resize(maxRecs);
while(sum_printed < mCount || print_all) {
if(prm_d.size() || source) { // if host arrays are empty
copyColumns(this, op_vx, curr_seg, cnt);
size_t olRecs = mRecCount;
mRecCount = olRecs;
CopyToHost(0,mRecCount);
if(sum_printed + mRecCount <= mCount || print_all)
curr_count = mRecCount;
else
curr_count = mCount - sum_printed;
}
else
curr_count = mCount;
sum_printed = sum_printed + mRecCount;
for(unsigned int i=0; i < curr_count; i++) {
for(unsigned int j=0; j < columnNames.size(); j++) {
if (type[columnNames[j]] != 1) {
if(string_map.find(columnNames[j]) == string_map.end())
sprintf(fields[j], "%lld", (h_columns_int[columnNames[j]])[i] );
else {
fseek(file_map[string_map[columnNames[j]]], h_columns_int[columnNames[j]][i] * len_map[string_map[columnNames[j]]], SEEK_SET);
fread(fields[j], 1, len_map[string_map[columnNames[j]]], file_map[string_map[columnNames[j]]]);
fields[j][len_map[string_map[columnNames[j]]]] ='\0'; // zero terminate string
};
}
else
sprintf(fields[j], "%.2f", (h_columns_float[columnNames[j]])[i] );
};
row_cb(mColumnCount, (char **)fields, (char**)dcolumns);
rows++;
};
curr_seg++;
if(curr_seg == segCount)
print_all = 0;
};
}; // end else
for(auto it = file_map.begin(); it != file_map.end(); it++)
fclose(it->second);
}
void CudaSet::Store(const string file_name, const char* sep, const unsigned int limit, const bool binary, const bool append, const bool term)
{
if (mRecCount == 0 && binary == 1 && !term) { // write tails
for(unsigned int j=0; j < columnNames.size(); j++) {
writeHeader(file_name, columnNames[j], total_segments);
};
return;
};
size_t mCount;
bool print_all = 0;
string str;
if(limit != 0 && limit < mRecCount)
mCount = limit;
else {
mCount = mRecCount;
print_all = 1;
};
if(binary == 0) {
unordered_map<string, FILE*> file_map;
unordered_map<string, unsigned int> len_map;
string bf;
unsigned int max_len = 0;
for(unsigned int j=0; j < columnNames.size(); j++) {
if(string_map.find(columnNames[j]) != string_map.end()) {
auto s = string_map[columnNames[j]];
auto pos = s.find_first_of(".");
auto len = data_dict[s.substr(0, pos)][s.substr(pos+1)].col_length;
if(len > max_len)
max_len = len;
FILE *f;
f = fopen(string_map[columnNames[j]].c_str(), "rb");
file_map[string_map[columnNames[j]]] = f;
len_map[string_map[columnNames[j]]] = len;
};
};
bf.reserve(max_len);
FILE *file_pr;
if(!term) {
file_pr = fopen(file_name.c_str(), "w");
if (!file_pr)
cout << "Could not open file " << file_name << endl;
}
else
file_pr = stdout;
if(not_compressed && prm_d.size() == 0) {
for(unsigned int i=0; i < mCount; i++) {
for(unsigned int j=0; j < columnNames.size(); j++) {
if (type[columnNames[j]] != 1 ) {
if(string_map.find(columnNames[j]) == string_map.end()) {
if(decimal_zeroes[columnNames[j]]) {
str = std::to_string(h_columns_int[columnNames[j]][i]);
//cout << "decimals " << columnNames[j] << " " << decimal_zeroes[columnNames[j]] << " " << h_columns_int[columnNames[j]][i] << endl;
while(str.length() <= decimal_zeroes[columnNames[j]])
str = '0' + str;
str.insert(str.length()- decimal_zeroes[columnNames[j]], ".");
fprintf(file_pr, "%s", str.c_str());
}
else {
if(!ts_cols[columnNames[j]]) {
fprintf(file_pr, "%lld", (h_columns_int[columnNames[j]])[i]);
}
else {
time_t ts = (h_columns_int[columnNames[j]][i])/1000;
auto ti = gmtime(&ts);
char buffer[30];
auto rem = (h_columns_int[columnNames[j]][i])%1000;
strftime(buffer,30,"%Y-%m-%d %H.%M.%S", ti);
fprintf(file_pr, "%s", buffer);
fprintf(file_pr, ".%d", rem);
};
};
}
else {
//fprintf(file_pr, "%.*s", string_hash[columnNames[j]][h_columns_int[columnNames[j]][i]].size(), string_hash[columnNames[j]][h_columns_int[columnNames[j]][i]].c_str());
fseek(file_map[string_map[columnNames[j]]], h_columns_int[columnNames[j]][i] * len_map[string_map[columnNames[j]]], SEEK_SET);
fread(&bf[0], 1, len_map[string_map[columnNames[j]]], file_map[string_map[columnNames[j]]]);
fprintf(file_pr, "%.*s", len_map[string_map[columnNames[j]]], bf.c_str());
};
fputs(sep, file_pr);
}
else {
fprintf(file_pr, "%.2f", (h_columns_float[columnNames[j]])[i]);
fputs(sep, file_pr);
}
};
if (i != mCount -1 )
fputs("\n",file_pr);
};
if(!term)
fclose(file_pr);
}
else {
queue<string> op_vx;
string ss;
for(unsigned int j=0; j < columnNames.size(); j++)
op_vx.push(columnNames[j]);
if(prm_d.size() || source) {
allocColumns(this, op_vx);
};
unsigned int curr_seg = 0;
size_t cnt = 0;
size_t curr_count, sum_printed = 0;
mRecCount = 0;
resize(maxRecs);
while(sum_printed < mCount || print_all) {
if(prm_d.size() || source) {
copyColumns(this, op_vx, curr_seg, cnt);
if(curr_seg == 0) {
if(limit != 0 && limit < mRecCount) {
mCount = limit;
print_all = 0;
}
else {
mCount = mRecCount;
print_all = 1;
};
};
// if host arrays are empty
size_t olRecs = mRecCount;
mRecCount = olRecs;
CopyToHost(0,mRecCount);
//cout << "start " << sum_printed << " " << mRecCount << " " << mCount << endl;
if(sum_printed + mRecCount <= mCount || print_all) {
curr_count = mRecCount;
}
else {
curr_count = mCount - sum_printed;
};
}
else {
curr_count = mCount;
};
sum_printed = sum_printed + mRecCount;
//cout << "sum printed " << sum_printed << " " << curr_count << " " << curr_seg << endl;
for(unsigned int i=0; i < curr_count; i++) {
for(unsigned int j=0; j < columnNames.size(); j++) {
if (type[columnNames[j]] != 1) {
if(string_map.find(columnNames[j]) == string_map.end()) {
cout << "here3 " << endl;
if(decimal_zeroes[columnNames[j]]) {
str = std::to_string(h_columns_int[columnNames[j]][i]);
//cout << "decimals " << columnNames[j] << " " << decimal_zeroes[columnNames[j]] << " " << h_columns_int[columnNames[j]][i] << endl;
while(str.length() <= decimal_zeroes[columnNames[j]])
str = '0' + str;
str.insert(str.length()- decimal_zeroes[columnNames[j]], ".");
fprintf(file_pr, "%s", str.c_str());
}
else {
if(!ts_cols[columnNames[j]]) {
fprintf(file_pr, "%lld", (h_columns_int[columnNames[j]])[i]);
}
else {
time_t ts = (h_columns_int[columnNames[j]][i])/1000;
auto ti = gmtime(&ts);
char buffer[30];
auto rem = (h_columns_int[columnNames[j]][i])%1000;
strftime(buffer,30,"%Y-%m-%d %H.%M.%S", ti);
fprintf(file_pr, "%s", buffer);
fprintf(file_pr, ".%d", rem);
};
};
}
else {
fseek(file_map[string_map[columnNames[j]]], h_columns_int[columnNames[j]][i] * len_map[string_map[columnNames[j]]], SEEK_SET);
fread(&bf[0], 1, len_map[string_map[columnNames[j]]], file_map[string_map[columnNames[j]]]);
fprintf(file_pr, "%.*s", len_map[string_map[columnNames[j]]], bf.c_str());
};
fputs(sep, file_pr);
}
else {
fprintf(file_pr, "%.2f", (h_columns_float[columnNames[j]])[i]);
fputs(sep, file_pr);
};
};
if (i != mCount -1 && (curr_seg != segCount || i < curr_count))
fputs("\n",file_pr);
};
curr_seg++;
if(curr_seg == segCount)
print_all = 0;
};
if(!term) {
fclose(file_pr);
};
};
for(auto it = file_map.begin(); it != file_map.end(); it++)
fclose(it->second);
}
else {
//lets update the data dictionary
for(unsigned int j=0; j < columnNames.size(); j++) {
data_dict[file_name][columnNames[j]].col_type = type[columnNames[j]];
if(type[columnNames[j]] != 2) {
if(decimal[columnNames[j]])
data_dict[file_name][columnNames[j]].col_length = decimal_zeroes[columnNames[j]];
else if (ts_cols[columnNames[j]])
data_dict[file_name][columnNames[j]].col_length = UINT_MAX;
else
data_dict[file_name][columnNames[j]].col_length = 0;
}
else
data_dict[file_name][columnNames[j]].col_length = char_size[columnNames[j]];
};
save_dict = 1;
if(text_source) { //writing a binary file using a text file as a source
compress(file_name, 0, 1, 0, mCount, append);
for(unsigned int i = 0; i< columnNames.size(); i++)
if(type[columnNames[i]] == 2)
deAllocColumnOnDevice(columnNames[i]);
}
else { //writing a binary file using a binary file as a source
fact_file_loaded = 1;
size_t offset = 0;
if(!not_compressed) { // records are compressed, for example after filter op.
//decompress to host
queue<string> op_vx;
for(unsigned int i = 0; i< columnNames.size(); i++) {
op_vx.push(columnNames[i]);
};
allocColumns(this, op_vx);
size_t oldCnt = mRecCount;
mRecCount = 0;
resize(oldCnt);
mRecCount = oldCnt;
for(unsigned int i = 0; i < segCount; i++) {
size_t cnt = 0;
copyColumns(this, op_vx, i, cnt);
CopyToHost(0, mRecCount);
offset = offset + mRecCount;
compress(file_name, 0, 0, i - (segCount-1), mRecCount, append);
};
}
else {
// now we have decompressed records on the host
//call setSegments and compress columns in every segment
segCount = (mRecCount/process_count + 1);
offset = 0;
for(unsigned int z = 0; z < segCount; z++) {
if(z < segCount-1) {
if(mRecCount < process_count) {
mCount = mRecCount;
}
else {
mCount = process_count;
}
}
else {
mCount = mRecCount - (segCount-1)*process_count;
};
compress(file_name, offset, 0, z - (segCount-1), mCount, append);
offset = offset + mCount;
};
};
};
};
}
// Dictionary-compresses a fixed-width char column for one segment.
// Outputs (relative to file_name with its extension stripped):
//   - "<base>"                 : shared dictionary file; each previously-unseen string is appended
//   - "<base>.<segment>.hash"  : record count (4 bytes) followed by the 64-bit string hash per record
//   - "<base>.<segment>.idx"   : pfor-compressed per-record dictionary indices
// `offset` is the starting record inside h_columns_char[colname]; `mCount` is the record count.
// NOTE(review): uniqueness is decided purely by MurmurHash64A/2 — hash collisions would
// alias distinct strings to one dictionary slot; presumably accepted by design — confirm.
void CudaSet::compress_char(const string file_name, const string colname, const size_t mCount, const size_t offset, const unsigned int segment)
{
unsigned int len = char_size[colname];
string h_name, i_name, file_no_seg = file_name.substr(0, file_name.find_last_of("."));
i_name = file_no_seg + "." + to_string(segment) + ".idx";
h_name = file_no_seg + "." + to_string(segment) + ".hash";
fstream b_file_str, loc_hashes;
fstream binary_file_h(h_name.c_str(),ios::out|ios::binary|ios::trunc);
// File format stores the count in 4 bytes even though mCount is size_t.
binary_file_h.write((char *)&mCount, 4);
// Segment 0 starts a fresh dictionary file; later segments append to it.
if(segment == 0) {
b_file_str.open(file_no_seg.c_str(),ios::out|ios::binary|ios::trunc);
}
else {
b_file_str.open(file_no_seg.c_str(),ios::out|ios::binary|ios::app);
};
// Ensure the host/device int columns that will hold the dictionary indices exist and fit mCount.
if(h_columns_int.find(colname) == h_columns_int.end()) {
h_columns_int[colname] = thrust::host_vector<int_type >(mCount);
}
else {
if(h_columns_int[colname].size() < mCount)
h_columns_int[colname].resize(mCount);
};
if(d_columns_int.find(colname) == d_columns_int.end()) {
d_columns_int[colname] = thrust::device_vector<int_type >(mCount);
}
else {
if(d_columns_int[colname].size() < mCount)
d_columns_int[colname].resize(mCount);
};
size_t cnt;
long long int* hash_array = new long long int[mCount];
map<unsigned long long int, size_t>::iterator iter;
// Position of this column in columnNames selects its per-column hash->index map.
unsigned int ind = std::find(columnNames.begin(), columnNames.end(), colname) - columnNames.begin();
for (unsigned int i = 0 ; i < mCount; i++) {
// /2 keeps the hash non-negative when stored in a signed long long.
hash_array[i] = MurmurHash64A(h_columns_char[colname] + (i+offset)*len, len, hash_seed)/2;
iter = char_hash[ind].find(hash_array[i]);
if(iter == char_hash[ind].end()) {
// New string: next dictionary slot, append raw bytes to the dictionary file.
cnt = char_hash[ind].size();
char_hash[ind][hash_array[i]] = cnt;
b_file_str.write((char *)h_columns_char[colname] + (i+offset)*len, len);
h_columns_int[colname][i] = cnt;
}
else {
// Seen before: reuse its dictionary index.
h_columns_int[colname][i] = iter->second;
};
};
binary_file_h.write((char *)hash_array, 8*mCount);
delete [] hash_array;
// Stage the indices on the device and pfor-compress them into the .idx file.
thrust::device_vector<int_type> d_col(mCount);
thrust::copy(h_columns_int[colname].begin(), h_columns_int[colname].begin() + mCount, d_col.begin());
pfor_compress(thrust::raw_pointer_cast(d_col.data()), mCount*int_size, i_name, h_columns_int[colname], 0);
binary_file_h.close();
b_file_str.close();
};
// Dictionary-encodes an int column and writes it bit-packed to file_name.
// Encoding: collect the distinct values (sorted via std::set), map each record to its
// rank, then pack `fit_count = 64/bits_encoded` ranks per 64-bit word, MSB-first.
// File layout: [dict size:4][dict values:int_size each][fit_count:4][bits_encoded:4]
//              [vals_count:4][real_count:4][packed 64-bit words].
void CudaSet::compress_int(const string file_name, const string colname, const size_t mCount)
{
std::vector<unsigned int> dict_val;
unsigned int bits_encoded;
set<int_type> dict_s;
map<int_type, unsigned int> d_ordered;
// Pass 1: gather the distinct values (set keeps them sorted).
for (unsigned int i = 0 ; i < mCount; i++) {
int_type f = h_columns_int[colname][i];
dict_s.insert(f);
};
// Assign each distinct value its rank in sorted order.
unsigned int i = 0;
for (auto it = dict_s.begin(); it != dict_s.end(); it++) {
d_ordered[*it] = i++;
};
// Pass 2: translate every record into its dictionary rank.
for (unsigned int i = 0 ; i < mCount; i++) {
int_type f = h_columns_int[colname][i];
dict_val.push_back(d_ordered[f]);
};
// Bits needed per rank (+1 so the max rank itself is representable).
bits_encoded = (unsigned int)ceil(log2(double(d_ordered.size()+1)));
//cout << "bits " << bits_encoded << endl;
unsigned int sz = (unsigned int)d_ordered.size();
// write to a file
fstream binary_file(file_name.c_str(),ios::out|ios::binary|ios::trunc);
binary_file.write((char *)&sz, 4);
// Dictionary values are written in rank order (map iterates keys ascending).
for (auto it = d_ordered.begin(); it != d_ordered.end(); it++) {
binary_file.write((char*)(&(it->first)), int_size);
};
unsigned int fit_count = 64/bits_encoded;
unsigned long long int val = 0;
binary_file.write((char *)&fit_count, 4);
binary_file.write((char *)&bits_encoded, 4);
unsigned int curr_cnt = 1;
// Number of packed words = ceil(#records / fit_count).
unsigned int vals_count = (unsigned int)dict_val.size()/fit_count;
if(!vals_count || dict_val.size()%fit_count)
vals_count++;
binary_file.write((char *)&vals_count, 4);
unsigned int real_count = (unsigned int)dict_val.size();
binary_file.write((char *)&real_count, 4);
// Pack ranks MSB-first: OR in a rank, then shift left unless the word is full.
for(unsigned int i = 0; i < dict_val.size(); i++) {
val = val | dict_val[i];
if(curr_cnt < fit_count)
val = val << bits_encoded;
if( (curr_cnt == fit_count) || (i == (dict_val.size() - 1)) ) {
if (curr_cnt < fit_count) {
// Partial last word: the loop above already shifted once, so pad the
// remaining (fit_count - curr_cnt - 1) slots to align with full words.
val = val << ((fit_count-curr_cnt)-1)*bits_encoded;
};
curr_cnt = 1;
binary_file.write((char *)&val, int_size);
val = 0;
}
else
curr_cnt = curr_cnt + 1;
};
binary_file.close();
};
// State shared across successive LoadBigFile() calls on the same input file:
bool first_time = 1;    // true until the first chunk has been read (buffers/columns not yet sized)
size_t rec_sz = 0;      // records per full chunk, fixed after the first read
size_t process_piece;   // read-buffer size in bytes (process_count, capped at getFreeMem()/4)
// Streams a large delimited text file into this CudaSet, chunk by chunk.
// Each call reads up to process_piece bytes, copies them to the GPU, locates the
// newline positions, parses fields on the device (parse_functor + per-type
// converters) and appends the results to the host/device columns.
// Returns true when the end of the file has been reached.
// Relies on the file-level globals first_time / rec_sz / process_piece / readbuff,
// so it is not reentrant across different files.
// NOTE(review): `dest`, `ind`, `dest_len` are caller-held device vectors that this
// function fills on the first call and frees (dest[i]) at EOF — confirm callers
// do not reuse them across files.
bool CudaSet::LoadBigFile(FILE* file_p, thrust::device_vector<char>& d_readbuff, thrust::device_vector<char*>& dest,
thrust::device_vector<unsigned int>& ind, thrust::device_vector<unsigned int>& dest_len)
{
const char* sep = separator.c_str();
// Highest 1-based field index referenced by the load; fields beyond it are ignored.
unsigned int maxx = cols.rbegin()->first;
map<unsigned int, string>::iterator it;
bool done = 0;
std::clock_t start1 = std::clock();
// types[f] = column type for field f (0 when the field is not loaded);
// cl = 0-based positions of the fields that are loaded, in column order.
vector<int> types;
vector<int> cl;
types.push_back(0);
for(int i = 0; i < maxx; i++) {
auto iter = cols.find(i+1);
if(iter != cols.end()) {
types.push_back(type[iter->second]);
cl.push_back(iter->first-1);
}
else
types.push_back(0);
};
if(first_time) {
// Size the staging buffer: the whole batch if it fits, else a quarter of free GPU memory.
if(process_count*4 > getFreeMem()) {
process_piece = getFreeMem()/4;
}
else
process_piece = process_count;
readbuff = new char[process_piece+1];
d_readbuff.resize(process_piece+1);
cout << "set a piece to " << process_piece << " " << getFreeMem() << endl;
};
thrust::device_vector<unsigned int> ind_cnt(1);
thrust::device_vector<char> sepp(1);
sepp[0] = *sep; // field separator, made visible to the device-side parser
long long int total_processed = 0;
size_t recs_processed = 0;
bool finished = 0;
thrust::device_vector<long long int> dev_pos; // newline offsets within the chunk
long long int offset;
unsigned int cnt = 1; // number of chunks handled so far (sizes the char buffers)
const unsigned int max_len = 23; // scratch width per numeric/date field
while(!done) {
auto rb = fread(readbuff, 1, process_piece, file_p);
// Guarantee the chunk ends with a newline so the last record is counted.
if(readbuff[rb-1] != '\n') {
rb++;
readbuff[rb-1] = '\n';
};
if(rb < process_piece) {
// Short read => end of file.
done = 1;
finished = 1;
fclose(file_p);
};
if(total_processed >= process_count)
done = 1;
thrust::fill(d_readbuff.begin(), d_readbuff.end(),0);
thrust::copy(readbuff, readbuff+rb, d_readbuff.begin());
// Records fully contained in this chunk (the trailing partial record is re-read next time).
auto curr_cnt = thrust::count(d_readbuff.begin(), d_readbuff.begin() + rb, '\n') - 1;
if(recs_processed == 0 && first_time) {
rec_sz = curr_cnt;
if(finished)
rec_sz++;
total_max = curr_cnt;
};
//cout << "curr_cnt " << curr_cnt << " Memory: " << getFreeMem() << endl;
if(first_time) {
// First chunk: grow the destination columns and allocate per-field scratch buffers.
for(unsigned int i=0; i < columnNames.size(); i++) {
auto colname = columnNames[i];
if (type[colname] == 0) {
d_columns_int[colname].resize(d_columns_int[colname].size() + rec_sz);
h_columns_int[colname].resize(h_columns_int[colname].size() + rec_sz);
}
else if (type[colname] == 1) {
d_columns_float[colname].resize(d_columns_float[colname].size() + rec_sz);
h_columns_float[colname].resize(h_columns_float[colname].size() + rec_sz);
}
else {
// Char columns are raw host buffers; grow by copy since realloc isn't available here.
char* c = new char[cnt*rec_sz*char_size[columnNames[i]]];
if(recs_processed > 0) {
memcpy(c, h_columns_char[columnNames[i]], recs_processed*char_size[columnNames[i]]);
delete [] h_columns_char[columnNames[i]];
};
h_columns_char[columnNames[i]] = c;
if(recs_processed == 0) {
void* temp;
CUDA_SAFE_CALL(hipMalloc((void **) &temp, char_size[columnNames[i]]*rec_sz));
hipMemset(temp,0,char_size[columnNames[i]]*rec_sz);
d_columns_char[columnNames[i]] = (char*)temp;
};
};
if(recs_processed == 0) {
// One device scratch buffer per column for the raw field text.
ind[i] = cl[i];
void* temp;
if(type[columnNames[i]] != 2) {
if(!ts_cols[columnNames[i]]) {
CUDA_SAFE_CALL(hipMalloc((void **) &temp, max_len*rec_sz));
dest_len[i] = max_len;
}
else {
CUDA_SAFE_CALL(hipMalloc((void **) &temp, 23*rec_sz));
dest_len[i] = 23;
}
}
else {
CUDA_SAFE_CALL(hipMalloc((void **) &temp, char_size[columnNames[i]]*rec_sz));
dest_len[i] = char_size[columnNames[i]];
};
dest[i] = (char*)temp;
};
};
};
// Clear the scratch buffers so short fields are NUL-padded.
for(unsigned int i=0; i < columnNames.size(); i++) {
if(type[columnNames[i]] != 2) {
hipMemset(dest[i],0,max_len*rec_sz);
}
else {
hipMemset(dest[i],0,char_size[columnNames[i]]*rec_sz);
};
};
if(dev_pos.size() < curr_cnt+1)
dev_pos.resize(curr_cnt+1); //avoiding the unnecessary allocs
// dev_pos[k] = offset of the newline preceding record k (so -1 sentinels record 0).
dev_pos[0] = -1;
thrust::copy_if(thrust::make_counting_iterator((unsigned long long int)0), thrust::make_counting_iterator((unsigned long long int)rb-1),
d_readbuff.begin(), dev_pos.begin()+1, _1 == '\n');
if(!finished) {
// Rewind the FILE* past the last complete record so the partial tail is re-read.
if(curr_cnt < rec_sz) {
offset = (dev_pos[curr_cnt] - rb)+1;
//cout << "PATH 1 " << dev_pos[curr_cnt] << " " << offset << endl;
fseek(file_p, offset, SEEK_CUR);
total_processed = total_processed + rb + offset;
mRecCount = curr_cnt;
}
else {
offset = (dev_pos[rec_sz] - rb)+1;
//cout << "PATH 2 " << dev_pos[rec_sz] << " " << offset << endl;
fseek(file_p, offset, SEEK_CUR);
total_processed = total_processed + rb + offset;
mRecCount = rec_sz;
};
}
else {
mRecCount = curr_cnt + 1;
};
// Split every record into per-column scratch buffers on the device.
thrust::counting_iterator<unsigned int> begin(0);
ind_cnt[0] = mColumnCount;
parse_functor ff((const char*)thrust::raw_pointer_cast(d_readbuff.data()),(char**)thrust::raw_pointer_cast(dest.data()), thrust::raw_pointer_cast(ind.data()),
thrust::raw_pointer_cast(ind_cnt.data()), thrust::raw_pointer_cast(sepp.data()), thrust::raw_pointer_cast(dev_pos.data()), thrust::raw_pointer_cast(dest_len.data()));
thrust::for_each(begin, begin + mRecCount, ff);
ind_cnt[0] = max_len;
// Convert each column's raw text into typed values, appending after recs_processed.
for(int i =0; i < mColumnCount; i++) {
if(type[columnNames[i]] == 0) { //int
thrust::device_ptr<char> p1((char*)dest[i]);
// Heuristic: a '-' at offset 4 of the first value means "YYYY-..." date text.
if(p1[4] == '-') { //date
if(!ts_cols[columnNames[i]]) {
gpu_date date_ff((const char*)dest[i],(long long int*)thrust::raw_pointer_cast(d_columns_int[columnNames[i]].data()) + recs_processed);
thrust::for_each(begin, begin + mRecCount, date_ff);
}
else {
gpu_tdate date_ff((const char*)dest[i],(long long int*)thrust::raw_pointer_cast(d_columns_int[columnNames[i]].data()) + recs_processed);
thrust::for_each(begin, begin + mRecCount, date_ff);
}
}
else { //int
if(decimal[columnNames[i]]) {
// Decimal column: parse with the column's scale (implied decimal places).
thrust::device_vector<unsigned int> scale(1);
scale[0] = decimal_zeroes[columnNames[i]];
gpu_atold atold((const char*)dest[i],(long long int*)thrust::raw_pointer_cast(d_columns_int[columnNames[i]].data()) + recs_processed,
thrust::raw_pointer_cast(ind_cnt.data()), thrust::raw_pointer_cast(scale.data()));
thrust::for_each(begin, begin + mRecCount, atold);
}
else {
gpu_atoll atoll_ff((const char*)dest[i],(long long int*)thrust::raw_pointer_cast(d_columns_int[columnNames[i]].data()) + recs_processed,
thrust::raw_pointer_cast(ind_cnt.data()));
thrust::for_each(begin, begin + mRecCount, atoll_ff);
};
};
// Mirror the freshly parsed slice back to the host column.
thrust::copy(d_columns_int[columnNames[i]].begin() + recs_processed, d_columns_int[columnNames[i]].begin()+recs_processed+mRecCount, h_columns_int[columnNames[i]].begin() + recs_processed);
}
else if(type[columnNames[i]] == 1) {
gpu_atof atof_ff((const char*)dest[i],(double*)thrust::raw_pointer_cast(d_columns_float[columnNames[i]].data()) + recs_processed,
thrust::raw_pointer_cast(ind_cnt.data()));
thrust::for_each(begin, begin + mRecCount, atof_ff);
thrust::copy(d_columns_float[columnNames[i]].begin() + recs_processed, d_columns_float[columnNames[i]].begin()+recs_processed+mRecCount, h_columns_float[columnNames[i]].begin() + recs_processed);
}
else {//char is already done
thrust::device_ptr<char> p1((char*)dest[i]);
hipMemcpy( h_columns_char[columnNames[i]] + char_size[columnNames[i]]*recs_processed, (void *)dest[i] , char_size[columnNames[i]]*mRecCount, hipMemcpyDeviceToHost);
};
};
recs_processed = recs_processed + mRecCount;
cnt++;
};
if(finished) {
// EOF: release the per-column scratch buffers and the host read buffer.
for(int i =0; i < mColumnCount; i++) {
if(dest[i]) {
hipFree(dest[i]);
dest[i] = nullptr;
};
};
delete [] readbuff;
};
cout << "processed recs " << recs_processed << " " << getFreeMem() << endl;
first_time = 0;
mRecCount = recs_processed;
return finished;
};
void CudaSet::free() {
for(unsigned int i = 0; i < columnNames.size(); i++ ) {
if(type[columnNames[i]] == 0 && h_columns_int[columnNames[i]].size() ) {
h_columns_int[columnNames[i]].resize(0);
h_columns_int[columnNames[i]].shrink_to_fit();
}
else {
h_columns_float[columnNames[i]].resize(0);
h_columns_float[columnNames[i]].shrink_to_fit();
};
};
if(prm_d.size()) {
prm_d.resize(0);
prm_d.shrink_to_fit();
};
deAllocOnDevice();
};
// Grows the global scratch-buffer pool by one device allocation of 8*maxRecs
// bytes (room for maxRecs 8-byte int_type/float_type values); the op() methods
// pop buffers from alloced_mem for their results.
void alloc_pool(unsigned int maxRecs) {
void* temp;
CUDA_SAFE_CALL(hipMalloc((void **) &temp, 8*maxRecs));
alloced_mem.push_back(temp);
};
bool* CudaSet::logical_and(bool* column1, bool* column2)
{
thrust::device_ptr<bool> dev_ptr1(column1);
thrust::device_ptr<bool> dev_ptr2(column2);
thrust::transform(dev_ptr1, dev_ptr1+mRecCount, dev_ptr2, dev_ptr1, thrust::logical_and<bool>());
thrust::device_free(dev_ptr2);
return column1;
}
bool* CudaSet::logical_or(bool* column1, bool* column2)
{
thrust::device_ptr<bool> dev_ptr1(column1);
thrust::device_ptr<bool> dev_ptr2(column2);
thrust::transform(dev_ptr1, dev_ptr1+mRecCount, dev_ptr2, dev_ptr1, thrust::logical_or<bool>());
thrust::device_free(dev_ptr2);
return column1;
}
// Compares two int scalars and returns a freshly device-allocated bool array of
// length mRecCount broadcast-filled with the single result.
// op_type: 1 '<', 2 '>', 4 '=', 5 '<=', 6 '>=', anything else '!='.
// The caller owns the returned buffer (release with thrust::device_free).
bool* CudaSet::compare(int_type s, int_type d, int_type op_type)
{
    bool res;
    switch (op_type) {
    case 2:  res = (d >  s); break;   // >
    case 1:  res = (d <  s); break;   // <
    case 6:  res = (d >= s); break;   // >=
    case 5:  res = (d <= s); break;   // <=
    case 4:  res = (d == s); break;   // =
    default: res = (d != s); break;   // !=
    };
    thrust::device_ptr<bool> p = thrust::device_malloc<bool>(mRecCount);
    // thrust::fill is the idiomatic constant broadcast; the original used
    // thrust::sequence with a zero step, which relies on bool arithmetic.
    thrust::fill(p, p + mRecCount, res);
    return thrust::raw_pointer_cast(p);
};
// Compares two float scalars with EPSILON tolerance and returns a freshly
// device-allocated bool array of length mRecCount broadcast-filled with the result.
// op_type: 1 '<', 2 '>', 4 '=', 5 '<=', 6 '>=', anything else '!='.
// The caller owns the returned buffer (release with thrust::device_free).
bool* CudaSet::compare(float_type s, float_type d, int_type op_type)
{
    // |d - s| < EPSILON  <=>  the two values are considered equal.
    const bool eq = ((d - s) < EPSILON) && ((d - s) > -EPSILON);
    bool res;
    switch (op_type) {
    case 2:  res = ((d - s) > EPSILON); break;         // >
    case 1:  res = ((s - d) > EPSILON); break;         // <
    case 6:  res = ((d - s) > EPSILON) || eq; break;   // >=
    case 5:  res = ((s - d) > EPSILON) || eq; break;   // <=
    case 4:  res = eq; break;                          // =
    default: res = !eq; break;                         // !=
    };
    thrust::device_ptr<bool> p = thrust::device_malloc<bool>(mRecCount);
    // thrust::fill is the idiomatic constant broadcast; the original used
    // thrust::sequence with a zero step, which relies on bool arithmetic.
    thrust::fill(p, p + mRecCount, res);
    return thrust::raw_pointer_cast(p);
}
// Compares each element of the device float column column1 against scalar d
// using the EPSILON-tolerant comparison functors.
// op_type: 1 '<', 2 '>', 4 '=', 5 '<=', 6 '>=', anything else '!='.
// Returns a newly device-allocated bool array of length mRecCount (caller frees).
bool* CudaSet::compare(float_type* column1, float_type d, int_type op_type)
{
    thrust::device_ptr<bool> out = thrust::device_malloc<bool>(mRecCount);
    thrust::device_ptr<float_type> col(column1);
    auto cst = thrust::make_constant_iterator(d);
    switch (op_type) {
    case 2:  thrust::transform(col, col + mRecCount, cst, out, f_greater()); break;          // >
    case 1:  thrust::transform(col, col + mRecCount, cst, out, f_less()); break;             // <
    case 6:  thrust::transform(col, col + mRecCount, cst, out, f_greater_equal_to()); break; // >=
    case 5:  thrust::transform(col, col + mRecCount, cst, out, f_less_equal()); break;       // <=
    case 4:  thrust::transform(col, col + mRecCount, cst, out, f_equal_to()); break;         // =
    default: thrust::transform(col, col + mRecCount, cst, out, f_not_equal_to()); break;     // !=
    };
    return thrust::raw_pointer_cast(out);
}
// Compares each element of the device int column column1 against scalar d.
// p1 is the decimal-scale adjustment applied to the column (each value is
// multiplied by 10^p1 via power_functor); p2 scales the scalar d by 10^p2 so
// fixed-point values with different scales line up before comparing.
// op_type: 1 '<', 2 '>', 4 '=', 5 '<=', 6 '>=', anything else '!='.
// Returns a newly device-allocated bool array of length mRecCount (caller frees).
bool* CudaSet::compare(int_type* column1, int_type d, int_type op_type, unsigned int p1, unsigned int p2)
{
thrust::device_ptr<bool> temp = thrust::device_malloc<bool>(mRecCount);
thrust::device_ptr<int_type> dev_ptr(column1);
// Bring the scalar up to the column's fixed-point scale.
if(p2)
d = d*(unsigned int)pow(10, p2);
if (op_type == 2) // >
if(!p1)
thrust::transform(dev_ptr, dev_ptr+mRecCount, thrust::make_constant_iterator(d), temp, thrust::greater<int_type>());
else
thrust::transform(thrust::make_transform_iterator(dev_ptr, power_functor<int_type>(p1)), thrust::make_transform_iterator(dev_ptr+mRecCount, power_functor<int_type>(p1)), thrust::make_constant_iterator(d), temp, thrust::greater<int_type>());
else if (op_type == 1) // <
if(!p1)
thrust::transform(dev_ptr, dev_ptr+mRecCount, thrust::make_constant_iterator(d), temp, thrust::less<int_type>());
else
thrust::transform(thrust::make_transform_iterator(dev_ptr, power_functor<int_type>(p1)), thrust::make_transform_iterator(dev_ptr+mRecCount, power_functor<int_type>(p1)), thrust::make_constant_iterator(d), temp, thrust::less<int_type>());
else if (op_type == 6) // >=
if(!p1)
thrust::transform(dev_ptr, dev_ptr+mRecCount, thrust::make_constant_iterator(d), temp, thrust::greater_equal<int_type>());
else
thrust::transform(thrust::make_transform_iterator(dev_ptr, power_functor<int_type>(p1)), thrust::make_transform_iterator(dev_ptr+mRecCount, power_functor<int_type>(p1)), thrust::make_constant_iterator(d), temp, thrust::greater_equal<int_type>());
else if (op_type == 5) // <=
if(!p1)
thrust::transform(dev_ptr, dev_ptr+mRecCount, thrust::make_constant_iterator(d), temp, thrust::less_equal<int_type>());
else
thrust::transform(thrust::make_transform_iterator(dev_ptr, power_functor<int_type>(p1)), thrust::make_transform_iterator(dev_ptr+mRecCount, power_functor<int_type>(p1)), thrust::make_constant_iterator(d), temp, thrust::less_equal<int_type>());
else if (op_type == 4)// =
if(!p1)
thrust::transform(dev_ptr, dev_ptr+mRecCount, thrust::make_constant_iterator(d), temp, thrust::equal_to<int_type>());
else
thrust::transform(thrust::make_transform_iterator(dev_ptr, power_functor<int_type>(p1)), thrust::make_transform_iterator(dev_ptr+mRecCount, power_functor<int_type>(p1)), thrust::make_constant_iterator(d), temp, thrust::equal_to<int_type>());
else // !=
if(!p1)
thrust::transform(dev_ptr, dev_ptr+mRecCount, thrust::make_constant_iterator(d), temp, thrust::not_equal_to<int_type>());
else
thrust::transform(thrust::make_transform_iterator(dev_ptr, power_functor<int_type>(p1)), thrust::make_transform_iterator(dev_ptr+mRecCount, power_functor<int_type>(p1)), thrust::make_constant_iterator(d), temp, thrust::not_equal_to<int_type>());
return thrust::raw_pointer_cast(temp);
}
// Element-wise comparison of two device int columns of length mRecCount.
// p1/p2 are the decimal-scale adjustments for column1/column2: a column with a
// nonzero scale is multiplied by 10^p (via power_functor) so fixed-point values
// with different numbers of decimal places line up before comparing.
// op_type: 1 '<', 2 '>', 4 '=', 5 '<=', 6 '>=', anything else '!='.
// Returns a newly device-allocated bool array of length mRecCount (caller frees).
//
// Fix: whenever column2 needed scaling, the original passed
// make_transform_iterator(dev_ptr2 + mRecCount, ...) as the BEGINNING of the
// second input range — i.e. it started reading one full column past the data
// (out of bounds). The second range must begin at dev_ptr2, exactly as the
// analogous arithmetic op(int_type*, int_type*, ...) does.
bool* CudaSet::compare(int_type* column1, int_type* column2, int_type op_type, unsigned int p1, unsigned int p2)
{
    thrust::device_ptr<int_type> dev_ptr1(column1);
    thrust::device_ptr<int_type> dev_ptr2(column2);
    thrust::device_ptr<bool> temp = thrust::device_malloc<bool>(mRecCount);
    if (op_type == 2) // >
        if(!p1 && !p2)
            thrust::transform(dev_ptr1, dev_ptr1+mRecCount, dev_ptr2, temp, thrust::greater<int_type>());
        else if(p1 && p2)
            thrust::transform(thrust::make_transform_iterator(dev_ptr1, power_functor<int_type>(p1)), thrust::make_transform_iterator(dev_ptr1+mRecCount, power_functor<int_type>(p1)), thrust::make_transform_iterator(dev_ptr2, power_functor<int_type>(p2)), temp, thrust::greater<int_type>());
        else if(p1)
            thrust::transform(thrust::make_transform_iterator(dev_ptr1, power_functor<int_type>(p1)), thrust::make_transform_iterator(dev_ptr1+mRecCount, power_functor<int_type>(p1)), dev_ptr2, temp, thrust::greater<int_type>());
        else
            thrust::transform(dev_ptr1, dev_ptr1+mRecCount, thrust::make_transform_iterator(dev_ptr2, power_functor<int_type>(p2)), temp, thrust::greater<int_type>());
    else if (op_type == 1) // <
        if(!p1 && !p2)
            thrust::transform(dev_ptr1, dev_ptr1+mRecCount, dev_ptr2, temp, thrust::less<int_type>());
        else if(p1 && p2)
            thrust::transform(thrust::make_transform_iterator(dev_ptr1, power_functor<int_type>(p1)), thrust::make_transform_iterator(dev_ptr1+mRecCount, power_functor<int_type>(p1)), thrust::make_transform_iterator(dev_ptr2, power_functor<int_type>(p2)), temp, thrust::less<int_type>());
        else if(p1)
            thrust::transform(thrust::make_transform_iterator(dev_ptr1, power_functor<int_type>(p1)), thrust::make_transform_iterator(dev_ptr1+mRecCount, power_functor<int_type>(p1)), dev_ptr2, temp, thrust::less<int_type>());
        else
            thrust::transform(dev_ptr1, dev_ptr1+mRecCount, thrust::make_transform_iterator(dev_ptr2, power_functor<int_type>(p2)), temp, thrust::less<int_type>());
    else if (op_type == 6) // >=
        if(!p1 && !p2)
            thrust::transform(dev_ptr1, dev_ptr1+mRecCount, dev_ptr2, temp, thrust::greater_equal<int_type>());
        else if(p1 && p2)
            thrust::transform(thrust::make_transform_iterator(dev_ptr1, power_functor<int_type>(p1)), thrust::make_transform_iterator(dev_ptr1+mRecCount, power_functor<int_type>(p1)), thrust::make_transform_iterator(dev_ptr2, power_functor<int_type>(p2)), temp, thrust::greater_equal<int_type>());
        else if(p1)
            thrust::transform(thrust::make_transform_iterator(dev_ptr1, power_functor<int_type>(p1)), thrust::make_transform_iterator(dev_ptr1+mRecCount, power_functor<int_type>(p1)), dev_ptr2, temp, thrust::greater_equal<int_type>());
        else
            thrust::transform(dev_ptr1, dev_ptr1+mRecCount, thrust::make_transform_iterator(dev_ptr2, power_functor<int_type>(p2)), temp, thrust::greater_equal<int_type>());
    else if (op_type == 5) // <=
        if(!p1 && !p2)
            thrust::transform(dev_ptr1, dev_ptr1+mRecCount, dev_ptr2, temp, thrust::less_equal<int_type>());
        else if(p1 && p2)
            thrust::transform(thrust::make_transform_iterator(dev_ptr1, power_functor<int_type>(p1)), thrust::make_transform_iterator(dev_ptr1+mRecCount, power_functor<int_type>(p1)), thrust::make_transform_iterator(dev_ptr2, power_functor<int_type>(p2)), temp, thrust::less_equal<int_type>());
        else if(p1)
            thrust::transform(thrust::make_transform_iterator(dev_ptr1, power_functor<int_type>(p1)), thrust::make_transform_iterator(dev_ptr1+mRecCount, power_functor<int_type>(p1)), dev_ptr2, temp, thrust::less_equal<int_type>());
        else
            thrust::transform(dev_ptr1, dev_ptr1+mRecCount, thrust::make_transform_iterator(dev_ptr2, power_functor<int_type>(p2)), temp, thrust::less_equal<int_type>());
    else if (op_type == 4)// =
        if(!p1 && !p2)
            thrust::transform(dev_ptr1, dev_ptr1+mRecCount, dev_ptr2, temp, thrust::equal_to<int_type>());
        else if(p1 && p2)
            thrust::transform(thrust::make_transform_iterator(dev_ptr1, power_functor<int_type>(p1)), thrust::make_transform_iterator(dev_ptr1+mRecCount, power_functor<int_type>(p1)), thrust::make_transform_iterator(dev_ptr2, power_functor<int_type>(p2)), temp, thrust::equal_to<int_type>());
        else if(p1)
            thrust::transform(thrust::make_transform_iterator(dev_ptr1, power_functor<int_type>(p1)), thrust::make_transform_iterator(dev_ptr1+mRecCount, power_functor<int_type>(p1)), dev_ptr2, temp, thrust::equal_to<int_type>());
        else
            thrust::transform(dev_ptr1, dev_ptr1+mRecCount, thrust::make_transform_iterator(dev_ptr2, power_functor<int_type>(p2)), temp, thrust::equal_to<int_type>());
    else // !=
        if(!p1 && !p2)
            thrust::transform(dev_ptr1, dev_ptr1+mRecCount, dev_ptr2, temp, thrust::not_equal_to<int_type>());
        else if(p1 && p2)
            thrust::transform(thrust::make_transform_iterator(dev_ptr1, power_functor<int_type>(p1)), thrust::make_transform_iterator(dev_ptr1+mRecCount, power_functor<int_type>(p1)), thrust::make_transform_iterator(dev_ptr2, power_functor<int_type>(p2)), temp, thrust::not_equal_to<int_type>());
        else if(p1)
            thrust::transform(thrust::make_transform_iterator(dev_ptr1, power_functor<int_type>(p1)), thrust::make_transform_iterator(dev_ptr1+mRecCount, power_functor<int_type>(p1)), dev_ptr2, temp, thrust::not_equal_to<int_type>());
        else
            thrust::transform(dev_ptr1, dev_ptr1+mRecCount, thrust::make_transform_iterator(dev_ptr2, power_functor<int_type>(p2)), temp, thrust::not_equal_to<int_type>());
    return thrust::raw_pointer_cast(temp);
}
// Element-wise comparison of two device float columns of length mRecCount
// using the EPSILON-tolerant comparison functors.
// op_type: 1 '<', 2 '>', 4 '=', 5 '<=', 6 '>=', anything else '!='.
// Returns a newly device-allocated bool array (caller frees).
bool* CudaSet::compare(float_type* column1, float_type* column2, int_type op_type)
{
    thrust::device_ptr<float_type> lhs(column1);
    thrust::device_ptr<float_type> rhs(column2);
    thrust::device_ptr<bool> out = thrust::device_malloc<bool>(mRecCount);
    switch (op_type) {
    case 2:  thrust::transform(lhs, lhs + mRecCount, rhs, out, f_greater()); break;          // >
    case 1:  thrust::transform(lhs, lhs + mRecCount, rhs, out, f_less()); break;             // <
    case 6:  thrust::transform(lhs, lhs + mRecCount, rhs, out, f_greater_equal_to()); break; // >=
    case 5:  thrust::transform(lhs, lhs + mRecCount, rhs, out, f_less_equal()); break;       // <=
    case 4:  thrust::transform(lhs, lhs + mRecCount, rhs, out, f_equal_to()); break;         // =
    default: thrust::transform(lhs, lhs + mRecCount, rhs, out, f_not_equal_to()); break;     // !=
    };
    return thrust::raw_pointer_cast(out);
}
// Element-wise comparison of a device float column against a device int column
// of length mRecCount. The int column is first promoted to float in a temporary
// buffer so one set of EPSILON-tolerant functors applies; the temporary is freed
// before returning.
// op_type: 1 '<', 2 '>', 4 '=', 5 '<=', 6 '>=', anything else '!='.
// Returns a newly device-allocated bool array (caller frees).
bool* CudaSet::compare(float_type* column1, int_type* column2, int_type op_type)
{
    thrust::device_ptr<float_type> lhs(column1);
    thrust::device_ptr<int_type> ints(column2);
    thrust::device_ptr<float_type> rhs = thrust::device_malloc<float_type>(mRecCount);
    thrust::device_ptr<bool> out = thrust::device_malloc<bool>(mRecCount);
    // Promote the integer column to float.
    thrust::transform(ints, ints + mRecCount, rhs, long_to_float_type());
    switch (op_type) {
    case 2:  thrust::transform(lhs, lhs + mRecCount, rhs, out, f_greater()); break;          // >
    case 1:  thrust::transform(lhs, lhs + mRecCount, rhs, out, f_less()); break;             // <
    case 6:  thrust::transform(lhs, lhs + mRecCount, rhs, out, f_greater_equal_to()); break; // >=
    case 5:  thrust::transform(lhs, lhs + mRecCount, rhs, out, f_less_equal()); break;       // <=
    case 4:  thrust::transform(lhs, lhs + mRecCount, rhs, out, f_equal_to()); break;         // =
    default: thrust::transform(lhs, lhs + mRecCount, rhs, out, f_not_equal_to()); break;     // !=
    };
    thrust::device_free(rhs);
    return thrust::raw_pointer_cast(out);
}
// Arithmetic between an int column and a float column of length mRecCount.
// The int column is promoted to float inside a scratch buffer popped from the
// global pool; the result lands in that same buffer, whose ownership passes to
// the caller. op_type: "MUL", "ADD", "MINUS", anything else divides.
// reverse == 0 computes column2 OP promoted(column1); reverse != 0 computes
// promoted(column1) OP column2.
float_type* CudaSet::op(int_type* column1, float_type* column2, string op_type, bool reverse)
{
    if (alloced_mem.empty()) {
        alloc_pool(maxRecs);   // make sure a scratch buffer is available
    };
    thrust::device_ptr<float_type> res((float_type*)alloced_mem.back());
    thrust::device_ptr<int_type> ints(column1);
    // Promote the int column into the scratch buffer.
    thrust::transform(ints, ints + mRecCount, res, long_to_float_type());
    thrust::device_ptr<float_type> flt(column2);
    if (!reverse) {
        // column2 OP promoted(column1)
        if (op_type.compare("MUL") == 0)
            thrust::transform(flt, flt + mRecCount, res, res, thrust::multiplies<float_type>());
        else if (op_type.compare("ADD") == 0)
            thrust::transform(flt, flt + mRecCount, res, res, thrust::plus<float_type>());
        else if (op_type.compare("MINUS") == 0)
            thrust::transform(flt, flt + mRecCount, res, res, thrust::minus<float_type>());
        else
            thrust::transform(flt, flt + mRecCount, res, res, thrust::divides<float_type>());
    }
    else {
        // promoted(column1) OP column2
        if (op_type.compare("MUL") == 0)
            thrust::transform(res, res + mRecCount, flt, res, thrust::multiplies<float_type>());
        else if (op_type.compare("ADD") == 0)
            thrust::transform(res, res + mRecCount, flt, res, thrust::plus<float_type>());
        else if (op_type.compare("MINUS") == 0)
            thrust::transform(res, res + mRecCount, flt, res, thrust::minus<float_type>());
        else
            thrust::transform(res, res + mRecCount, flt, res, thrust::divides<float_type>());
    };
    alloced_mem.pop_back();   // ownership of the scratch buffer passes to the caller
    return thrust::raw_pointer_cast(res);
}
// Arithmetic between an int column and an int scalar d. p1 is the decimal
// scale applied to the column (10^p1 via power_functor); p2 the scale applied
// to the scalar. The result goes into a scratch buffer popped from the global
// pool; ownership passes to the caller. op_type: "MUL", "ADD", "MINUS",
// anything else divides. reverse selects operand order (column OP d vs d OP column).
// NOTE(review): d is already multiplied by 10^p2 below, yet the !p1 branches of
// ADD/MINUS/divide use make_constant_iterator(d*(unsigned int)pow(10, p2)),
// which scales by 10^p2 a second time — looks like double scaling unless p2 is
// always 0 on those paths; confirm against callers before changing.
int_type* CudaSet::op(int_type* column1, int_type d, string op_type, bool reverse, unsigned int p1, unsigned int p2)
{
if(alloced_mem.empty()) {
alloc_pool(maxRecs);
};
//cout << "OP " << d << " " << op_type << " " << p1 << " " << p2 << endl;
thrust::device_ptr<int_type> temp((int_type*)alloced_mem.back());
thrust::device_ptr<int_type> dev_ptr1(column1);
// d1 keeps the unscaled scalar; MUL uses it so the product's scale stays correct.
unsigned int d1 = d;
if(p2)
d = d*(unsigned int)pow(10, p2);
if(reverse == 0) {
if (op_type.compare("MUL") == 0) {
thrust::transform(dev_ptr1, dev_ptr1+mRecCount, thrust::make_constant_iterator(d1), temp, thrust::multiplies<int_type>());
}
else if (op_type.compare("ADD") == 0) {
if(!p1)
thrust::transform(dev_ptr1, dev_ptr1+mRecCount, thrust::make_constant_iterator(d*(unsigned int)pow(10, p2)), temp, thrust::plus<int_type>());
else
thrust::transform(thrust::make_transform_iterator(dev_ptr1, power_functor<int_type>(p1)), thrust::make_transform_iterator(dev_ptr1+mRecCount, power_functor<int_type>(p1)), thrust::make_constant_iterator(d), temp, thrust::plus<int_type>());
}
else if (op_type.compare("MINUS") == 0) {
if(!p1)
thrust::transform(dev_ptr1, dev_ptr1+mRecCount, thrust::make_constant_iterator(d*(unsigned int)pow(10, p2)), temp, thrust::minus<int_type>());
else
thrust::transform(thrust::make_transform_iterator(dev_ptr1, power_functor<int_type>(p1)), thrust::make_transform_iterator(dev_ptr1+mRecCount, power_functor<int_type>(p1)), thrust::make_constant_iterator(d), temp, thrust::minus<int_type>());
}
else {
if(!p1)
thrust::transform(dev_ptr1, dev_ptr1+mRecCount, thrust::make_constant_iterator(d*(unsigned int)pow(10, p2)), temp, thrust::divides<int_type>());
else
thrust::transform(thrust::make_transform_iterator(dev_ptr1, power_functor<int_type>(p1)), thrust::make_transform_iterator(dev_ptr1+mRecCount, power_functor<int_type>(p1)), thrust::make_constant_iterator(d), temp, thrust::divides<int_type>());
}
}
else {
// Reversed operand order: d OP column.
if (op_type.compare("MUL") == 0) {
if(!p1)
thrust::transform(thrust::make_constant_iterator(d), thrust::make_constant_iterator(d) + mRecCount, dev_ptr1, temp, thrust::multiplies<int_type>());
else
thrust::transform(thrust::make_constant_iterator(d), thrust::make_constant_iterator(d) + mRecCount, thrust::make_transform_iterator(dev_ptr1, power_functor<int_type>(p1)), temp, thrust::multiplies<int_type>());
}
else if (op_type.compare("ADD") == 0) {
if(!p1)
thrust::transform(thrust::make_constant_iterator(d), thrust::make_constant_iterator(d) + mRecCount, dev_ptr1, temp, thrust::plus<int_type>());
else
thrust::transform(thrust::make_constant_iterator(d), thrust::make_constant_iterator(d) + mRecCount, thrust::make_transform_iterator(dev_ptr1, power_functor<int_type>(p1)), temp, thrust::plus<int_type>());
}
else if (op_type.compare("MINUS") == 0) {
if(!p1)
thrust::transform(thrust::make_constant_iterator(d), thrust::make_constant_iterator(d) + mRecCount, dev_ptr1, temp, thrust::minus<int_type>());
else
thrust::transform(thrust::make_constant_iterator(d), thrust::make_constant_iterator(d) + mRecCount, thrust::make_transform_iterator(dev_ptr1, power_functor<int_type>(p1)), temp, thrust::minus<int_type>());
}
else {
if(!p1)
thrust::transform(thrust::make_constant_iterator(d), thrust::make_constant_iterator(d) + mRecCount, dev_ptr1, temp, thrust::divides<int_type>());
else
thrust::transform(thrust::make_constant_iterator(d), thrust::make_constant_iterator(d) + mRecCount, thrust::make_transform_iterator(dev_ptr1, power_functor<int_type>(p1)), temp, thrust::divides<int_type>());
};
};
alloced_mem.pop_back();
return thrust::raw_pointer_cast(temp);
}
// Element-wise integer arithmetic between two device columns.
// p1 / p2 are decimal-scale exponents: a non-zero value means the matching
// operand is pre-scaled through power_functor (multiplied by 10^p) so both
// fixed-point decimal operands share the same scale before the binary op.
// `reverse` flips the operand order: the result becomes column2 OP column1.
// The output buffer is taken from the allocation pool (alloced_mem) and
// popped, so ownership of the returned raw device pointer passes to the
// caller. op_type is one of "MUL", "ADD", "MINUS"; anything else divides.
// NOTE(review): the MUL branches ignore p1/p2 — presumably the caller
// adjusts the decimal scale for multiplication itself; confirm.
int_type* CudaSet::op(int_type* column1, int_type* column2, string op_type, bool reverse, unsigned int p1, unsigned int p2)
{
if(alloced_mem.empty()) {
alloc_pool(maxRecs);
};
thrust::device_ptr<int_type> temp((int_type*)alloced_mem.back());
thrust::device_ptr<int_type> dev_ptr1(column1);
thrust::device_ptr<int_type> dev_ptr2(column2);
//cout << "OP " << op_type << " " << p1 << " " << p2 << " " << reverse << endl;
// forward order: column1 OP column2
if(reverse == 0) {
if (op_type.compare("MUL") == 0) {
thrust::transform(dev_ptr1, dev_ptr1+mRecCount, dev_ptr2, temp, thrust::multiplies<int_type>());
}
else if (op_type.compare("ADD") == 0) {
// four cases: neither, both, or exactly one operand needs re-scaling
if(!p1 && !p2)
thrust::transform(dev_ptr1, dev_ptr1+mRecCount, dev_ptr2, temp, thrust::plus<int_type>());
else if(p1 && p2) {
thrust::transform(thrust::make_transform_iterator(dev_ptr1, power_functor<int_type>(p1)), thrust::make_transform_iterator(dev_ptr1+mRecCount, power_functor<int_type>(p1)), thrust::make_transform_iterator(dev_ptr2, power_functor<int_type>(p2)), temp, thrust::plus<int_type>());
}
else if (p1)
thrust::transform(thrust::make_transform_iterator(dev_ptr1, power_functor<int_type>(p1)), thrust::make_transform_iterator(dev_ptr1+mRecCount, power_functor<int_type>(p1)), dev_ptr2, temp, thrust::plus<int_type>());
else
thrust::transform(dev_ptr1, dev_ptr1+mRecCount, thrust::make_transform_iterator(dev_ptr2, power_functor<int_type>(p2)), temp, thrust::plus<int_type>());
}
else if (op_type.compare("MINUS") == 0) {
if(!p1 && !p2)
thrust::transform(dev_ptr1, dev_ptr1+mRecCount, dev_ptr2, temp, thrust::minus<int_type>());
else if(p1 && p2)
thrust::transform(thrust::make_transform_iterator(dev_ptr1, power_functor<int_type>(p1)), thrust::make_transform_iterator(dev_ptr1+mRecCount, power_functor<int_type>(p1)), thrust::make_transform_iterator(dev_ptr2, power_functor<int_type>(p2)), temp, thrust::minus<int_type>());
else if (p1)
thrust::transform(thrust::make_transform_iterator(dev_ptr1, power_functor<int_type>(p1)), thrust::make_transform_iterator(dev_ptr1+mRecCount, power_functor<int_type>(p1)), dev_ptr2, temp, thrust::minus<int_type>());
else
thrust::transform(dev_ptr1, dev_ptr1+mRecCount, thrust::make_transform_iterator(dev_ptr2, power_functor<int_type>(p2)), temp, thrust::minus<int_type>());
}
else {
// default: division
if(!p1 && !p2)
thrust::transform(dev_ptr1, dev_ptr1+mRecCount, dev_ptr2, temp, thrust::divides<int_type>());
else if(p1 && p2)
thrust::transform(thrust::make_transform_iterator(dev_ptr1, power_functor<int_type>(p1)), thrust::make_transform_iterator(dev_ptr1+mRecCount, power_functor<int_type>(p1)), thrust::make_transform_iterator(dev_ptr2, power_functor<int_type>(p2)), temp, thrust::divides<int_type>());
else if (p1)
thrust::transform(thrust::make_transform_iterator(dev_ptr1, power_functor<int_type>(p1)), thrust::make_transform_iterator(dev_ptr1+mRecCount, power_functor<int_type>(p1)), dev_ptr2, temp, thrust::divides<int_type>());
else
thrust::transform(dev_ptr1, dev_ptr1+mRecCount, thrust::make_transform_iterator(dev_ptr2, power_functor<int_type>(p2)), temp, thrust::divides<int_type>());
}
}
// reversed order: column2 OP column1 (p1 still scales column1, p2 column2)
else {
if (op_type.compare("MUL") == 0) {
thrust::transform(dev_ptr2, dev_ptr2+mRecCount, dev_ptr1, temp, thrust::multiplies<int_type>());
}
else if (op_type.compare("ADD") == 0) {
if(!p1 && !p2)
thrust::transform(dev_ptr2, dev_ptr2+mRecCount, dev_ptr1, temp, thrust::plus<int_type>());
else if(p1 && p2)
thrust::transform(thrust::make_transform_iterator(dev_ptr2, power_functor<int_type>(p2)), thrust::make_transform_iterator(dev_ptr2+mRecCount, power_functor<int_type>(p2)), thrust::make_transform_iterator(dev_ptr1, power_functor<int_type>(p1)), temp, thrust::plus<int_type>());
else if (p1)
thrust::transform(dev_ptr2, dev_ptr2+mRecCount, thrust::make_transform_iterator(dev_ptr1, power_functor<int_type>(p1)), temp, thrust::plus<int_type>());
else
thrust::transform(thrust::make_transform_iterator(dev_ptr2, power_functor<int_type>(p2)), thrust::make_transform_iterator(dev_ptr2+mRecCount, power_functor<int_type>(p2)), dev_ptr1, temp, thrust::plus<int_type>());
}
else if (op_type.compare("MINUS") == 0) {
if(!p1 && !p2)
thrust::transform(dev_ptr2, dev_ptr2+mRecCount, dev_ptr1, temp, thrust::minus<int_type>());
else if(p1 && p2)
thrust::transform(thrust::make_transform_iterator(dev_ptr2, power_functor<int_type>(p2)), thrust::make_transform_iterator(dev_ptr2+mRecCount, power_functor<int_type>(p2)), thrust::make_transform_iterator(dev_ptr1, power_functor<int_type>(p1)), temp, thrust::minus<int_type>());
else if (p1)
thrust::transform(dev_ptr2, dev_ptr2+mRecCount, thrust::make_transform_iterator(dev_ptr1, power_functor<int_type>(p1)), temp, thrust::minus<int_type>());
else
thrust::transform(thrust::make_transform_iterator(dev_ptr2, power_functor<int_type>(p2)), thrust::make_transform_iterator(dev_ptr2+mRecCount, power_functor<int_type>(p2)), dev_ptr1, temp, thrust::minus<int_type>());
}
else {
if(!p1 && !p2)
thrust::transform(dev_ptr2, dev_ptr2+mRecCount, dev_ptr1, temp, thrust::divides<int_type>());
else if(p1 && p2)
thrust::transform(thrust::make_transform_iterator(dev_ptr2, power_functor<int_type>(p2)), thrust::make_transform_iterator(dev_ptr2+mRecCount, power_functor<int_type>(p2)), thrust::make_transform_iterator(dev_ptr1, power_functor<int_type>(p1)), temp, thrust::divides<int_type>());
else if (p1)
thrust::transform(dev_ptr2, dev_ptr2+mRecCount, thrust::make_transform_iterator(dev_ptr1, power_functor<int_type>(p1)), temp, thrust::divides<int_type>());
else
thrust::transform(thrust::make_transform_iterator(dev_ptr2, power_functor<int_type>(p2)), thrust::make_transform_iterator(dev_ptr2+mRecCount, power_functor<int_type>(p2)), dev_ptr1, temp, thrust::divides<int_type>());
}
}
// hand the pooled buffer to the caller
alloced_mem.pop_back();
return thrust::raw_pointer_cast(temp);
}
// Element-wise float arithmetic between two device columns.
// `reverse` swaps the operand order (column2 OP column1). The result buffer
// comes from the allocation pool and is popped, so the caller owns the
// returned raw device pointer. op_type is "MUL"/"ADD"/"MINUS"; any other
// value divides.
float_type* CudaSet::op(float_type* column1, float_type* column2, string op_type, bool reverse)
{
    if(alloced_mem.empty()) {
        alloc_pool(maxRecs);
    };
    thrust::device_ptr<float_type> res((float_type*)alloced_mem.back());
    // pick operand order up front instead of duplicating every branch
    thrust::device_ptr<float_type> lhs(reverse ? column2 : column1);
    thrust::device_ptr<float_type> rhs(reverse ? column1 : column2);
    if (op_type.compare("MUL") == 0)
        thrust::transform(lhs, lhs + mRecCount, rhs, res, thrust::multiplies<float_type>());
    else if (op_type.compare("ADD") == 0)
        thrust::transform(lhs, lhs + mRecCount, rhs, res, thrust::plus<float_type>());
    else if (op_type.compare("MINUS") == 0)
        thrust::transform(lhs, lhs + mRecCount, rhs, res, thrust::minus<float_type>());
    else
        thrust::transform(lhs, lhs + mRecCount, rhs, res, thrust::divides<float_type>());
    alloced_mem.pop_back();
    return thrust::raw_pointer_cast(res);
}
// Arithmetic between an integer device column and a float scalar `d`.
// The integer column is first promoted to float in a temporary device
// buffer; `reverse` selects scalar OP column instead of column OP scalar.
// The pooled result buffer is popped and returned as a raw device pointer
// owned by the caller.
float_type* CudaSet::op(int_type* column1, float_type d, string op_type, bool reverse)
{
    if(alloced_mem.empty()) {
        alloc_pool(maxRecs);
    };
    // result buffer doubles as the scalar operand: pre-fill it with d
    thrust::device_ptr<float_type> res((float_type*)alloced_mem.back());
    thrust::fill(res, res + mRecCount, d);
    // promote the integer column to float in scratch storage
    thrust::device_ptr<int_type> src(column1);
    thrust::device_ptr<float_type> conv = thrust::device_malloc<float_type>(mRecCount);
    thrust::transform(src, src + mRecCount, conv, long_to_float_type());
    // choose operand order once; the transform writes back into res
    thrust::device_ptr<float_type> lhs = reverse ? res : conv;
    thrust::device_ptr<float_type> rhs = reverse ? conv : res;
    if (op_type.compare("MUL") == 0)
        thrust::transform(lhs, lhs + mRecCount, rhs, res, thrust::multiplies<float_type>());
    else if (op_type.compare("ADD") == 0)
        thrust::transform(lhs, lhs + mRecCount, rhs, res, thrust::plus<float_type>());
    else if (op_type.compare("MINUS") == 0)
        thrust::transform(lhs, lhs + mRecCount, rhs, res, thrust::minus<float_type>());
    else
        thrust::transform(lhs, lhs + mRecCount, rhs, res, thrust::divides<float_type>());
    thrust::device_free(conv);
    alloced_mem.pop_back();
    return thrust::raw_pointer_cast(res);
}
// Arithmetic between a float device column and a float scalar `d`,
// implemented with a constant iterator so the scalar is never materialized.
// `reverse` selects scalar OP column instead of column OP scalar. Returns a
// pooled device buffer (popped from alloced_mem; caller owns it).
float_type* CudaSet::op(float_type* column1, float_type d, string op_type,bool reverse)
{
    if(alloced_mem.empty()) {
        alloc_pool(maxRecs);
    };
    thrust::device_ptr<float_type> res((float_type*)alloced_mem.back());
    thrust::device_ptr<float_type> col(column1);
    auto scalar = thrust::make_constant_iterator(d);
    if (op_type.compare("MUL") == 0) {
        if(reverse == 0)
            thrust::transform(col, col + mRecCount, scalar, res, thrust::multiplies<float_type>());
        else
            thrust::transform(scalar, scalar + mRecCount, col, res, thrust::multiplies<float_type>());
    }
    else if (op_type.compare("ADD") == 0) {
        if(reverse == 0)
            thrust::transform(col, col + mRecCount, scalar, res, thrust::plus<float_type>());
        else
            thrust::transform(scalar, scalar + mRecCount, col, res, thrust::plus<float_type>());
    }
    else if (op_type.compare("MINUS") == 0) {
        if(reverse == 0)
            thrust::transform(col, col + mRecCount, scalar, res, thrust::minus<float_type>());
        else
            thrust::transform(scalar, scalar + mRecCount, col, res, thrust::minus<float_type>());
    }
    else {
        if(reverse == 0)
            thrust::transform(col, col + mRecCount, scalar, res, thrust::divides<float_type>());
        else
            thrust::transform(scalar, scalar + mRecCount, col, res, thrust::divides<float_type>());
    };
    alloced_mem.pop_back();
    return (float_type*)thrust::raw_pointer_cast(res);
}
char CudaSet::loadIndex(const string index_name, const unsigned int segment)
{
FILE* f;
unsigned int bits_encoded, fit_count, vals_count, sz, real_count;
void* d_str;
string f1 = index_name + "." + to_string(segment);
char res;
if(interactive) {
if(index_buffers.find(f1) == index_buffers.end()) {
f = fopen (f1.c_str(), "rb" );
fseek(f, 0, SEEK_END);
long fileSize = ftell(f);
char* buff;
hipHostMalloc(&buff, fileSize, hipHostMallocDefault);
fseek(f, 0, SEEK_SET);
fread(buff, fileSize, 1, f);
fclose(f);
index_buffers[f1] = buff;
};
sz = ((unsigned int*)index_buffers[f1])[0];
idx_dictionary_int[index_name].clear();
for(unsigned int i = 0; i < sz; i++) {
idx_dictionary_int[index_name][((int_type*)(index_buffers[f1]+4+8*i))[0]] = i;
};
vals_count = ((unsigned int*)(index_buffers[f1]+4 +8*sz))[2];
real_count = ((unsigned int*)(index_buffers[f1]+4 +8*sz))[3];
mRecCount = real_count;
res = (index_buffers[f1]+4 +8*sz + (vals_count+2)*int_size)[0];
hipMalloc((void **) &d_str, (vals_count+2)*int_size);
hipMemcpy( d_str, (void *) &((index_buffers[f1]+4 +8*sz)[0]), (vals_count+2)*int_size, hipMemcpyHostToDevice);
if(idx_vals.count(index_name))
hipFree(idx_vals[index_name]);
idx_vals[index_name] = (unsigned long long int*)d_str;
}
else {
f = fopen (f1.c_str(), "rb" );
fread(&sz, 4, 1, f);
int_type* d_array = new int_type[sz];
idx_dictionary_int[index_name].clear();
fread((void*)d_array, sz*int_size, 1, f);
for(unsigned int i = 0; i < sz; i++) {
idx_dictionary_int[index_name][d_array[i]] = i;
//cout << index_name << " " << d_array[i] << " " << i << endl;
};
delete [] d_array;
fread(&fit_count, 4, 1, f);
fread(&bits_encoded, 4, 1, f);
fread(&vals_count, 4, 1, f);
fread(&real_count, 4, 1, f);
mRecCount = real_count;
unsigned long long int* int_array = new unsigned long long int[vals_count+2];
fseek ( f , -16 , SEEK_CUR );
fread((void*)int_array, 1, vals_count*8 + 16, f);
fread(&res, 1, 1, f);
fclose(f);
void* d_str;
hipMalloc((void **) &d_str, (vals_count+2)*int_size);
hipMemcpy( d_str, (void *) int_array, (vals_count+2)*int_size, hipMemcpyHostToDevice);
if(idx_vals.count(index_name))
hipFree(idx_vals[index_name]);
idx_vals[index_name] = (unsigned long long int*)d_str;
}
return res;
}
// Initializes column metadata for a compressed dimension table stored on
// disk under `file_name`. Reads the optional <file>.sort / <file>.presort
// files (lists of the fields each segment is sorted / presorted on), then
// registers every column's name, type, decimal scale and, for int/decimal
// columns, its compression type — taken from the sixth 32-bit word of the
// column's segment-0 header file. The four queues are consumed in lockstep,
// one entry per column.
void CudaSet::initialize(queue<string> &nameRef, queue<string> &typeRef, queue<int> &sizeRef, queue<int> &colsRef, size_t Recs, string file_name) // compressed data for DIM tables
{
mColumnCount = (unsigned int)nameRef.size();
FILE* f;
string f1;
unsigned int cnt;
char buffer[4000];
string str;
not_compressed = 0;
mRecCount = Recs;
hostRecCount = Recs;
totalRecs = Recs;
load_file_name = file_name;
// optional sort-order metadata: [u32 count]{[u32 len][len bytes of name]}*
f1 = file_name + ".sort";
f = fopen (f1.c_str() , "rb" );
if(f) {
unsigned int sz, idx;
fread((char *)&sz, 4, 1, f);
for(unsigned int j = 0; j < sz; j++) {
fread((char *)&idx, 4, 1, f);
fread(buffer, idx, 1, f);
str.assign(buffer, idx);
sorted_fields.push(str);
if(verbose)
cout << "segment sorted on " << str << endl;
};
fclose(f);
};
// same layout for the presort metadata
f1 = file_name + ".presort";
f = fopen (f1.c_str() , "rb" );
if(f) {
unsigned int sz, idx;
fread((char *)&sz, 4, 1, f);
for(unsigned int j = 0; j < sz; j++) {
fread((char *)&idx, 4, 1, f);
fread(buffer, idx, 1, f);
str.assign(buffer, idx);
presorted_fields.push(str);
if(verbose)
cout << "presorted on " << str << endl;
};
fclose(f);
};
tmp_table = 0;
filtered = 0;
for(unsigned int i=0; i < mColumnCount; i++) {
//f1 = file_name + "." + nameRef.front() + ".0";
//f = fopen (f1.c_str() , "rb" );
//fread((char *)&bytes, 4, 1, f); //need to read metadata such as type and length
//fclose(f);
columnNames.push_back(nameRef.front());
cols[colsRef.front()] = nameRef.front();
// int/decimal columns: read the compression type (6th u32) from the
// column's segment-0 header file
if (((typeRef.front()).compare("decimal") == 0) || ((typeRef.front()).compare("int") == 0)) {
f1 = file_name + "." + nameRef.front() + ".0";
f = fopen (f1.c_str() , "rb" );
if(!f) {
cout << "Couldn't find field " << nameRef.front() << endl;
exit(0);
};
for(unsigned int j = 0; j < 6; j++)
fread((char *)&cnt, 4, 1, f);
fclose(f);
compTypes[nameRef.front()] = cnt;
};
if((typeRef.front()).compare("timestamp") == 0)
ts_cols[nameRef.front()] = 1;
else
ts_cols[nameRef.front()] = 0;
// type codes: 0 = integer (incl. timestamp/decimal), 1 = float, 2 = char
if ((typeRef.front()).compare("int") == 0 || (typeRef.front()).compare("timestamp") == 0) {
type[nameRef.front()] = 0;
decimal[nameRef.front()] = 0;
decimal_zeroes[nameRef.front()] = 0;
h_columns_int[nameRef.front()] = thrust::host_vector<int_type, pinned_allocator<int_type> >();
d_columns_int[nameRef.front()] = thrust::device_vector<int_type>();
}
else if ((typeRef.front()).compare("float") == 0) {
type[nameRef.front()] = 1;
decimal[nameRef.front()] = 0;
h_columns_float[nameRef.front()] = thrust::host_vector<float_type, pinned_allocator<float_type> >();
d_columns_float[nameRef.front()] = thrust::device_vector<float_type >();
}
else if ((typeRef.front()).compare("decimal") == 0) {
// decimals are stored as scaled integers; sizeRef holds the scale
type[nameRef.front()] = 0;
decimal[nameRef.front()] = 1;
decimal_zeroes[nameRef.front()] = sizeRef.front();
h_columns_int[nameRef.front()] = thrust::host_vector<int_type, pinned_allocator<int_type> >();
d_columns_int[nameRef.front()] = thrust::device_vector<int_type>();
}
else {
// fixed-width char column, backed by an on-disk string dictionary
type[nameRef.front()] = 2;
decimal[nameRef.front()] = 0;
h_columns_char[nameRef.front()] = nullptr;
d_columns_char[nameRef.front()] = nullptr;
char_size[nameRef.front()] = sizeRef.front();
string_map[nameRef.front()] = file_name + "." + nameRef.front();
};
nameRef.pop();
typeRef.pop();
sizeRef.pop();
colsRef.pop();
};
};
// Registers column metadata for a freshly created (empty) table: names,
// types, decimal scales and empty host/device vectors. No data is loaded;
// the four queues are consumed in lockstep, one entry per column.
void CudaSet::initialize(queue<string> &nameRef, queue<string> &typeRef, queue<int> &sizeRef, queue<int> &colsRef, size_t Recs)
{
    mColumnCount = (unsigned int)nameRef.size();
    tmp_table = 0;
    filtered = 0;
    mRecCount = 0;
    hostRecCount = Recs;
    segCount = 0;
    while(!nameRef.empty()) {
        const string name = nameRef.front();
        const string col_type = typeRef.front();
        columnNames.push_back(name);
        cols[colsRef.front()] = name;
        // flag timestamp columns so date/time handling can treat them specially
        ts_cols[name] = (col_type.compare("timestamp") == 0);
        // type codes: 0 = integer (incl. timestamp/decimal), 1 = float, 2 = char
        if (col_type.compare("int") == 0 || col_type.compare("timestamp") == 0) {
            type[name] = 0;
            decimal[name] = 0;
            decimal_zeroes[name] = 0;
            h_columns_int[name] = thrust::host_vector<int_type, pinned_allocator<int_type> >();
            d_columns_int[name] = thrust::device_vector<int_type>();
        }
        else if (col_type.compare("float") == 0) {
            type[name] = 1;
            decimal[name] = 0;
            h_columns_float[name] = thrust::host_vector<float_type, pinned_allocator<float_type> >();
            d_columns_float[name] = thrust::device_vector<float_type>();
        }
        else if (col_type.compare("decimal") == 0) {
            // decimals are stored as scaled integers; sizeRef holds the scale
            type[name] = 0;
            decimal[name] = 1;
            decimal_zeroes[name] = sizeRef.front();
            h_columns_int[name] = thrust::host_vector<int_type, pinned_allocator<int_type> >();
            d_columns_int[name] = thrust::device_vector<int_type>();
        }
        else {
            // fixed-width char column
            type[name] = 2;
            decimal[name] = 0;
            h_columns_char[name] = nullptr;
            d_columns_char[name] = nullptr;
            char_size[name] = sizeRef.front();
        };
        nameRef.pop();
        typeRef.pop();
        sizeRef.pop();
        colsRef.pop();
    };
};
// Minimal initializer: records the table dimensions only; no columns are set up.
void CudaSet::initialize(const size_t RecordCount, const unsigned int ColumnCount)
{
    filtered = 0;
    mColumnCount = ColumnCount;
    mRecCount = RecordCount;
    hostRecCount = RecordCount;
};
// Initializes this set as the result schema of a SELECT: for every requested
// column, metadata (type, decimal scale, char size) is copied from whichever
// registered set in varNames owns it.
// Fix vs. the previous version: `a` was used uninitialized (undefined
// behavior) when varNames was empty; it is now nullptr-initialized and the
// empty case fails with a message, matching the file's error style.
// NOTE(review): if a column is found in no set, `a` still ends up pointing
// at the last set searched and default metadata entries get created — that
// pre-existing behavior is preserved.
void CudaSet::initialize(queue<string> op_sel, const queue<string> op_sel_as)
{
    mRecCount = 0;
    mColumnCount = (unsigned int)op_sel.size();
    segCount = 1;
    not_compressed = 1;
    filtered = 0;
    col_aliases = op_sel_as;
    unsigned int i = 0;
    CudaSet *a = nullptr;
    while(!op_sel.empty()) {
        // find a registered set that owns this column
        for(auto it = varNames.begin(); it != varNames.end(); it++) {
            a = it->second;
            if(std::find(a->columnNames.begin(), a->columnNames.end(), op_sel.front()) != a->columnNames.end())
                break;
        };
        if(!a) {
            cout << "Couldn't find column " << op_sel.front() << endl;
            exit(0);
        };
        type[op_sel.front()] = a->type[op_sel.front()];
        cols[i] = op_sel.front();
        decimal[op_sel.front()] = a->decimal[op_sel.front()];
        decimal_zeroes[op_sel.front()] = a->decimal_zeroes[op_sel.front()];
        columnNames.push_back(op_sel.front());
        if (a->type[op_sel.front()] == 0) {
            d_columns_int[op_sel.front()] = thrust::device_vector<int_type>();
            //h_columns_int[op_sel.front()] = thrust::host_vector<int_type, pinned_allocator<int_type> >();
            h_columns_int[op_sel.front()] = thrust::host_vector<int_type>();
        }
        else if (a->type[op_sel.front()] == 1) {
            d_columns_float[op_sel.front()] = thrust::device_vector<float_type>();
            //h_columns_float[op_sel.front()] = thrust::host_vector<float_type, pinned_allocator<float_type> >();
            h_columns_float[op_sel.front()] = thrust::host_vector<float_type>();
        }
        else {
            h_columns_char[op_sel.front()] = nullptr;
            d_columns_char[op_sel.front()] = nullptr;
            char_size[op_sel.front()] = a->char_size[op_sel.front()];
        };
        i++;
        op_sel.pop();
    };
}
// Initializes this set as the result schema of a join between `a` and `b`:
// each requested column keeps the metadata (type, decimal scale, char size,
// string mapping, timestamp flag) of whichever input set owns it, with
// columns found in `a` taking precedence over `b`. Columns in op_sel that
// exist in neither input are silently skipped.
void CudaSet::initialize(CudaSet* a, CudaSet* b, queue<string> op_sel, queue<string> op_sel_as)
{
mRecCount = 0;
mColumnCount = 0;
// count how many requested columns actually exist in a or b
queue<string> q_cnt(op_sel);
unsigned int i = 0;
set<string> field_names;
while(!q_cnt.empty()) {
if( std::find(a->columnNames.begin(), a->columnNames.end(), q_cnt.front()) != a->columnNames.end() ||
std::find(b->columnNames.begin(), b->columnNames.end(), q_cnt.front()) != b->columnNames.end()) {
field_names.insert(q_cnt.front());
};
q_cnt.pop();
}
mColumnCount = (unsigned int)field_names.size();
maxRecs = b->maxRecs;
segCount = 1;
filtered = 0;
not_compressed = 1;
col_aliases = op_sel_as;
i = 0;
while(!op_sel.empty()) {
// skip duplicates already registered on a previous pass
if(std::find(columnNames.begin(), columnNames.end(), op_sel.front()) == columnNames.end()) {
// prefer metadata from `a` when both sides have the column
if(std::find(a->columnNames.begin(), a->columnNames.end(), op_sel.front()) != a->columnNames.end()) {
cols[i] = op_sel.front();
decimal[op_sel.front()] = a->decimal[op_sel.front()];
columnNames.push_back(op_sel.front());
type[op_sel.front()] = a->type[op_sel.front()];
ts_cols[op_sel.front()] = a->ts_cols[op_sel.front()];
if (a->type[op_sel.front()] == 0) {
d_columns_int[op_sel.front()] = thrust::device_vector<int_type>();
h_columns_int[op_sel.front()] = thrust::host_vector<int_type, pinned_allocator<int_type> >();
if(a->string_map.find(op_sel.front()) != a->string_map.end()) {
string_map[op_sel.front()] = a->string_map[op_sel.front()];
};
decimal[op_sel.front()] = a->decimal[op_sel.front()];
decimal_zeroes[op_sel.front()] = a->decimal_zeroes[op_sel.front()];
}
else if (a->type[op_sel.front()] == 1) {
d_columns_float[op_sel.front()] = thrust::device_vector<float_type>();
h_columns_float[op_sel.front()] = thrust::host_vector<float_type, pinned_allocator<float_type> >();
}
else {
h_columns_char[op_sel.front()] = nullptr;
d_columns_char[op_sel.front()] = nullptr;
char_size[op_sel.front()] = a->char_size[op_sel.front()];
string_map[op_sel.front()] = a->string_map[op_sel.front()];
};
i++;
}
else if(std::find(b->columnNames.begin(), b->columnNames.end(), op_sel.front()) != b->columnNames.end()) {
columnNames.push_back(op_sel.front());
cols[i] = op_sel.front();
decimal[op_sel.front()] = b->decimal[op_sel.front()];
type[op_sel.front()] = b->type[op_sel.front()];
ts_cols[op_sel.front()] = b->ts_cols[op_sel.front()];
if (b->type[op_sel.front()] == 0) {
d_columns_int[op_sel.front()] = thrust::device_vector<int_type>();
h_columns_int[op_sel.front()] = thrust::host_vector<int_type, pinned_allocator<int_type> >();
if(b->string_map.find(op_sel.front()) != b->string_map.end()) {
string_map[op_sel.front()] = b->string_map[op_sel.front()];
};
decimal[op_sel.front()] = b->decimal[op_sel.front()];
decimal_zeroes[op_sel.front()] = b->decimal_zeroes[op_sel.front()];
}
else if (b->type[op_sel.front()] == 1) {
d_columns_float[op_sel.front()] = thrust::device_vector<float_type>();
h_columns_float[op_sel.front()] = thrust::host_vector<float_type, pinned_allocator<float_type> >();
}
else {
h_columns_char[op_sel.front()] = nullptr;
d_columns_char[op_sel.front()] = nullptr;
char_size[op_sel.front()] = b->char_size[op_sel.front()];
string_map[op_sel.front()] = b->string_map[op_sel.front()];
};
i++;
}
}
op_sel.pop();
};
};
// Maps a comparison-operator code to its mirror image, used when the two
// sides of a predicate are swapped:  a OP b  <=>  b reverse_op(OP) a.
// Codes: 1 = '<', 2 = '>', 5 = '<=', 6 = '>='; symmetric operators
// (equality etc.) are returned unchanged.
int_type reverse_op(int_type op_type)
{
    switch(op_type) {
    case 2:             // >   becomes  <
        return 1;
    case 1:             // <   becomes  >
        return 2;
    case 6:             // >=  becomes  <=
        return 5;
    case 5:             // <=  becomes  >=
        return 6;
    default:
        return op_type;
    };
}
// Returns the number of free bytes of device memory.
// Both counters are zero-initialized so that a failing hipMemGetInfo call
// (return code is not checked here) yields 0 instead of an uninitialized read.
size_t getFreeMem()
{
    size_t available = 0, total = 0;
    hipMemGetInfo(&available, &total);
    return available;
} ;
// Prepares device storage for the given fields of `a`. A filtered set stages
// its data through the shared scratch buffer (alloced_tmp), which is grown
// here to fit the largest segment of the source table; an unfiltered set
// simply gets each missing column allocated directly on the GPU.
void allocColumns(CudaSet* a, queue<string> fields)
{
    if(a->filtered) {
        // filtered sets always read through their source table's segments
        CudaSet* t = varNames[a->source_name];
        if(int_size*t->maxRecs > alloced_sz) {
            if(alloced_sz) {
                hipFree(alloced_tmp);
            };
            hipMalloc((void **) &alloced_tmp, int_size*t->maxRecs);
            alloced_sz = int_size*t->maxRecs;
        }
    }
    else {
        while(!fields.empty()) {
            if(var_exists(a, fields.front()) && !a->onDevice(fields.front())) {
                a->allocColumnOnDevice(fields.front(), a->maxRecs);
            }
            fields.pop();
        };
    };
}
// Lands one staged segment of `field` into a's device column.
// prm_index == 'R' means a row-permutation filter is active: values are
// gathered through a->prm_d and placed at element offset `count`.
// Otherwise the whole segment is copied verbatim and `a` inherits the
// source record count. `segment` is unused here — the data has already
// been staged into the scratch buffer by the caller.
void gatherColumns(CudaSet* a, CudaSet* t, string field, unsigned int segment, size_t& count)
{
if(!a->onDevice(field)) {
a->allocColumnOnDevice(field, a->maxRecs);
};
if(a->prm_index == 'R') {
mygather(field, a, t, count, a->mRecCount);
}
else {
mycopy(field, a, t, count, t->mRecCount);
a->mRecCount = t->mRecCount;
};
}
// Finalizes columns that were copied to the GPU in bit-packed form.
// cpy_bits maps a column name to its packed width (8/16/32, anything else
// is treated as full 64-bit) and cpy_init_val to the base value the packed
// offsets are relative to. Each packed value is widened to int64 in the
// global scratch buffer, the base value is added back, and float columns
// are then reinterpreted from int64 bits back to float via long_to_float.
// Timestamp columns are skipped unless `ts` is set; each column is
// processed at most once.
void copyFinalize(CudaSet* a, queue<string> fields, bool ts)
{
set<string> uniques;
// scratch holds mRecCount widened 8-byte values
if(scratch.size() < a->mRecCount*8)
scratch.resize(a->mRecCount*8);
thrust::device_ptr<int_type> tmp((int_type*)thrust::raw_pointer_cast(scratch.data()));
while(!fields.empty()) {
if (uniques.count(fields.front()) == 0 && var_exists(a, fields.front()) && cpy_bits.find(fields.front()) != cpy_bits.end() && (!a->ts_cols[fields.front()] || ts)) {
// widen the packed values to int64 (source buffer is reinterpreted
// at the packed width; float columns hold packed ints at this point)
if(cpy_bits[fields.front()] == 8) {
if(a->type[fields.front()] != 1) {
thrust::device_ptr<unsigned char> src((unsigned char*)thrust::raw_pointer_cast(a->d_columns_int[fields.front()].data()));
thrust::transform(src, src+a->mRecCount, tmp, to_int64<unsigned char>());
}
else {
thrust::device_ptr<unsigned char> src((unsigned char*)thrust::raw_pointer_cast(a->d_columns_float[fields.front()].data()));
thrust::transform(src, src+a->mRecCount, tmp, to_int64<unsigned char>());
};
}
else if(cpy_bits[fields.front()] == 16) {
if(a->type[fields.front()] != 1) {
thrust::device_ptr<unsigned short int> src((unsigned short int*)thrust::raw_pointer_cast(a->d_columns_int[fields.front()].data()));
thrust::transform(src, src+a->mRecCount, tmp, to_int64<unsigned short>());
}
else {
thrust::device_ptr<unsigned short int> src((unsigned short int*)thrust::raw_pointer_cast(a->d_columns_float[fields.front()].data()));
thrust::transform(src, src+a->mRecCount, tmp, to_int64<unsigned short>());
};
}
else if(cpy_bits[fields.front()] == 32) {
if(a->type[fields.front()] != 1) {
thrust::device_ptr<unsigned int> src((unsigned int*)thrust::raw_pointer_cast(a->d_columns_int[fields.front()].data()));
thrust::transform(src, src+a->mRecCount, tmp, to_int64<unsigned int>());
}
else {
thrust::device_ptr<unsigned int> src((unsigned int*)thrust::raw_pointer_cast(a->d_columns_float[fields.front()].data()));
thrust::transform(src, src+a->mRecCount, tmp, to_int64<unsigned int>());
};
}
else {
// already full width — plain copy into the scratch buffer
if(a->type[fields.front()] != 1) {
thrust::device_ptr<int_type> src((int_type*)thrust::raw_pointer_cast(a->d_columns_int[fields.front()].data()));
thrust::copy(src, src+a->mRecCount, tmp);
}
else {
thrust::device_ptr<int_type> src((int_type*)thrust::raw_pointer_cast(a->d_columns_float[fields.front()].data()));
thrust::copy(src, src+a->mRecCount, tmp);
};
};
// add the per-column base value back to every element
thrust::constant_iterator<int_type> iter(cpy_init_val[fields.front()]);
if(a->type[fields.front()] != 1) {
thrust::transform(tmp, tmp + a->mRecCount, iter, a->d_columns_int[fields.front()].begin(), thrust::plus<int_type>());
}
else {
// floats: rebase as int64, then reinterpret the bits back to float
thrust::device_ptr<int_type> dest((int_type*)thrust::raw_pointer_cast(a->d_columns_float[fields.front()].data()));
thrust::transform(tmp, tmp + a->mRecCount, iter, dest, thrust::plus<int_type>());
thrust::transform(dest, dest+a->mRecCount, a->d_columns_float[fields.front()].begin(), long_to_float());
};
};
uniques.insert(fields.front());
fields.pop();
};
}
// Copies the requested fields of `segment` onto the GPU, appending at
// element offset `count`. For a filtered set the segment is first filtered
// (when flt is set) and the destination columns optionally resized (rsz);
// the source segment is then staged into the scratch buffer and gathered
// through the filter permutation. Duplicate field names are copied once.
// Fix vs. the previous version: removed the unused local `auto f(fields);`.
void copyColumns(CudaSet* a, queue<string> fields, unsigned int segment, size_t& count, bool rsz, bool flt)
{
    //std::clock_t start1 = std::clock();
    set<string> uniques;
    if(a->filtered) { //filter the segment
        if(flt) {
            filter_op(a->fil_s, a->fil_f, segment);
        };
        if(rsz && a->mRecCount) {
            // grow the device columns so the filtered segment can be appended
            queue<string> fields1(fields);
            while(!fields1.empty()) {
                a->resizeDeviceColumn(a->devRecCount + a->mRecCount, fields1.front());
                fields1.pop();
            };
            a->devRecCount = a->devRecCount + a->mRecCount;
        };
    };
    // reset per-copy compression bookkeeping; CopyColumnToGpu refills it
    cpy_bits.clear();
    cpy_init_val.clear();
    while(!fields.empty()) {
        if (uniques.count(fields.front()) == 0 && var_exists(a, fields.front())) {
            if(a->filtered) {
                if(a->mRecCount) {
                    CudaSet *t = varNames[a->source_name];
                    alloced_switch = 1;   // route the copy into the shared staging buffer
                    t->CopyColumnToGpu(fields.front(), segment);
                    gatherColumns(a, t, fields.front(), segment, count);
                    alloced_switch = 0;
                };
            }
            else {
                if(a->mRecCount) {
                    a->CopyColumnToGpu(fields.front(), segment, count);
                };
            };
            uniques.insert(fields.front());
        };
        fields.pop();
    };
    //std::cout<< "copy time " << ( ( std::clock() - start1 ) / (double)CLOCKS_PER_SEC ) <<'\n';
}
// Gathers one staged segment of `colname` from the scratch buffer
// (alloced_tmp) into a's device column at element offset `offset`, using
// the filter permutation a->prm_d (g_size gathered rows). Columns staged in
// bit-packed form (present in cpy_bits) are gathered at their packed width
// of 8/16/32/64 bits, with the destination offset applied in packed-element
// units from the start of the column buffer; widening back to 64 bits
// happens later in copyFinalize. Columns absent from cpy_bits are gathered
// at full width (delta-compressed or uncompressed data).
void mygather(string colname, CudaSet* a, CudaSet* t, size_t offset, size_t g_size)
{
if(t->type[colname] != 1 ) {
// integer (and dictionary-encoded string / decimal) columns
if(cpy_bits.find(colname) != cpy_bits.end()) { // non-delta compression
if(cpy_bits[colname] == 8) {
thrust::device_ptr<unsigned char> d_col_source((unsigned char*)alloced_tmp);
thrust::device_ptr<unsigned char> d_col_dest((unsigned char*)thrust::raw_pointer_cast(a->d_columns_int[colname].data()));
thrust::gather(a->prm_d.begin(), a->prm_d.begin() + g_size, d_col_source, d_col_dest + offset);
}
else if(cpy_bits[colname] == 16) {
thrust::device_ptr<unsigned short int> d_col_source((unsigned short int*)alloced_tmp);
thrust::device_ptr<unsigned short int> d_col_dest((unsigned short int*)thrust::raw_pointer_cast(a->d_columns_int[colname].data()));
thrust::gather(a->prm_d.begin(), a->prm_d.begin() + g_size, d_col_source, d_col_dest + offset);
}
else if(cpy_bits[colname] == 32) {
thrust::device_ptr<unsigned int> d_col_source((unsigned int*)alloced_tmp);
thrust::device_ptr<unsigned int> d_col_dest((unsigned int*)thrust::raw_pointer_cast(a->d_columns_int[colname].data()));
thrust::gather(a->prm_d.begin(), a->prm_d.begin() + g_size, d_col_source, d_col_dest + offset);
}
else if(cpy_bits[colname] == 64) {
thrust::device_ptr<int_type> d_col((int_type*)alloced_tmp);
thrust::gather(a->prm_d.begin(), a->prm_d.begin() + g_size, d_col, a->d_columns_int[colname].begin() + offset);
};
}
else {
// full-width gather for columns without packed staging
thrust::device_ptr<int_type> d_col((int_type*)alloced_tmp);
thrust::gather(a->prm_d.begin(), a->prm_d.begin() + g_size, d_col, a->d_columns_int[colname].begin() + offset);
};
}
else {
// float columns: same width dispatch, but into d_columns_float
if(cpy_bits.find(colname) != cpy_bits.end()) { // non-delta compression
if(cpy_bits[colname] == 8) {
thrust::device_ptr<unsigned char> d_col_source((unsigned char*)alloced_tmp);
thrust::device_ptr<unsigned char> d_col_dest((unsigned char*)thrust::raw_pointer_cast(a->d_columns_float[colname].data()));
thrust::gather(a->prm_d.begin(), a->prm_d.begin() + g_size, d_col_source, d_col_dest + offset);
}
else if(cpy_bits[colname] == 16) {
thrust::device_ptr<unsigned short int> d_col_source((unsigned short int*)alloced_tmp);
thrust::device_ptr<unsigned short int> d_col_dest((unsigned short int*)thrust::raw_pointer_cast(a->d_columns_float[colname].data()));
thrust::gather(a->prm_d.begin(), a->prm_d.begin() + g_size, d_col_source, d_col_dest + offset);
}
else if(cpy_bits[colname] == 32) {
thrust::device_ptr<unsigned int> d_col_source((unsigned int*)alloced_tmp);
thrust::device_ptr<unsigned int> d_col_dest((unsigned int*)thrust::raw_pointer_cast(a->d_columns_float[colname].data()));
thrust::gather(a->prm_d.begin(), a->prm_d.begin() + g_size, d_col_source, d_col_dest + offset);
}
else if(cpy_bits[colname] == 64) {
thrust::device_ptr<int_type> d_col((int_type*)alloced_tmp);
thrust::gather(a->prm_d.begin(), a->prm_d.begin() + g_size, d_col, a->d_columns_float[colname].begin() + offset);
};
}
else {
thrust::device_ptr<float_type> d_col((float_type*)alloced_tmp);
thrust::gather(a->prm_d.begin(), a->prm_d.begin() + g_size, d_col, a->d_columns_float[colname].begin() + offset);
};
}
};
// Copies one staged segment of `colname` from the scratch buffer
// (alloced_tmp) into a's device column at element offset `offset`
// (g_size rows, no permutation). Columns staged in bit-packed form
// (present in cpy_bits) are copied at their packed width of 8/16/32/64
// bits; widening happens later in copyFinalize. As in mygather, the
// destination offset is applied in packed-element units from the start of
// the column buffer.
// Fix vs. the previous version: the 16-bit branches applied the offset
// twice — once in 8-byte int_type units (data()+offset) and again in
// 2-byte units (d_col_dest + offset) — inconsistent with the 8/32/64-bit
// branches here and with mygather's 16-bit branch.
void mycopy(string colname, CudaSet* a, CudaSet* t, size_t offset, size_t g_size)
{
    if(t->type[colname] != 1) {
        // integer (and dictionary-encoded string / decimal) columns
        if(cpy_bits.find(colname) != cpy_bits.end()) { // non-delta compression
            if(cpy_bits[colname] == 8) {
                thrust::device_ptr<unsigned char> d_col_source((unsigned char*)alloced_tmp);
                thrust::device_ptr<unsigned char> d_col_dest((unsigned char*)thrust::raw_pointer_cast(a->d_columns_int[colname].data()));
                thrust::copy(d_col_source, d_col_source + g_size, d_col_dest + offset);
            }
            else if(cpy_bits[colname] == 16) {
                thrust::device_ptr<unsigned short int> d_col_source((unsigned short int*)alloced_tmp);
                thrust::device_ptr<unsigned short int> d_col_dest((unsigned short int*)thrust::raw_pointer_cast(a->d_columns_int[colname].data()));
                thrust::copy(d_col_source, d_col_source + g_size, d_col_dest + offset);
            }
            else if(cpy_bits[colname] == 32) {
                thrust::device_ptr<unsigned int> d_col_source((unsigned int*)alloced_tmp);
                thrust::device_ptr<unsigned int> d_col_dest((unsigned int*)thrust::raw_pointer_cast(a->d_columns_int[colname].data()));
                thrust::copy(d_col_source, d_col_source + g_size, d_col_dest + offset);
            }
            else if(cpy_bits[colname] == 64) {
                thrust::device_ptr<int_type> d_col_source((int_type*)alloced_tmp);
                thrust::copy(d_col_source, d_col_source + g_size, a->d_columns_int[colname].begin() + offset);
            };
        }
        else {
            // full-width copy for columns without packed staging
            thrust::device_ptr<int_type> d_col((int_type*)alloced_tmp);
            thrust::copy(d_col, d_col + g_size, a->d_columns_int[colname].begin() + offset);
        };
    }
    else {
        // float columns: same width dispatch, but into d_columns_float
        if(cpy_bits.find(colname) != cpy_bits.end()) { // non-delta compression
            if(cpy_bits[colname] == 8) {
                thrust::device_ptr<unsigned char> d_col_source((unsigned char*)alloced_tmp);
                thrust::device_ptr<unsigned char> d_col_dest((unsigned char*)thrust::raw_pointer_cast(a->d_columns_float[colname].data()));
                thrust::copy(d_col_source, d_col_source + g_size, d_col_dest + offset);
            }
            else if(cpy_bits[colname] == 16) {
                thrust::device_ptr<unsigned short int> d_col_source((unsigned short int*)alloced_tmp);
                thrust::device_ptr<unsigned short int> d_col_dest((unsigned short int*)thrust::raw_pointer_cast(a->d_columns_float[colname].data()));
                thrust::copy(d_col_source, d_col_source + g_size, d_col_dest + offset);
            }
            else if(cpy_bits[colname] == 32) {
                thrust::device_ptr<unsigned int> d_col_source((unsigned int*)alloced_tmp);
                thrust::device_ptr<unsigned int> d_col_dest((unsigned int*)thrust::raw_pointer_cast(a->d_columns_float[colname].data()));
                thrust::copy(d_col_source, d_col_source + g_size, d_col_dest + offset);
            }
            else if(cpy_bits[colname] == 64) {
                thrust::device_ptr<int_type> d_col_source((int_type*)alloced_tmp);
                thrust::copy(d_col_source, d_col_source + g_size, a->d_columns_float[colname].begin() + offset);
            };
        }
        else {
            thrust::device_ptr<float_type> d_col((float_type*)alloced_tmp);
            thrust::copy(d_col, d_col + g_size, a->d_columns_float[colname].begin() + offset);
        };
    };
};
// Loads the columns listed in c1 (restricted to those actually present in
// `right`, with the join key f2 queued last) for every segment in
// [start_segment, end_segment). rcount is set to the per-segment maximum
// record count; rsz/flt are forwarded to copyColumns for filtered sets.
// Returns the total number of records loaded and stores it in
// right->mRecCount.
size_t load_queue(queue<string> c1, CudaSet* right, string f2, size_t &rcount,
unsigned int start_segment, unsigned int end_segment, bool rsz, bool flt)
{
// keep only columns that exist in `right`; f2 is appended last so the
// join key ends up loaded after the payload columns
queue<string> cc;
while(!c1.empty()) {
if(std::find(right->columnNames.begin(), right->columnNames.end(), c1.front()) != right->columnNames.end()) {
if(f2 != c1.front() ) {
cc.push(c1.front());
};
};
c1.pop();
};
if(std::find(right->columnNames.begin(), right->columnNames.end(), f2) != right->columnNames.end()) {
cc.push(f2);
};
if(right->filtered) {
allocColumns(right, cc);
};
rcount = right->maxRecs;
// pre-allocate full capacity for unfiltered sets; filtered sets grow
// per segment inside copyColumns instead
queue<string> ct(cc);
while(!ct.empty()) {
if(right->filtered && rsz) {
right->mRecCount = 0;
}
else {
right->allocColumnOnDevice(ct.front(), rcount*right->segCount);
};
ct.pop();
};
size_t cnt_r = 0;
right->devRecCount = 0;
// load and append each segment; mRecCount holds the per-segment count
for(unsigned int i = start_segment; i < end_segment; i++) {
if(!right->filtered)
copyColumns(right, cc, i, cnt_r, rsz, 0);
else
copyColumns(right, cc, i, cnt_r, rsz, flt);
cnt_r = cnt_r + right->mRecCount;
};
right->mRecCount = cnt_r;
return cnt_r;
}
// Returns the widest string length (in bytes) used by any column of `a`:
// explicit char columns (type 2) contribute their char_size, and
// dictionary-mapped int columns (type 0 with a string_map entry) contribute
// the mapped column's length from the data dictionary. Minimum is 8.
size_t max_char(CudaSet* a)
{
    size_t widest = 8;
    for (const auto& cname : a->columnNames) {
        if (a->type[cname] == 2) {
            if (a->char_size[cname] > widest)
                widest = a->char_size[cname];
        }
        else if (a->type[cname] == 0 && a->string_map.find(cname) != a->string_map.end()) {
            // string_map entries look like "table.column"
            auto mapped = a->string_map[cname];
            auto dot = mapped.find_first_of(".");
            auto len = data_dict[mapped.substr(0, dot)][mapped.substr(dot + 1)].col_length;
            if (len > widest)
                widest = len;
        }
    }
    return widest;
}
// Returns the widest char_size among the char-typed (type 2) columns named
// in field_names; minimum is 8. The queue is taken by value and consumed.
size_t max_char(CudaSet* a, queue<string> field_names)
{
    size_t widest = 8;
    while (!field_names.empty()) {
        const string col = field_names.front();
        field_names.pop();
        if (a->type[col] == 2 && a->char_size[col] > widest)
            widest = a->char_size[col];
    }
    return widest;
}
// Decides how many segments table `a` must be split into so the listed
// columns fit in GPU memory; sets a->segCount and a->maxRecs when the
// estimated footprint exceeds a third of free device memory.
void setSegments(CudaSet* a, queue<string> cols)
{
size_t mem_available = getFreeMem();
size_t tot_sz = 0;
// estimate bytes per record across the requested columns
while(!cols.empty()) {
if(a->type[cols.front()] != 2)
tot_sz = tot_sz + int_size;
else
tot_sz = tot_sz + a->char_size[cols.front()];
cols.pop();
};
// NOTE(review): the trigger threshold divides by 3 but the segment count
// divides by 5 — presumably extra headroom per segment; confirm intended.
if(a->mRecCount*tot_sz > mem_available/3) { //default is 3
a->segCount = (a->mRecCount*tot_sz)/(mem_available/5) + 1;
a->maxRecs = (a->mRecCount/a->segCount)+1;
};
};
// Refines a host-side sort permutation by a char key: gathers the fixed-
// width (len-byte) strings of `key` in `permutation` order into scratch
// buffer `tmp`, then stable-sorts them, updating `permutation` in place.
// SortType "DESC" sorts descending; anything else ascending.
void update_permutation_char_host(char* key, unsigned int* permutation, size_t RecCount, string SortType, char* tmp, unsigned int len)
{
    str_gather_host(permutation, RecCount, (void*)key, (void*)tmp, len);
    const bool descending = (SortType.compare("DESC") == 0);
    str_sort_host(tmp, RecCount, permutation, descending ? 1 : 0, len);
}
// Reorders the device-resident char column `key` (len bytes per record)
// according to `permutation`, using `tmp` as device scratch of the same size.
void apply_permutation_char(char* key, unsigned int* permutation, size_t RecCount, char* tmp, unsigned int len)
{
    const size_t total_bytes = RecCount * len;
    // stash the keys in the scratch buffer, then gather them back in order
    hipMemcpy((void*)tmp, (void*)key, total_bytes, hipMemcpyDeviceToDevice);
    str_gather((void*)permutation, RecCount, (void*)tmp, (void*)key, len);
}
// Gathers `key` strings (len bytes each) in `permutation` order into `res`
// on the host; `key` itself is left untouched.
void apply_permutation_char_host(char* key, unsigned int* permutation, size_t RecCount, char* res, unsigned int len)
{
str_gather_host(permutation, RecCount, (void*)key, (void*)res, len);
}
// Applies the filter expression attached to derived set `s` (varNames[s])
// against one segment of its source table `f`, producing in b->prm_d a
// permutation of the source row indexes that satisfy the filter.
void filter_op(const char *s, const char *f, unsigned int segment)
{
CudaSet *a, *b;
a = varNames.find(f)->second;
a->name = f;
//std::clock_t start1 = std::clock();
if(a->mRecCount == 0 && !a->filtered) {
// empty, unfiltered source: nothing to filter
// NOTE(review): this fresh CudaSet is never stored in varNames —
// confirm ownership/lifetime with callers.
b = new CudaSet(0,1);
}
else {
if(verbose)
cout << "FILTER " << s << " " << f << " " << getFreeMem() << '\xd';
b = varNames[s];
b->name = s;
b->string_map = a->string_map;
size_t cnt = 0;
b->sorted_fields = a->sorted_fields;
b->ts_cols = a->ts_cols;
allocColumns(a, b->fil_value);
if (b->prm_d.size() == 0) {
b->prm_d.resize(a->maxRecs);
};
//cout << endl << "MAP CHECK start " << segment << endl;
// zone map result: 'A' = whole segment matches, 'N' = none matches,
// 'R' = row-level evaluation required
char map_check = zone_map_check(b->fil_type,b->fil_value,b->fil_nums, b->fil_nums_f, b->fil_nums_precision, a, segment);
//char map_check = 'R';
cout << endl << "MAP CHECK segment " << segment << " " << map_check << endl;
if(map_check == 'R') {
auto old_ph = phase_copy;
phase_copy = 0;
copyColumns(a, b->fil_value, segment, cnt);
phase_copy = old_ph;
// evaluate the predicate per row, then compact matching row indexes
bool* res = filter(b->fil_type,b->fil_value,b->fil_nums, b->fil_nums_f, b->fil_nums_precision, a, segment);
thrust::device_ptr<bool> bp((bool*)res);
b->prm_index = 'R';
b->mRecCount = thrust::count(bp, bp + (unsigned int)a->mRecCount, 1);
thrust::copy_if(thrust::make_counting_iterator((unsigned int)0), thrust::make_counting_iterator((unsigned int)a->mRecCount),
bp, b->prm_d.begin(), thrust::identity<bool>());
hipFree(res);
}
else {
b->prm_index = map_check;
if(map_check == 'A')
b->mRecCount = a->mRecCount;
else
b->mRecCount = 0;
};
// source columns are no longer needed after the last segment
if(segment == a->segCount-1)
a->deAllocOnDevice();
}
if(verbose)
cout << endl << "filter result " << b->mRecCount << endl;
}
// Loads the right-side table's columns needed for a join over segments
// [start_seg, end_seg): for uncompressed tables the join column f2 is
// loaded first (with resize+filter), then remaining requested columns;
// compressed tables load everything through one load_queue call.
// Returns the number of records loaded.
// NOTE(review): parameter op_g is unused here — confirm it is reserved.
size_t load_right(CudaSet* right, string f2, queue<string> op_g, queue<string> op_alt, size_t& rcount, unsigned int start_seg, unsigned int end_seg) {
size_t cnt_r = 0;
//if join is on strings then add integer columns to left and right tables and modify colInd1 and colInd2
// need to allocate all right columns
if(right->not_compressed) {
queue<string> op_alt1;
op_alt1.push(f2);
cnt_r = load_queue(op_alt1, right, "", rcount, start_seg, end_seg, 1, 1);
// remaining columns (minus f2) are loaded without resizing/filtering
queue<string> op_alt2;
while(!op_alt.empty()) {
if(f2.compare(op_alt.front())) {
if (std::find(right->columnNames.begin(), right->columnNames.end(), op_alt.front()) != right->columnNames.end()) {
op_alt2.push(op_alt.front());
};
};
op_alt.pop();
};
if(!op_alt2.empty())
cnt_r = load_queue(op_alt2, right, "", rcount, start_seg, end_seg, 0, 0);
}
else {
cnt_r = load_queue(op_alt, right, f2, rcount, start_seg, end_seg, 1, 1);
};
return cnt_r;
};
// Appends all records of table `s` (source) into table `f` (destination).
// Three supported layouts:
//   disk -> disk : segment files are copied and string dictionaries merged;
//   mem  -> mem  : host columns appended;
//   mem  -> disk : source is compressed onto the destination's files.
// BUG FIX: the file-copy loops previously read BUFSIZ bytes into a 4096-byte
// stack buffer; BUFSIZ is 8192 on common platforms (e.g. glibc), so this
// overflowed `buf`. The loops now read at most sizeof(buf) per chunk.
void insert_records(const char* f, const char* s) {
    char buf[4096];
    size_t size, maxRecs, cnt = 0;
    string str_s, str_d;
    if(varNames.find(s) == varNames.end()) {
        process_error(3, "couldn't find " + string(s) );
    };
    CudaSet *a;
    a = varNames.find(s)->second;
    a->name = s;
    if(varNames.find(f) == varNames.end()) {
        process_error(3, "couldn't find " + string(f) );
    };
    CudaSet *b;
    b = varNames.find(f)->second;
    b->name = f;
    // if both source and destination are on disk
    cout << "SOURCES " << a->source << ":" << b->source << endl;
    if(a->source && b->source) {
        for(unsigned int i = 0; i < a->segCount; i++) {
            for(unsigned int z = 0; z < a->columnNames.size(); z++) {
                if(a->type[a->columnNames[z]] != 2) {
                    // numeric/decimal column: append the raw segment file
                    str_s = a->load_file_name + "." + a->columnNames[z] + "." + to_string(i);
                    str_d = b->load_file_name + "." + a->columnNames[z] + "." + to_string(b->segCount + i);
                    cout << str_s << " " << str_d << endl;
                    FILE* source = fopen(str_s.c_str(), "rb");
                    FILE* dest = fopen(str_d.c_str(), "wb");
                    while ((size = fread(buf, 1, sizeof(buf), source)) > 0) {
                        fwrite(buf, 1, size, dest);
                    }
                    fclose(source);
                    fclose(dest);
                }
                else { //merge strings
                    //read b's strings: string -> code in destination dictionary
                    str_s = b->load_file_name + "." + b->columnNames[z];
                    FILE* dest = fopen(str_s.c_str(), "rb");
                    auto len = b->char_size[b->columnNames[z]];
                    map<string, unsigned long long int> map_d;
                    // NOTE(review): assumes len < sizeof(buf); confirm char_size
                    // is bounded by 4096 for all string columns.
                    buf[len] = 0;
                    unsigned long long cnt = 0;
                    while (fread(buf, len, 1, dest)) {
                        map_d[buf] = cnt;
                        cnt++;
                    };
                    fclose(dest);
                    unsigned long long int cct = cnt;
                    // copy the source hash segment over to the destination
                    str_s = a->load_file_name + "." + a->columnNames[z] + "." + to_string(i) + ".hash";
                    str_d = b->load_file_name + "." + b->columnNames[z] + "." + to_string(b->segCount + i) + ".hash";
                    FILE* source = fopen(str_s.c_str(), "rb");
                    dest = fopen(str_d.c_str(), "wb");
                    while ((size = fread(buf, 1, sizeof(buf), source)) > 0) {
                        fwrite(buf, 1, size, dest);
                    }
                    fclose(source);
                    fclose(dest);
                    // read a's dictionary: code -> string in source dictionary
                    str_s = a->load_file_name + "." + a->columnNames[z];
                    source = fopen(str_s.c_str(), "rb");
                    map<unsigned long long int, string> map_s;
                    buf[len] = 0;
                    cnt = 0;
                    while (fread(buf, len, 1, source)) {
                        map_s[cnt] = buf;
                        cnt++;
                    };
                    fclose(source);
                    queue<string> op_vx;
                    op_vx.push(a->columnNames[z]);
                    allocColumns(a, op_vx);
                    a->resize(a->maxRecs);
                    a->CopyColumnToGpu(a->columnNames[z], z, 0);
                    a->CopyColumnToHost(a->columnNames[z]);
                    // remap a's codes into b's dictionary, appending new strings
                    str_d = b->load_file_name + "." + b->columnNames[z];
                    fstream f_file;
                    f_file.open(str_d.c_str(), ios::out|ios::app|ios::binary);
                    for(auto j = 0; j < a->mRecCount; j++) {
                        auto ss = map_s[a->h_columns_int[a->columnNames[z]][j]];
                        if(map_d.find(ss) == map_d.end()) { //add
                            f_file.write((char *)ss.c_str(), len);
                            a->h_columns_int[a->columnNames[z]][j] = cct;
                            cct++;
                        }
                        else {
                            a->h_columns_int[a->columnNames[z]][j] = map_d[ss];
                        };
                    };
                    f_file.close();
                    // compress the remapped codes as the new index segment
                    thrust::device_vector<int_type> d_col(a->mRecCount);
                    thrust::copy(a->h_columns_int[a->columnNames[z]].begin(), a->h_columns_int[a->columnNames[z]].begin() + a->mRecCount, d_col.begin());
                    auto i_name = b->load_file_name + "." + b->columnNames[z] + "." + to_string(b->segCount + i) + ".idx";
                    pfor_compress(thrust::raw_pointer_cast(d_col.data()), a->mRecCount*int_size, i_name, a->h_columns_int[a->columnNames[z]], 0);
                };
            };
        };
        if(a->maxRecs > b->maxRecs)
            maxRecs = a->maxRecs;
        else
            maxRecs = b->maxRecs;
        // refresh destination headers with the combined totals
        for(unsigned int i = 0; i < b->columnNames.size(); i++) {
            b->reWriteHeader(b->load_file_name, b->columnNames[i], a->segCount + b->segCount, a->totalRecs + b->totalRecs, maxRecs);
        };
    }
    else if(!a->source && !b->source) { //if both source and destination are in memory
        size_t oldCount = b->mRecCount;
        b->resize(a->mRecCount);
        for(unsigned int z = 0; z < b->mColumnCount; z++) {
            if(b->type[a->columnNames[z]] == 0) {
                thrust::copy(a->h_columns_int[a->columnNames[z]].begin(), a->h_columns_int[a->columnNames[z]].begin() + a->mRecCount, b->h_columns_int[b->columnNames[z]].begin() + oldCount);
            }
            else if(b->type[a->columnNames[z]] == 1) {
                thrust::copy(a->h_columns_float[a->columnNames[z]].begin(), a->h_columns_float[a->columnNames[z]].begin() + a->mRecCount, b->h_columns_float[b->columnNames[z]].begin() + oldCount);
            }
            else {
                hipMemcpy(b->h_columns_char[b->columnNames[z]] + b->char_size[b->columnNames[z]]*oldCount, a->h_columns_char[a->columnNames[z]], a->char_size[a->columnNames[z]]*a->mRecCount, hipMemcpyHostToHost);
            };
        };
    }
    else if(!a->source && b->source) {
        // in-memory source compressed out onto the disk destination
        total_segments = b->segCount;
        total_count = b->mRecCount;
        total_max = b->maxRecs;
        queue<string> op_vx;
        for(unsigned int i=0; i < a->columnNames.size(); i++)
            op_vx.push(a->columnNames[i]);
        allocColumns(a, op_vx);
        a->resize(a->maxRecs);
        for(unsigned int i = 0; i < a->segCount; i++) {
            if (a->filtered) {
                copyColumns(a, op_vx, i, cnt);
                a->CopyToHost(0, a->mRecCount);
            };
            a->compress(b->load_file_name, 0, 1, i - (a->segCount-1), a->mRecCount, 0);
        };
        for(unsigned int i = 0; i < b->columnNames.size(); i++) {
            b->writeHeader(b->load_file_name, b->columnNames[i], total_segments);
        };
    };
};
// Deletes from disk-based table `f` all records matching the current
// filter expression (globals op_type/op_value/op_nums...). Segments with
// matches are re-filtered, re-compressed and written back under renumbered
// segment files; untouched segments are renamed/kept as-is.
void delete_records(const char* f) {
CudaSet *a;
a = varNames.find(f)->second;
a->name = f;
size_t totalRemoved = 0;
size_t maxRecs = 0;
if(!a->keep) { // temporary variable
process_error(2, "Delete operator is only applicable to disk based sets\nfor deleting records from derived sets please use filter operator ");
}
else { // read matching segments, delete, compress and write on a disk replacing the original segments
string str, str_old;
queue<string> op_vx;
size_t cnt;
// make sure every dictionary column of the table exists in the set,
// registering any the set hasn't loaded yet
for ( auto it=data_dict[a->load_file_name].begin() ; it != data_dict[a->load_file_name].end(); ++it ) {
op_vx.push((*it).first);
if (std::find(a->columnNames.begin(), a->columnNames.end(), (*it).first) == a->columnNames.end()) {
if ((*it).second.col_type == 0) {
a->type[(*it).first] = 0;
a->decimal[(*it).first] = 0;
//a->h_columns_int[(*it).first] = thrust::host_vector<int_type, pinned_allocator<int_type> >();
a->h_columns_int[(*it).first] = thrust::host_vector<int_type>();
a->d_columns_int[(*it).first] = thrust::device_vector<int_type>();
}
else if((*it).second.col_type == 1) {
a->type[(*it).first] = 1;
a->decimal[(*it).first] = 0;
//a->h_columns_float[(*it).first] = thrust::host_vector<float_type, pinned_allocator<float_type> >();
a->h_columns_float[(*it).first] = thrust::host_vector<float_type>();
a->d_columns_float[(*it).first] = thrust::device_vector<float_type>();
}
else if ((*it).second.col_type == 3) {
// col_type 3: decimal stored in the float containers
a->type[(*it).first] = 1;
a->decimal[(*it).first] = 1;
//a->h_columns_float[(*it).first] = thrust::host_vector<float_type, pinned_allocator<float_type> >();
a->h_columns_float[(*it).first] = thrust::host_vector<float_type>();
a->d_columns_float[(*it).first] = thrust::device_vector<float_type>();
}
else {
a->type[(*it).first] = 2;
a->decimal[(*it).first] = 0;
a->h_columns_char[(*it).first] = nullptr;
a->d_columns_char[(*it).first] = nullptr;
a->char_size[(*it).first] = (*it).second.col_length;
};
a->columnNames.push_back((*it).first);
}
};
allocColumns(a, op_vx);
a->resize(a->maxRecs);
a->prm_d.resize(a->maxRecs);
size_t cc = a->mRecCount;
size_t tmp;
// scratch device buffer for one gathered column
void* d;
CUDA_SAFE_CALL(hipMalloc((void **) &d, a->maxRecs*float_size));
unsigned int new_seg_count = 0;
char map_check;
for(unsigned int i = 0; i < a->segCount; i++) {
// 'N' means the zone map proves no row of this segment matches
map_check = zone_map_check(op_type,op_value,op_nums, op_nums_f, op_nums_precision, a, i);
if(verbose)
cout << "MAP CHECK segment " << i << " " << map_check << endl;
if(map_check != 'N') {
cnt = 0;
copyColumns(a, op_vx, i, cnt);
tmp = a->mRecCount;
if(a->mRecCount) {
// build the permutation of rows to KEEP (predicate == false)
bool* res = filter(op_type,op_value,op_nums, op_nums_f, op_nums_precision, a, i);
thrust::device_ptr<bool> bp((bool*)res);
thrust::copy_if(thrust::make_counting_iterator((unsigned int)0), thrust::make_counting_iterator((unsigned int)a->mRecCount),
bp, a->prm_d.begin(), thrust::logical_not<bool>());
a->mRecCount = thrust::count(bp, bp + (unsigned int)a->mRecCount, 0);
hipFree(res);
// cout << "Remained recs count " << a->mRecCount << endl;
if(a->mRecCount > maxRecs)
maxRecs = a->mRecCount;
if (a->mRecCount) {
totalRemoved = totalRemoved + (tmp - a->mRecCount);
if (a->mRecCount == tmp) { //none deleted
// keep the segment; rename its files if earlier segments were dropped
if(new_seg_count != i) {
for (auto it=data_dict[a->load_file_name].begin() ; it != data_dict[a->load_file_name].end(); ++it ) {
auto colname = (*it).first;
str_old = a->load_file_name + "." + colname + "." + to_string(i);
str = a->load_file_name + "." + colname + "." + to_string(new_seg_count);
remove(str.c_str());
rename(str_old.c_str(), str.c_str());
};
};
new_seg_count++;
}
else { //some deleted
//cout << "writing segment " << new_seg_count << endl;
// gather surviving rows column by column and re-compress
map<string, col_data> s = data_dict[a->load_file_name];
for ( map<string, col_data>::iterator it=s.begin() ; it != s.end(); ++it ) {
string colname = (*it).first;
str = a->load_file_name + "." + colname + "." + to_string(new_seg_count);
if(a->type[colname] == 0) {
thrust::device_ptr<int_type> d_col((int_type*)d);
thrust::gather(a->prm_d.begin(), a->prm_d.begin() + a->mRecCount, a->d_columns_int[colname].begin(), d_col);
pfor_compress( d, a->mRecCount*int_size, str, a->h_columns_int[colname], 0);
}
else if(a->type[colname] == 1) {
thrust::device_ptr<float_type> d_col((float_type*)d);
if(a->decimal[colname]) {
thrust::gather(a->prm_d.begin(), a->prm_d.begin() + a->mRecCount, a->d_columns_float[colname].begin(), d_col);
thrust::device_ptr<long long int> d_col_dec((long long int*)d);
thrust::transform(d_col,d_col+a->mRecCount, d_col_dec, float_to_long());
pfor_compress( d, a->mRecCount*float_size, str, a->h_columns_float[colname], 1);
}
else {
// plain floats are written uncompressed (comp type 3)
thrust::gather(a->prm_d.begin(), a->prm_d.begin() + a->mRecCount, a->d_columns_float[colname].begin(), d_col);
thrust::copy(d_col, d_col + a->mRecCount, a->h_columns_float[colname].begin());
fstream binary_file(str.c_str(),ios::out|ios::binary);
binary_file.write((char *)&a->mRecCount, 4);
binary_file.write((char *)(a->h_columns_float[colname].data()),a->mRecCount*float_size);
unsigned int comp_type = 3;
binary_file.write((char *)&comp_type, 4);
binary_file.close();
};
}
else {
// string column: re-compress the dictionary indexes (.hash)
thrust::device_ptr<int_type> d_col((int_type*)d);
thrust::gather(a->prm_d.begin(), a->prm_d.begin() + a->mRecCount, a->d_columns_int[colname].begin(), d_col);
pfor_compress( d, a->mRecCount*int_size, str + ".hash", a->h_columns_int[colname], 0);
};
};
new_seg_count++;
};
}
else {
// every row of the segment matched the filter: drop it entirely
totalRemoved = totalRemoved + tmp;
};
}
}
else {
// zone map says nothing matches: keep the segment, renaming if needed
if(new_seg_count != i) {
for(unsigned int z = 0; z < a->columnNames.size(); z++) {
str_old = a->load_file_name + "." + a->columnNames[z] + "." + to_string(i);
str = a->load_file_name + "." + a->columnNames[z] + "." + to_string(new_seg_count);
remove(str.c_str());
rename(str_old.c_str(), str.c_str());
};
};
new_seg_count++;
maxRecs = a->maxRecs;
};
};
// remove leftover files of segments beyond the new segment count
if (new_seg_count < a->segCount) {
for(unsigned int i = new_seg_count; i < a->segCount; i++) {
//cout << "delete segment " << i << endl;
for(unsigned int z = 0; z < a->columnNames.size(); z++) {
str = a->load_file_name + "." + a->columnNames[z];
str += "." + to_string(i);
remove(str.c_str());
};
};
};
// NOTE(review): this loop indexes columnNames with a segment counter and
// runs over [new_seg_count, segCount) — it looks like it should iterate
// over ALL column names (compare insert_records); confirm before relying
// on the rewritten headers.
for(unsigned int i = new_seg_count; i < a->segCount; i++) {
a->reWriteHeader(a->load_file_name, a->columnNames[i], new_seg_count, a->totalRecs-totalRemoved, maxRecs);
};
a->mRecCount = cc;
a->prm_d.resize(0);
a->segCount = new_seg_count;
a->deAllocOnDevice();
hipFree(d);
};
};
// Serializes the table -> column -> col_data dictionary to `file_name`.
// Layout: [8B table count] then per table: [8B name len][name]
// [8B column count] then per column: [8B name len][name][4B type][4B length].
void save_col_data(map<string, map<string, col_data> >& data_dict, string file_name)
{
    fstream out(file_name.c_str(), ios::out|ios::binary|ios::trunc);
    size_t n_tables = data_dict.size();
    out.write((char*)&n_tables, 8);
    for (auto& tbl : data_dict) {
        size_t name_len = tbl.first.size();
        out.write((char*)&name_len, 8);
        out.write((char*)tbl.first.data(), name_len);
        size_t n_cols = tbl.second.size();
        out.write((char*)&n_cols, 8);
        for (auto& col : tbl.second) {
            size_t col_name_len = col.first.size();
            out.write((char*)&col_name_len, 8);
            out.write((char*)col.first.data(), col_name_len);
            out.write((char*)&col.second.col_type, 4);
            out.write((char*)&col.second.col_length, 4);
        }
    }
    out.close();
}
// Loads the table -> column -> col_data dictionary written by
// save_col_data(). Layout: [8B table count] then per table: [8B name len]
// [name][8B column count] then per column: [8B name len][name][4B type]
// [4B length]. A missing file is reported but not fatal.
// BUG FIX: name lengths read from the file were used unchecked in
// binary_file.read(buffer, str_len) with a 4000-byte buffer, so a corrupt
// or truncated dictionary could overflow the stack; now bounded.
void load_col_data(map<string, map<string, col_data> >& data_dict, string file_name)
{
    size_t str_len, recs, len1;
    string str1, str2;
    char buffer[4000];
    unsigned int col_type, col_length;
    fstream binary_file;
    binary_file.open(file_name.c_str(), ios::in|ios::binary);
    if(binary_file.is_open()) {
        binary_file.read((char*)&recs, 8);
        for(unsigned int i = 0; i < recs; i++) {
            binary_file.read((char*)&str_len, 8);
            if (!binary_file.good() || str_len > sizeof(buffer)) {
                cout << "Corrupted data dictionary" << endl;
                break;
            }
            binary_file.read(buffer, str_len);
            str1.assign(buffer, str_len);
            binary_file.read((char*)&len1, 8);
            for(unsigned int j = 0; j < len1; j++) {
                binary_file.read((char*)&str_len, 8);
                if (!binary_file.good() || str_len > sizeof(buffer)) {
                    cout << "Corrupted data dictionary" << endl;
                    break;
                }
                binary_file.read(buffer, str_len);
                str2.assign(buffer, str_len);
                binary_file.read((char*)&col_type, 4);
                binary_file.read((char*)&col_length, 4);
                data_dict[str1][str2].col_type = col_type;
                data_dict[str1][str2].col_length = col_length;
                //cout << "data DICT " << str1 << " " << str2 << " " << col_type << " " << col_length << endl;
            };
            // stop entirely if the stream went bad mid-table
            if (!binary_file.good())
                break;
        };
        binary_file.close();
    }
    else {
        cout << "Couldn't open data dictionary" << endl;
    };
}
// True when `name` is one of a's column names.
bool var_exists(CudaSet* a, string name) {
    const auto& cols = a->columnNames;
    return std::find(cols.begin(), cols.end(), name) != cols.end();
}
// Returns 1 if `filename` can be opened for reading, 0 otherwise.
int file_exist (const char *filename)
{
    std::ifstream probe(filename);
    return probe.good() ? 1 : 0;
}
// Returns 1 when every filter column of `right` that is a real column of
// the right table has a join bitmap file ("left.right.col.0") on disk.
// Returns 0 when right has no filter columns or any bitmap is missing.
bool check_bitmap_file_exist(CudaSet* left, CudaSet* right)
{
    queue<string> cols(right->fil_value);
    if (cols.empty())
        return 0;
    bool all_present = 1;
    while (!cols.empty()) {
        const string col = cols.front();
        cols.pop();
        if (std::find(right->columnNames.begin(), right->columnNames.end(), col) != right->columnNames.end()) {
            string fname = left->load_file_name + "." + right->load_file_name + "." + col + ".0";
            if (!file_exist(fname.c_str()))
                all_present = 0;
        }
    }
    return all_present;
}
// Checks whether join bitmap indexes exist for all of right's filter
// columns; when they do, moves right's filter state (numeric literals,
// values, operators) onto `left` — rewriting column references to the
// bitmap file names — and returns 1. Returns 0 if any bitmap is missing.
bool check_bitmaps_exist(CudaSet* left, CudaSet* right)
{
//check if there are join bitmap indexes
queue<string> cols(right->fil_value);
bool bitmaps_exist = 1;
// no filter on the right side: nothing to transfer, treat as success
if(cols.size() == 0) {
bitmaps_exist = 1;
return 1;
};
while(cols.size() ) {
if (std::find(right->columnNames.begin(), right->columnNames.end(), cols.front()) != right->columnNames.end()) {
string fname = left->load_file_name + "." + right->load_file_name + "." + cols.front() + ".0";
if( !file_exist(fname.c_str())) {
bitmaps_exist = 0;
};
};
cols.pop();
};
if(bitmaps_exist) {
// transfer right's filter literals and column refs onto the left set
while(!right->fil_nums.empty() ) {
left->fil_nums.push(right->fil_nums.front());
right->fil_nums.pop();
};
while(!right->fil_nums_f.empty() ) {
left->fil_nums_f.push(right->fil_nums_f.front());
right->fil_nums_f.pop();
};
while(!right->fil_value.empty() ) {
if (std::find(right->columnNames.begin(), right->columnNames.end(), right->fil_value.front()) != right->columnNames.end()) {
// real right-table column: refer to its bitmap file instead
string fname = left->load_file_name + "." + right->load_file_name + "." + right->fil_value.front();
left->fil_value.push(fname);
}
else
left->fil_value.push(right->fil_value.front());
right->fil_value.pop();
};
// AND the transferred predicate with any existing left-side filter
bool add_and = 1;
if(left->fil_type.empty())
add_and = 0;
while(!right->fil_type.empty() ) {
left->fil_type.push(right->fil_type.front());
right->fil_type.pop();
};
if(add_and) {
left->fil_type.push("AND");
};
return 1;
}
else {
return 0;
};
}
// Appends the right table's sort_check flag (1 byte) to file `str`.
// NOTE(review): parameter `rid` is unused here — confirm it is reserved
// for callers.
void check_sort(const string str, const char* rtable, const char* rid)
{
CudaSet* right = varNames.find(rtable)->second;
fstream binary_file(str.c_str(),ios::out|ios::binary|ios::app);
binary_file.write((char *)&right->sort_check, 1);
binary_file.close();
}
// Materializes dictionary-encoded string column `colname` for a->mRecCount
// rows in the current permutation order, then refines the permutation by
// sorting on the strings (on device or host depending on `host`).
// `temp` is device scratch for the gathered dictionary indexes.
void update_char_permutation(CudaSet* a, string colname, unsigned int* raw_ptr, string ord, void* temp, bool host)
{
// string_map entries look like "table.column"; fetch the string width
auto s = a->string_map[colname];
auto pos = s.find_first_of(".");
auto len = data_dict[s.substr(0, pos)][s.substr(pos+1)].col_length;
a->h_columns_char[colname] = new char[a->mRecCount*len];
memset(a->h_columns_char[colname], 0, a->mRecCount*len);
// gather the dictionary indexes in current permutation order
thrust::device_ptr<unsigned int> perm(raw_ptr);
thrust::device_ptr<int_type> temp_int((int_type*)temp);
thrust::gather(perm, perm+a->mRecCount, a->d_columns_int[colname].begin(), temp_int);
//for(int z = 0 ; z < a->mRecCount; z++) {
//cout << "Init vals " << a->d_columns_int[colname][z] << " " << perm[z] << " " << temp_int[z] << endl;
//};
//cout << "sz " << a->h_columns_int[colname].size() << " " << a->d_columns_int[colname].size() << " " << len << endl;
hipMemcpy(thrust::raw_pointer_cast(a->h_columns_int[colname].data()), temp, 8*a->mRecCount, hipMemcpyDeviceToHost);
// resolve each index against the on-disk string dictionary
FILE *f;
f = fopen(a->string_map[colname].c_str(), "rb");
for(int z = 0 ; z < a->mRecCount; z++) {
fseek(f, a->h_columns_int[colname][z] * len, SEEK_SET);
fread(a->h_columns_char[colname] + z*len, 1, len, f);
};
fclose(f);
if(!host) {
// sort on the device using a temporary copy of the strings
void *d;
hipMalloc((void **) &d, a->mRecCount*len);
a->d_columns_char[colname] = (char*)d;
hipMemcpy(a->d_columns_char[colname], a->h_columns_char[colname], len*a->mRecCount, hipMemcpyHostToDevice);
if (ord.compare("DESC") == 0 )
str_sort(a->d_columns_char[colname], a->mRecCount, raw_ptr, 1, len);
else
str_sort(a->d_columns_char[colname], a->mRecCount, raw_ptr, 0, len);
// NOTE(review): the buffer is freed here but the now-dangling pointer
// stays in d_columns_char[colname] — confirm no later use.
hipFree(d);
}
else {
if (ord.compare("DESC") == 0 )
str_sort_host(a->h_columns_char[colname], a->mRecCount, raw_ptr, 1, len);
else
str_sort_host(a->h_columns_char[colname], a->mRecCount, raw_ptr, 0, len);
};
}
// Adds a calendar interval to epoch time t. Exactly one unit is honoured,
// checked in order year, month, day, hour, minute, second (the original
// else-if behaviour is preserved: with both year and month non-zero only
// the year is applied).
// BUG FIX: the month branch previously handled at most one year of carry
// (tm_mon + month up to 23) and mis-handled larger or negative month
// deltas; months are now normalized into 0..11 with floor semantics.
time_t add_interval(time_t t, int year, int month, int day, int hour, int minute, int second)
{
    if(year) {
        struct tm tt = *gmtime (&t);
        tt.tm_year = tt.tm_year + year;
        return tm_to_time_t_utc(&tt);
    }
    else if(month) {
        struct tm tt = *gmtime (&t);
        int total = tt.tm_mon + month;
        int carry_years = total / 12;
        int new_mon = total % 12;
        if (new_mon < 0) {  // C division truncates toward zero; floor it
            new_mon += 12;
            carry_years -= 1;
        }
        tt.tm_year += carry_years;
        tt.tm_mon = new_mon;
        return tm_to_time_t_utc(&tt);
    }
    else if(day) {
        return t + day*24*60*60;
    }
    else if(hour) {
        return t + hour*60*60;
    }
    else if(minute) {
        return t + minute*60;
    }
    else {
        return t + second;
    }
}
#ifdef _WIN64
// Returns total physical RAM in bytes (Windows implementation).
size_t getTotalSystemMemory()
{
MEMORYSTATUSEX status;
status.dwLength = sizeof(status);
GlobalMemoryStatusEx(&status);
return status.ullTotalPhys;
}
#else
// Returns total physical RAM in bytes (POSIX implementation:
// page count * page size).
size_t getTotalSystemMemory()
{
long pages = sysconf(_SC_PHYS_PAGES);
long page_size = sysconf(_SC_PAGE_SIZE);
return pages * page_size;
}
#endif
|
24fa66502e181b8f2c3ab674e3309528402703b0.cu
|
/*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
#include <cctype>
#include <algorithm>
#include <functional>
#include <numeric>
#include <ctime>
#include <time.h>
#include "cm.h"
#include "atof.h"
#include "compress.cu"
#include "sorts.cu"
#include "filter.h"
#include "callbacks.h"
#include "zone_map.h"
#ifdef _WIN64
#define atoll(S) _atoi64(S)
#define fseek(S, S1, S2) _fseeki64(S, S1, S2)
#include <windows.h>
#else
#include <unistd.h>
#endif
using namespace std;
using namespace thrust::placeholders;
// running totals accumulated while writing/compressing segments
size_t total_count = 0, total_max;
clock_t tot;
unsigned int total_segments = 0, old_segments;
size_t process_count;
size_t alloced_sz = 0;
bool fact_file_loaded = 1;
// runtime options set by the host application
bool verbose;
bool interactive, ssd, delta, star;
// scratch device buffers shared across operators
void* d_v = nullptr;
void* s_v = nullptr;
// parsed operator state (sort keys, filter expression, literals, aliases)
queue<string> op_sort;
queue<string> op_presort;
queue<string> op_type;
bool op_case = 0;
string grp_val;
queue<string> op_value;
queue<int_type> op_nums;
queue<float_type> op_nums_f;
queue<unsigned int> op_nums_precision;
queue<string> col_aliases;
// table -> column -> on-disk metadata dictionary
map<string, map<string, col_data> > data_dict;
map<unsigned int, map<unsigned long long int, size_t> > char_hash;
// cached file buffers keyed by file name
map<string, char*> index_buffers;
map<string, char*> buffers;
map<string, size_t> buffer_sizes;
size_t total_buffer_size;
queue<string> buffer_names;
void* alloced_tmp;
bool alloced_switch = 0;
map<string,CudaSet*> varNames; // STL map to manage CudaSet variables
// per-column compression state for the current copy phase
map<string, unsigned int> cpy_bits;
map<string, long long int> cpy_init_val;
char* readbuff = nullptr;
thrust::device_vector<unsigned int> rcol_matches;
thrust::device_vector<int_type> rcol_dev;
// Approximate float equality: true when |x - y| < EPSILON.
struct f_equal_to
{
    __host__ __device__
    bool operator()(const float_type x, const float_type y)
    {
        const float_type diff = x - y;
        return (diff < EPSILON) && (diff > -EPSILON);
    }
};
// Tolerant float less-than: true when x < y by more than EPSILON.
struct f_less
{
    __host__ __device__
    bool operator()(const float_type x, const float_type y)
    {
        const float_type gap = y - x;
        return gap > EPSILON;
    }
};
// Tolerant float greater-than: true when x > y by more than EPSILON.
struct f_greater
{
    __host__ __device__
    bool operator()(const float_type x, const float_type y)
    {
        const float_type gap = x - y;
        return gap > EPSILON;
    }
};
// Tolerant float >=: x exceeds y by more than EPSILON, or the two are
// within EPSILON of each other.
struct f_greater_equal_to
{
    __host__ __device__
    bool operator()(const float_type x, const float_type y)
    {
        const float_type diff = x - y;
        return (diff > EPSILON) || ((diff < EPSILON) && (diff > -EPSILON));
    }
};
// Tolerant float <=: y exceeds x by more than EPSILON, or the two are
// within EPSILON of each other.
struct f_less_equal
{
    __host__ __device__
    bool operator()(const float_type x, const float_type y)
    {
        const float_type diff = x - y;
        return ((-diff) > EPSILON) || ((diff < EPSILON) && (diff > -EPSILON));
    }
};
// Tolerant float inequality: true when |x - y| > EPSILON.
struct f_not_equal_to
{
    __host__ __device__
    bool operator()(const float_type x, const float_type y)
    {
        const float_type diff = x - y;
        return (diff > EPSILON) || (diff < -EPSILON);
    }
};
// Converts an integer column value to float_type.
struct long_to_float_type
{
    __host__ __device__
    float_type operator()(const int_type x)
    {
        return static_cast<float_type>(x);
    }
};
// Scales x by 10^a — a decimal left-shift used for fixed-point columns.
template <typename T>
struct power_functor : public thrust::unary_function<T,T>
{
unsigned int a;
__host__ __device__
power_functor(unsigned int a_) { a = a_; }
__host__ __device__
T operator()(T x)
{
// NOTE(review): pow's result is truncated to unsigned int before the
// multiply, which overflows for a >= 10 — confirm exponents stay small.
return x*(unsigned int)pow((double)10,(double)a);
}
};
// Predicate: true when x == 0 (for thrust count/remove-style algorithms).
struct is_zero
{
__host__ __device__
bool operator()(const int &x)
{
return x == 0;
}
};
// Returns the local timezone's offset from UTC in whole hours (negative
// for zones west of UTC), derived by probing localtime() at a fixed epoch.
int get_utc_offset() {
time_t zero = 24*60*60L;
struct tm * timeptr;
int gmtime_hours;
/* get the local time for Jan 2, 1970 00:00 UTC */
timeptr = localtime( &zero );
gmtime_hours = timeptr->tm_hour;
/* if the local time is the "day before" the UTC, subtract 24 hours
from the hours to get the UTC offset */
if( timeptr->tm_mday < 2 )
gmtime_hours -= 24;
return gmtime_hours;
}
/*
the utc analogue of mktime,
(much like timegm on some systems)
*/
// UTC analogue of mktime (like timegm on some systems): converts a
// struct tm expressed in UTC to a time_t.
time_t tm_to_time_t_utc( struct tm * timeptr ) {
/* gets the epoch time relative to the local time zone,
and then adds the appropriate number of seconds to make it UTC */
return mktime( timeptr ) + get_utc_offset() * 3600;
}
/*class power_functor {
unsigned int a;
public:
power_functor(unsigned int a_) { a = a_; }
__host__ __device__ int_type operator()(int_type x) const
{
return x*(unsigned int)pow((double)10,(double)a);
}
};
*/
void allocColumns(CudaSet* a, queue<string> fields);
void copyColumns(CudaSet* a, queue<string> fields, unsigned int segment, size_t& count, bool rsz, bool flt);
void mygather(unsigned int tindex, unsigned int idx, CudaSet* a, CudaSet* t, size_t count, size_t g_size);
void mycopy(unsigned int tindex, unsigned int idx, CudaSet* a, CudaSet* t, size_t count, size_t g_size);
void write_compressed_char(string file_name, unsigned int index, size_t mCount);
size_t getFreeMem();
size_t getTotalSystemMemory();
void process_error(int severity, string err);
// Builds a disk-backed CudaSet from per-column name/type/size/cols queues;
// text_source = 1 marks a text (CSV-style) data source.
CudaSet::CudaSet(queue<string> &nameRef, queue<string> &typeRef, queue<int> &sizeRef, queue<int> &colsRef, size_t Recs)
: mColumnCount(0), mRecCount(0)
{
initialize(nameRef, typeRef, sizeRef, colsRef, Recs);
source = 1;
text_source = 1;
fil_f = nullptr;
fil_s = nullptr;
};
// Builds a disk-backed CudaSet bound to binary segment files named after
// `file_name`; `max` caps the records per segment (maxRecs).
CudaSet::CudaSet(queue<string> &nameRef, queue<string> &typeRef, queue<int> &sizeRef, queue<int> &colsRef, size_t Recs, string file_name, unsigned int max)
: mColumnCount(0), mRecCount(0)
{
maxRecs = max;
initialize(nameRef, typeRef, sizeRef, colsRef, Recs, file_name);
source = 1;
text_source = 0;
fil_f = nullptr;
fil_s = nullptr;
};
// Builds an in-memory (non-persistent) CudaSet with the given dimensions.
CudaSet::CudaSet(const size_t RecordCount, const unsigned int ColumnCount)
{
initialize(RecordCount, ColumnCount);
keep = false;
source = 0;
text_source = 0;
fil_f = nullptr;
fil_s = nullptr;
};
// Builds an in-memory CudaSet from a select list and its aliases.
CudaSet::CudaSet(queue<string> op_sel, const queue<string> op_sel_as)
{
initialize(op_sel, op_sel_as);
keep = false;
source = 0;
text_source = 0;
fil_f = nullptr;
fil_s = nullptr;
};
// Builds an in-memory CudaSet holding the result schema of joining a and b
// projected onto the select list.
CudaSet::CudaSet(CudaSet* a, CudaSet* b, queue<string> op_sel, queue<string> op_sel_as)
{
initialize(a,b, op_sel, op_sel_as);
keep = false;
source = 0;
text_source = 0;
fil_f = nullptr;
fil_s = nullptr;
};
// Releases all host/device resources via free().
CudaSet::~CudaSet()
{
free();
};
// Allocates device storage for one column: float columns (type 1) use the
// float vectors, every other type uses the int vectors.
void CudaSet::allocColumnOnDevice(string colname, size_t RecordCount)
{
    if (type[colname] == 1)
        d_columns_float[colname].resize(RecordCount);
    else
        d_columns_int[colname].resize(RecordCount);
}
// Grows the host-side columns by addRecs records (join result buffer).
void CudaSet::resize_join(size_t addRecs)
{
    mRecCount += addRecs;
    for (const auto& cname : columnNames) {
        if (type[cname] == 1)
            h_columns_float[cname].resize(mRecCount);
        else
            h_columns_int[cname].resize(mRecCount);
    }
}
// Grows the host-side columns by addRecs records.
void CudaSet::resize(size_t addRecs)
{
    mRecCount += addRecs;
    for (const auto& cname : columnNames) {
        if (type[cname] == 1)
            h_columns_float[cname].resize(mRecCount);
        else
            h_columns_int[cname].resize(mRecCount);
    }
}
// Frees the device storage of one column (resize(0) + shrink_to_fit).
// NOTE(review): the int branch guards with find() so it never inserts a
// new map entry, while the float branch uses operator[] and may
// default-insert one — confirm the asymmetry is intended.
void CudaSet::deAllocColumnOnDevice(string colname)
{
if (type[colname] != 1 && !d_columns_int.empty() && d_columns_int.find(colname) != d_columns_int.end()) {
if(d_columns_int[colname].size() > 0) {
d_columns_int[colname].resize(0);
d_columns_int[colname].shrink_to_fit();
};
}
else if (type[colname] == 1 && !d_columns_float.empty()) {
if (d_columns_float[colname].size() > 0) {
d_columns_float[colname].resize(0);
d_columns_float[colname].shrink_to_fit();
};
};
};
// Allocates device storage of RecordCount records for every column.
void CudaSet::allocOnDevice(size_t RecordCount)
{
    for (const auto& cname : columnNames)
        allocColumnOnDevice(cname, RecordCount);
}
// Releases all device-side storage for this set: every typed column, the
// permutation vector, and — for filtered sets — the source table's device
// columns as well.
void CudaSet::deAllocOnDevice()
{
for(unsigned int i=0; i < columnNames.size(); i++) {
deAllocColumnOnDevice(columnNames[i]);
};
if(prm_d.size()) {
prm_d.resize(0);
prm_d.shrink_to_fit();
};
// also sweep any device vectors not listed in columnNames
for (auto it=d_columns_int.begin(); it != d_columns_int.end(); ++it ) {
if(it->second.size() > 0) {
it->second.resize(0);
it->second.shrink_to_fit();
};
};
for (auto it=d_columns_float.begin(); it != d_columns_float.end(); ++it ) {
if(it->second.size() > 0) {
it->second.resize(0);
it->second.shrink_to_fit();
};
};
if(filtered) { // dealloc the source
if(varNames.find(source_name) != varNames.end()) {
varNames[source_name]->deAllocOnDevice();
};
};
};
// Resizes one column's device storage: float columns (type 1) use the
// float vectors, every other type the int vectors.
void CudaSet::resizeDeviceColumn(size_t RecCount, string colname)
{
    if (type[colname] == 1)
        d_columns_float[colname].resize(RecCount);
    else
        d_columns_int[colname].resize(RecCount);
}
// Resizes every column's device vector to RecCount records.
void CudaSet::resizeDevice(size_t RecCount)
{
    for (const auto& cname : columnNames)
        resizeDeviceColumn(RecCount, cname);
}
// Reports whether a column currently has a non-empty device-side copy.
// Note: operator[] lookup is kept (matching the original) so an unknown
// column name creates an empty map entry as a side effect.
bool CudaSet::onDevice(string colname)
{
    bool resident;
    if (type[colname] == 1)
        resident = !d_columns_float.empty() && d_columns_float[colname].size();
    else
        resident = !d_columns_int.empty() && d_columns_int[colname].size();
    return resident;
}
// Creates an empty clone of this set: same schema and metadata (types, sizes,
// decimal flags, segment layout) but zero records. Column containers are
// default-constructed per column type; caller owns the returned object.
CudaSet* CudaSet::copyDeviceStruct()
{
    CudaSet* clone = new CudaSet(mRecCount, mColumnCount);

    // Copy scalar metadata.
    clone->not_compressed = not_compressed;
    clone->segCount       = segCount;
    clone->maxRecs        = maxRecs;
    clone->load_file_name = load_file_name;

    // Copy schema maps/lists.
    clone->columnNames    = columnNames;
    clone->ts_cols        = ts_cols;
    clone->cols           = cols;
    clone->type           = type;
    clone->char_size      = char_size;
    clone->decimal        = decimal;
    clone->decimal_zeroes = decimal_zeroes;

    // Install empty containers matching each column's storage class.
    for (const auto& cname : columnNames) {
        switch (clone->type[cname]) {
        case 0: // integer / decimal
            clone->d_columns_int[cname] = thrust::device_vector<int_type>();
            clone->h_columns_int[cname] = thrust::host_vector<int_type, uninitialized_host_allocator<int_type> >();
            break;
        case 1: // float
            clone->d_columns_float[cname] = thrust::device_vector<float_type>();
            clone->h_columns_float[cname] = thrust::host_vector<float_type, uninitialized_host_allocator<float_type> >();
            break;
        default: // char columns start with no backing buffers
            clone->h_columns_char[cname] = nullptr;
            clone->d_columns_char[cname] = nullptr;
            break;
        }
    }

    clone->mRecCount = 0; // structure only, no rows
    return clone;
}
// Reads selected rows of one compressed segment column file directly from
// disk into dest's host column at `offset`. prm_vh holds the (ascending) row
// indices to fetch. File layout: [cnt:4][lower_val:8][payload...][bits:4],
// with raw fixed-width (bits/8-byte) values starting at byte offset 24.
// Values are fetched one 4096-byte page at a time; a new page is read only
// when the requested row falls outside the cached window starting at `idx`.
// Returns lower_val (the frame-of-reference base the caller must add back).
int_type CudaSet::readSsdSegmentsFromFile(unsigned int segNum, string colname, size_t offset, thrust::host_vector<unsigned int>& prm_vh, CudaSet* dest)
{
    string f1 = load_file_name + "." + colname + "." + to_string(segNum);
    FILE* f = fopen(f1.c_str(), "rb" );
    if(!f) {
        cout << "Error opening " << f1 << " file " << endl;
        exit(0);
    };
    unsigned int cnt, bits;
    int_type lower_val;
    // One 4 KB page, reinterpreted at each supported value width.
    unsigned short int val_s_r[4096/2];
    char val_c_r[4096];
    unsigned int val_i_r[4096/4];
    unsigned long long int val_l_r[4096/8];
    unsigned int idx;       // first row index covered by the cached page
    bool idx_set = 0;       // becomes true once a page has been loaded
    fread(&cnt, 4, 1, f);
    fread(&lower_val, 8, 1, f);
    fseek(f, cnt - (8+4) + 32, SEEK_CUR);
    fread(&bits, 4, 1, f);
    //cout << "lower_val bits " << lower_val << " " << bits << endl;
    if(type[colname] == 0) {
        for(unsigned int i = 0; i < prm_vh.size(); i++) {
            if(!idx_set || prm_vh[i] >= idx + 4096/(bits/8)) {
                // Requested row is outside the cached page: seek and refill.
                fseek(f, 24 + prm_vh[i]*(bits/8), SEEK_SET);
                idx = prm_vh[i];
                idx_set = 1;
                if(bits == 8) {
                    fread(&val_c_r[0], 4096, 1, f);
                    dest->h_columns_int[colname][i + offset] = val_c_r[0];
                }
                else if(bits == 16) {
                    fread(&val_s_r, 4096, 1, f);
                    dest->h_columns_int[colname][i + offset] = val_s_r[0];
                }
                if(bits == 32) {
                    fread(&val_i_r, 4096, 1, f);
                    dest->h_columns_int[colname][i + offset] = val_i_r[0];
                }
                if(bits == 64) {   // was `bits == 84`: 84 is not a valid width, so 64-bit values were never loaded
                    fread(&val_l_r, 4096, 1, f);
                    dest->h_columns_int[colname][i + offset] = val_l_r[0];
                }
            }
            else {
                // Row lies inside the cached page: serve it from memory.
                if(bits == 8) {
                    dest->h_columns_int[colname][i + offset] = val_c_r[prm_vh[i]-idx];
                }
                else if(bits == 16) {
                    dest->h_columns_int[colname][i + offset] = val_s_r[prm_vh[i]-idx];
                }
                if(bits == 32) {
                    dest->h_columns_int[colname][i + offset] = val_i_r[prm_vh[i]-idx];
                }
                if(bits == 64) {   // was `bits == 84` (same typo as above)
                    dest->h_columns_int[colname][i + offset] = val_l_r[prm_vh[i]-idx];
                }
            };
        };
    }
    else if(type[colname] == 1) {
        // Float/decimal path: copy bits/8 raw bytes per value out of the page.
        for(unsigned int i = 0; i < prm_vh.size(); i++) {
            if(!idx_set || prm_vh[i] >= idx + 4096/(bits/8)) {
                fseek(f, 24 + prm_vh[i]*(bits/8), SEEK_SET);
                idx = prm_vh[i];
                idx_set = 1;
                fread(val_c_r, 4096, 1, f);
                memcpy(&dest->h_columns_float[colname][i + offset], &val_c_r[0], bits/8);
            }
            else {
                memcpy(&dest->h_columns_float[colname][i + offset], &val_c_r[(prm_vh[i]-idx)*(bits/8)], bits/8);
            };
        };
    }
    else {
        //no strings in fact tables
    };
    fclose(f);
    return lower_val;
}
// Variant of readSsdSegmentsFromFile that writes the selected rows into a
// caller-supplied host vector (`dest`) instead of another CudaSet's column.
// Same file layout and 4 KB page-caching scheme; returns lower_val.
int_type CudaSet::readSsdSegmentsFromFileR(unsigned int segNum, string colname, thrust::host_vector<unsigned int>& prm_vh, thrust::host_vector<unsigned int>& dest)
{
    string f1 = load_file_name + "." + colname + "." + to_string(segNum);
    FILE* f = fopen(f1.c_str(), "rb" );
    if(!f) {
        cout << "Error opening " << f1 << " file " << endl;
        exit(0);
    };
    unsigned int cnt, bits;
    int_type lower_val;
    fread(&cnt, 4, 1, f);
    fread(&lower_val, 8, 1, f);
    fseek(f, cnt - (8+4) + 32, SEEK_CUR);
    fread(&bits, 4, 1, f);
    // One 4 KB page, reinterpreted at each supported value width.
    unsigned short int val_s_r[4096/2];
    char val_c_r[4096];
    unsigned int val_i_r[4096/4];
    unsigned long long int val_l_r[4096/8];
    unsigned int idx;       // first row index covered by the cached page
    bool idx_set = 0;
    for(unsigned int i = 0; i < prm_vh.size(); i++) {
        if(!idx_set || prm_vh[i] >= idx + 4096/(bits/8)) {
            // Requested row is outside the cached page: seek and refill.
            fseek(f, 24 + prm_vh[i]*(bits/8), SEEK_SET);
            idx = prm_vh[i];
            idx_set = 1;
            if(bits == 8) {
                fread(val_c_r, 4096, 1, f);
                dest[i] = val_c_r[0];
            }
            else if(bits == 16) {
                fread(val_s_r, 4096, 1, f);
                dest[i] = val_s_r[0];
            }
            if(bits == 32) {
                fread(val_i_r, 4096, 1, f);
                dest[i] = val_i_r[0];
            }
            if(bits == 64) {   // was `bits == 84`: no 84-bit width exists; 64-bit values were silently skipped
                fread(val_l_r, 4096, 1, f);
                dest[i] = val_l_r[0];
            }
        }
        else {
            // Row lies inside the cached page: serve it from memory.
            if(bits == 8) {
                dest[i] = val_c_r[prm_vh[i]-idx];
            }
            else if(bits == 16) {
                dest[i] = val_s_r[prm_vh[i]-idx];
            }
            if(bits == 32) {
                dest[i] = val_i_r[prm_vh[i]-idx];
            }
            if(bits == 64) {   // was `bits == 84` (same typo as above)
                dest[i] = val_l_r[prm_vh[i]-idx];
            }
        };
    };
    fclose(f);
    return lower_val;
}
std::clock_t tot_disk; // cumulative clock ticks spent in readSegmentsFromFile (disk I/O accounting)
// Loads one compressed segment of a column from disk into host memory.
// In interactive mode, whole segment files are cached in pinned host memory
// (`buffers`, keyed by file name) with FIFO eviction driven by `buffer_names`
// when the total cached size would exceed system memory. Otherwise the
// compressed payload is read into the column's own host vector
// (count word first, then cnt+52 bytes of payload + trailer).
void CudaSet::readSegmentsFromFile(unsigned int segNum, string colname)
{
    string f1 = load_file_name + "." + colname + "." + to_string(segNum);
    if(type[colname] == 2)
        f1 = f1 + ".idx";   // char columns read their index file
    std::clock_t start1 = std::clock();
    if(interactive) { //check if data are in buffers
        if(buffers.find(f1) == buffers.end()) { // add data to buffers
            FILE* f = fopen(f1.c_str(), "rb" );
            if(!f) {
                process_error(3, "Error opening " + string(f1) +" file " );
            };
            fseek(f, 0, SEEK_END);
            long fileSize = ftell(f);
            // Evict oldest buffers until the new file fits in system memory.
            while(total_buffer_size + fileSize > getTotalSystemMemory() && !buffer_names.empty()) { //free some buffers
                //delete [] buffers[buffer_names.front()];
                cudaFreeHost(buffers[buffer_names.front()]);
                total_buffer_size = total_buffer_size - buffer_sizes[buffer_names.front()];
                buffer_sizes.erase(buffer_names.front());
                buffers.erase(buffer_names.front());
                buffer_names.pop();
            };
            fseek(f, 0, SEEK_SET);
            char* buff;
            // Pinned allocation: these buffers feed async H2D copies later.
            cudaHostAlloc((void**) &buff, fileSize,cudaHostAllocDefault);
            fread(buff, fileSize, 1, f);
            fclose(f);
            buffers[f1] = buff;
            buffer_sizes[f1] = fileSize;
            // Register the name in the FIFO exactly once. (The original pushed
            // it twice, so eviction later popped the same name a second time
            // and freed/erased an entry that was already gone.)
            buffer_names.push(f1);
            total_buffer_size = total_buffer_size + fileSize;
            cout << "added buffer " << f1 << " " << fileSize << endl;
        };
        // get data from buffers: only make sure the host vector is big enough
        // to receive the decompressed data later.
        if(type[colname] != 1) {
            unsigned int cnt = ((unsigned int*)buffers[f1])[0];
            // NOTE(review): this compares the byte count against size()/8 + 10,
            // which is asymmetric with the non-interactive path below
            // (cnt/8 + 10 > size()). Preserved as-is; verify intent.
            if(cnt > h_columns_int[colname].size()/8 + 10)
                h_columns_int[colname].resize(cnt/8 + 10);
        }
        else {
            unsigned int cnt = ((unsigned int*)buffers[f1])[0];
            if(cnt > h_columns_float[colname].size()/8 + 10)
                h_columns_float[colname].resize(cnt/8 + 10);
        }
    }
    else {
        FILE* f = fopen(f1.c_str(), "rb" );
        if(!f) {
            cout << "Error opening " << f1 << " file " << endl;
            exit(0);
        };
        if(type[colname] != 1) {
            if(1 > h_columns_int[colname].size())
                h_columns_int[colname].resize(1);
            fread(h_columns_int[colname].data(), 4, 1, f);   // leading count word
            unsigned int cnt = ((unsigned int*)(h_columns_int[colname].data()))[0];
            if(cnt/8+10 > h_columns_int[colname].size()) {
                h_columns_int[colname].resize(cnt + 10);
            };
            // Payload plus 52-byte trailer, placed right after the count word.
            size_t rr = fread((unsigned int*)(h_columns_int[colname].data()) + 1, 1, cnt+52, f);
            if(rr != cnt+52) {
                char buf[1024];
                sprintf(buf, "Couldn't read %d bytes from %s ,read only", cnt+52, f1.c_str());
                process_error(3, string(buf));
            };
        }
        else {
            if(1 > h_columns_float[colname].size())
                h_columns_float[colname].resize(1);
            fread(h_columns_float[colname].data(), 4, 1, f);
            unsigned int cnt = ((unsigned int*)(h_columns_float[colname].data()))[0];
            if(cnt/8+10 > h_columns_float[colname].size())
                h_columns_float[colname].resize(cnt + 10);
            size_t rr = fread((unsigned int*)(h_columns_float[colname].data()) + 1, 1, cnt+52, f);
            if(rr != cnt+52) {
                char buf[1024];
                sprintf(buf, "Couldn't read %d bytes from %s ,read only", cnt+52, f1.c_str());
                process_error(3, string(buf));
            };
        }
        fclose(f);
    };
    tot_disk = tot_disk + (std::clock() - start1);
};
// Copies one segment of a column to the GPU, writing at `offset` into the
// column's device vector (or into the shared scratch buffer `alloced_tmp`
// when alloced_switch is set). For uncompressed sets this is a plain host
// to device copy; for compressed sets the segment is first loaded from disk
// or the buffer cache and pfor-decompressed directly on the device.
// Side effect: updates mRecCount to the number of records in this segment.
void CudaSet::CopyColumnToGpu(string colname, unsigned int segment, size_t offset)
{
    if(not_compressed) {
        // calculate how many records we need to copy: every segment holds
        // maxRecs records except possibly the last one.
        if(segment < segCount-1) {
            mRecCount = maxRecs;
        }
        else {
            mRecCount = hostRecCount - maxRecs*(segCount-1);
        };
        if(type[colname] != 1) {
            if(!alloced_switch) {
                thrust::copy(h_columns_int[colname].begin() + maxRecs*segment, h_columns_int[colname].begin() + maxRecs*segment + mRecCount, d_columns_int[colname].begin() + offset);
            }
            else {
                // Destination is the pre-allocated scratch buffer instead of
                // the column's own device vector.
                thrust::device_ptr<int_type> d_col((int_type*)alloced_tmp);
                thrust::copy(h_columns_int[colname].begin() + maxRecs*segment, h_columns_int[colname].begin() + maxRecs*segment + mRecCount, d_col);
            };
        }
        else {
            if(!alloced_switch) {
                thrust::copy(h_columns_float[colname].begin() + maxRecs*segment, h_columns_float[colname].begin() + maxRecs*segment + mRecCount, d_columns_float[colname].begin() + offset);
            }
            else {
                thrust::device_ptr<float_type> d_col((float_type*)alloced_tmp);
                thrust::copy(h_columns_float[colname].begin() + maxRecs*segment, h_columns_float[colname].begin() + maxRecs*segment + mRecCount, d_col);
            };
        }
    }
    else {
        readSegmentsFromFile(segment,colname);
        // d_v / s_v are small device scratch areas used by pfor_decompress;
        // allocated lazily once per CudaSet.
        if(!d_v)
            CUDA_SAFE_CALL(cudaMalloc((void **) &d_v, 12));
        if(!s_v)
            CUDA_SAFE_CALL(cudaMalloc((void **) &s_v, 8));
        string f1;
        if(type[colname] == 2) {
            f1 = load_file_name + "." + colname + "." + to_string(segment) + ".idx";
        }
        else {
            f1 = load_file_name + "." + colname + "." + to_string(segment);
        };
        if(type[colname] != 1) {
            if(!alloced_switch) {
                // Source is either the buffer cache (interactive mode) or the
                // column's host vector that readSegmentsFromFile just filled.
                if(buffers.find(f1) == buffers.end()) {
                    mRecCount = pfor_decompress(thrust::raw_pointer_cast(d_columns_int[colname].data() + offset), h_columns_int[colname].data(), d_v, s_v, colname);
                }
                else {
                    mRecCount = pfor_decompress(thrust::raw_pointer_cast(d_columns_int[colname].data() + offset), buffers[f1], d_v, s_v, colname);
                };
            }
            else {
                if(buffers.find(f1) == buffers.end()) {
                    mRecCount = pfor_decompress(alloced_tmp, h_columns_int[colname].data(), d_v, s_v, colname);
                }
                else {
                    mRecCount = pfor_decompress(alloced_tmp, buffers[f1], d_v, s_v, colname);
                };
            };
        }
        else {
            if(decimal[colname]) {
                // Decimal floats are stored compressed as scaled integers;
                // decompress, then (unless in copy phase) convert in place
                // from long long to float_type.
                if(!alloced_switch) {
                    if(buffers.find(f1) == buffers.end()) {
                        mRecCount = pfor_decompress( thrust::raw_pointer_cast(d_columns_float[colname].data() + offset) , h_columns_float[colname].data(), d_v, s_v, colname);
                    }
                    else {
                        mRecCount = pfor_decompress( thrust::raw_pointer_cast(d_columns_float[colname].data() + offset) , buffers[f1], d_v, s_v, colname);
                    };
                    if(!phase_copy) {
                        thrust::device_ptr<long long int> d_col_int((long long int*)thrust::raw_pointer_cast(d_columns_float[colname].data() + offset));
                        thrust::transform(d_col_int,d_col_int+mRecCount,d_columns_float[colname].begin(), long_to_float());
                    };
                }
                else {
                    if(buffers.find(f1) == buffers.end()) {
                        mRecCount = pfor_decompress(alloced_tmp, h_columns_float[colname].data(), d_v, s_v, colname);
                    }
                    else {
                        mRecCount = pfor_decompress(alloced_tmp, buffers[f1], d_v, s_v, colname);
                    };
                    if(!phase_copy) {
                        // In-place reinterpretation of the scratch buffer.
                        thrust::device_ptr<long long int> d_col_int((long long int*)alloced_tmp);
                        thrust::device_ptr<float_type> d_col_float((float_type*)alloced_tmp);
                        thrust::transform(d_col_int,d_col_int+mRecCount, d_col_float, long_to_float());
                    };
                    //for(int i = 0; i < mRecCount;i++)
                    //cout << "DECOMP " << (float_type)(d_col_int[i]) << " " << d_col_float[i] << endl;
                };
            }
            //else // uncompressed float
            // will have to fix it later so uncompressed data will be written by segments too
        }
    };
}
// Copies ALL segments of a column to the GPU, concatenating them into the
// column's device vector. Compressed segments are loaded from disk/cache and
// pfor-decompressed in turn, advancing the write offset by each segment's
// record count. Side effect: mRecCount is set to the total record count.
void CudaSet::CopyColumnToGpu(string colname) // copy all segments
{
    if(not_compressed) {
        if(type[colname] != 1)
            thrust::copy(h_columns_int[colname].begin(), h_columns_int[colname].begin() + mRecCount, d_columns_int[colname].begin());
        else
            thrust::copy(h_columns_float[colname].begin(), h_columns_float[colname].begin() + mRecCount, d_columns_float[colname].begin());
    }
    else {
        // Lazily allocate the small device scratch areas for pfor_decompress.
        if(!d_v)
            CUDA_SAFE_CALL(cudaMalloc((void **) &d_v, 12));
        if(!s_v)
            CUDA_SAFE_CALL(cudaMalloc((void **) &s_v, 8));
        size_t cnt = 0;   // running write offset into the device vector
        string f1;
        for(unsigned int i = 0; i < segCount; i++) {
            readSegmentsFromFile(i,colname);
            if(type[colname] == 2) {
                f1 = load_file_name + "." + colname + "." + to_string(i) + ".idx";
            }
            else {
                f1 = load_file_name + "." + colname + "." + to_string(i);
            };
            if(type[colname] == 0) {
                // Decompress from the buffer cache when present, otherwise
                // from the host vector readSegmentsFromFile just filled.
                if(buffers.find(f1) == buffers.end()) {
                    mRecCount = pfor_decompress(thrust::raw_pointer_cast(d_columns_int[colname].data() + cnt), h_columns_int[colname].data(), d_v, s_v, colname);
                }
                else {
                    mRecCount = pfor_decompress(thrust::raw_pointer_cast(d_columns_int[colname].data() + cnt), buffers[f1], d_v, s_v, colname);
                };
            }
            else if(type[colname] == 1) {
                if(decimal[colname]) {
                    // Decimal floats are stored as scaled integers; convert
                    // back to float_type after decompression (unless copying).
                    if(buffers.find(f1) == buffers.end()) {
                        mRecCount = pfor_decompress( thrust::raw_pointer_cast(d_columns_float[colname].data() + cnt) , h_columns_float[colname].data(), d_v, s_v, colname);
                    }
                    else {
                        mRecCount = pfor_decompress( thrust::raw_pointer_cast(d_columns_float[colname].data() + cnt) , buffers[f1], d_v, s_v, colname);
                    };
                    if(!phase_copy) {
                        thrust::device_ptr<long long int> d_col_int((long long int*)thrust::raw_pointer_cast(d_columns_float[colname].data() + cnt));
                        thrust::transform(d_col_int,d_col_int+mRecCount,d_columns_float[colname].begin() + cnt, long_to_float());
                    };
                }
                // else uncompressed float
                // will have to fix it later so uncompressed data will be written by segments too
            };
            cnt = cnt + mRecCount;
            //totalRecs = totals + mRecCount;
        };
        mRecCount = cnt;
    };
}
// Copies the first RecCount device records of a column into its host vector,
// starting at host position `offset`.
void CudaSet::CopyColumnToHost(string colname, size_t offset, size_t RecCount)
{
    if (type[colname] == 1) {
        auto& src = d_columns_float[colname];
        thrust::copy(src.begin(), src.begin() + RecCount, h_columns_float[colname].begin() + offset);
    }
    else {
        auto& src = d_columns_int[colname];
        thrust::copy(src.begin(), src.begin() + RecCount, h_columns_int[colname].begin() + offset);
    }
}
// Convenience overload: copies all mRecCount records of a column to the host.
void CudaSet::CopyColumnToHost(string colname)
{
    CopyColumnToHost(colname, /*offset=*/0, /*RecCount=*/mRecCount);
}
// Copies `count` device records of every column to the host at `offset`.
void CudaSet::CopyToHost(size_t offset, size_t count)
{
    for (const auto& cname : columnNames)
        CopyColumnToHost(cname, offset, count);
}
// Raw device pointer to a float column's data (inserts an empty entry if the
// name is unknown, per map operator[] semantics).
float_type* CudaSet::get_float_type_by_name(string name)
{
    auto& col = d_columns_float[name];
    return thrust::raw_pointer_cast(col.data());
}
// Raw device pointer to an int column's data.
int_type* CudaSet::get_int_by_name(string name)
{
    auto& col = d_columns_int[name];
    return thrust::raw_pointer_cast(col.data());
}
// Raw host pointer to a float column's data.
float_type* CudaSet::get_host_float_by_name(string name)
{
    auto& col = h_columns_float[name];
    return thrust::raw_pointer_cast(col.data());
}
// Raw host pointer to an int column's data.
int_type* CudaSet::get_host_int_by_name(string name)
{
    auto& col = h_columns_int[name];
    return thrust::raw_pointer_cast(col.data());
}
// Marks group boundaries for a GROUP BY over the columns in columnRef.
// For each column, adjacent rows that differ set a flag in d_group, and the
// flags are OR-ed into the persistent `grp` vector; the last row is always a
// boundary. Timestamp columns (ts_cols) are bucketed into calendar intervals
// (grp_val like "1YEAR", "3MONTH", ...) via lower_bound against a generated
// interval table instead of raw value comparison. Sets grp_count to the
// number of groups found. Assumes rows are already sorted by the key.
void CudaSet::GroupBy(stack<string> columnRef)
{
    if(grp.size() < mRecCount)
        grp.resize(mRecCount);
    thrust::fill(grp.begin(), grp.begin()+mRecCount,0);
    if(scratch.size() < mRecCount)
        scratch.resize(mRecCount*sizeof(bool));
    // d_group is a per-column boundary mask living in the scratch buffer.
    thrust::device_ptr<bool> d_group((bool*)thrust::raw_pointer_cast(scratch.data()));
    d_group[mRecCount-1] = 1;
    // Loop consumes the stack; i is never incremented — termination comes from pop().
    for(int i = 0; i < columnRef.size(); columnRef.pop()) {
        if(ts_cols[columnRef.top()]) {
            // Timestamp column: build an interval table [t0, t1, ...) in ms
            // and classify each row by lower_bound into that table.
            queue<string> fields;
            fields.push(columnRef.top());
            copyFinalize(this, fields,1);
            time_t start_t;
            std::vector<time_t> rcol;
            thrust::device_vector<int_type> unq(mRecCount);
            thrust::copy(d_columns_int[columnRef.top()].begin(), d_columns_int[columnRef.top()].begin() + mRecCount, unq.begin());
            auto result_end = thrust::unique(unq.begin(), unq.end());
            // Values are epoch milliseconds; skip a leading zero entry.
            if(unq[0] != 0 || mRecCount == 1)
                start_t = unq[0]/1000;
            else {
                start_t = unq[1]/1000;
            };
            time_t end_t = unq[(result_end-unq.begin())-1]/1000;
            cout << "start end " << start_t << " " << end_t << endl;
            //int year_start, year_end, month_start, month_end, day_start, day_end, hour_start, hour_end, minute_start, minute_end, second_start, second_end;
            //struct tm my_tm, my_tm1;
            auto my_tm = *gmtime (&start_t);
            auto my_tm1 = *gmtime (&end_t );
            //cout << my_tm.tm_year << " " << my_tm1.tm_year << " " << my_tm.tm_min << " " << my_tm1.tm_min << " " << my_tm.tm_hour << " " << my_tm1.tm_hour << endl;
            rcol.push_back(0);//1970/01/01
            // grp_val is like "3YEAR": leading number is the interval length.
            auto pos = grp_val.find("YEAR");
            int grp_num;
            if(pos != string::npos) {
                grp_num = stoi(grp_val.substr(0, pos));
                // Truncate the start time to the beginning of its year.
                my_tm.tm_mon = 0;
                my_tm.tm_mday = 1;
                my_tm.tm_hour = 0;
                my_tm.tm_min = 0;
                my_tm.tm_sec = 0;
                start_t = tm_to_time_t_utc(&my_tm);
                rcol.push_back(start_t*1000);
                while(start_t <= end_t) {
                    start_t = add_interval(start_t, grp_num, 0, 0, 0, 0, 0);
                    rcol.push_back(start_t*1000);
                };
            }
            else {
                pos = grp_val.find("MONTH");
                int grp_num;
                if(pos != string::npos) {
                    grp_num = stoi(grp_val.substr(0, pos));
                    // Truncate to the beginning of the month.
                    my_tm.tm_mday = 1;
                    my_tm.tm_hour = 0;
                    my_tm.tm_min = 0;
                    my_tm.tm_sec = 0;
                    start_t = tm_to_time_t_utc(&my_tm);
                    cout << "interval " << start_t << endl;
                    rcol.push_back(start_t*1000);
                    while(start_t <= end_t) {
                        start_t = add_interval(start_t, 0, grp_num, 0, 0, 0, 0);
                        cout << "interval " << start_t << endl;
                        rcol.push_back(start_t*1000);
                    };
                }
                else {
                    pos = grp_val.find("DAY");
                    int grp_num;
                    if(pos != string::npos) {
                        grp_num = stoi(grp_val.substr(0, pos));
                        // Truncate to midnight.
                        my_tm.tm_hour = 0;
                        my_tm.tm_min = 0;
                        my_tm.tm_sec = 0;
                        start_t = tm_to_time_t_utc(&my_tm);
                        rcol.push_back(start_t*1000);
                        while(start_t <= end_t) {
                            start_t = add_interval(start_t, 0, 0, grp_num, 0, 0, 0);
                            rcol.push_back(start_t*1000);
                        };
                    }
                    else {
                        pos = grp_val.find("HOUR");
                        int grp_num;
                        if(pos != string::npos) {
                            grp_num = stoi(grp_val.substr(0, pos));
                            // Truncate to the top of the hour.
                            my_tm.tm_min = 0;
                            my_tm.tm_sec = 0;
                            start_t = tm_to_time_t_utc(&my_tm);
                            rcol.push_back(start_t*1000);
                            while(start_t <= end_t) {
                                start_t = add_interval(start_t, 0, 0, 0, grp_num, 0, 0);
                                rcol.push_back(start_t*1000);
                            };
                        }
                        else {
                            pos = grp_val.find("MINUTE");
                            int grp_num;
                            if(pos != string::npos) {
                                grp_num = stoi(grp_val.substr(0, pos));
                                // Truncate to the whole minute.
                                my_tm.tm_sec = 0;
                                start_t = tm_to_time_t_utc(&my_tm);
                                rcol.push_back(start_t*1000);
                                while(start_t <= end_t) {
                                    start_t = add_interval(start_t, 0, 0, 0, 0, grp_num, 0);
                                    rcol.push_back(start_t*1000);
                                };
                            }
                            else {
                                pos = grp_val.find("SECOND");
                                int grp_num;
                                if(pos != string::npos) {
                                    grp_num = stoi(grp_val.substr(0, pos));
                                    start_t = tm_to_time_t_utc(&my_tm);
                                    rcol.push_back(start_t*1000);
                                    while(start_t <= end_t) {
                                        start_t = add_interval(start_t, 0, 0, 0, 0, 0, grp_num);
                                        rcol.push_back(start_t*1000);
                                    };
                                }
                            }
                        }
                    }
                }
            };
            //thrust::device_vector<unsigned int> output(mRecCount);
            // Classify each timestamp into its interval bucket, then mark a
            // boundary wherever the bucket index changes between neighbors.
            rcol_matches.resize(mRecCount);
            rcol_dev.resize(rcol.size());
            thrust::copy(rcol.data(), rcol.data() + rcol.size(), rcol_dev.begin());
            thrust::lower_bound(rcol_dev.begin(), rcol_dev.end(), d_columns_int[columnRef.top()].begin(), d_columns_int[columnRef.top()].begin() + mRecCount, rcol_matches.begin());
            thrust::transform(rcol_matches.begin(), rcol_matches.begin() + mRecCount - 1, rcol_matches.begin()+1, d_group, thrust::not_equal_to<unsigned int>());
            thrust::transform(rcol_matches.begin(), rcol_matches.begin() + mRecCount, rcol_matches.begin(), decrease());
            d_group[mRecCount-1] = 1;
        }
        else {
            // Regular column: compare each element with its successor at the
            // column's compressed bit width (cpy_bits) when available.
            unsigned int bits;
            if(cpy_bits.empty())
                bits = 0;
            else
                bits = cpy_bits[columnRef.top()];
            if(bits == 8) {
                if (type[columnRef.top()] != 1) { // int_type
                    thrust::device_ptr<unsigned char> src((unsigned char*)thrust::raw_pointer_cast(d_columns_int[columnRef.top()].data()));
                    thrust::transform(src, src + mRecCount - 1, src+1, d_group, thrust::not_equal_to<unsigned char>());
                }
                else {
                    thrust::device_ptr<unsigned char> src((unsigned char*)thrust::raw_pointer_cast(d_columns_float[columnRef.top()].data()));
                    thrust::transform(src, src + mRecCount - 1, src+1, d_group, thrust::not_equal_to<unsigned char>());
                };
            }
            else if(bits == 16) {
                if (type[columnRef.top()] != 1) { // int_type
                    thrust::device_ptr<unsigned short int> src((unsigned short int*)thrust::raw_pointer_cast(d_columns_int[columnRef.top()].data()));
                    thrust::transform(src, src + mRecCount - 1, src+1, d_group, thrust::not_equal_to<unsigned short int>());
                }
                else {
                    thrust::device_ptr<unsigned short int> src((unsigned short int*)thrust::raw_pointer_cast(d_columns_float[columnRef.top()].data()));
                    thrust::transform(src, src + mRecCount - 1, src+1, d_group, thrust::not_equal_to<unsigned short int>());
                };
            }
            else if(bits == 32) {
                if (type[columnRef.top()] != 1) { // int_type
                    thrust::device_ptr<unsigned int> src((unsigned int*)thrust::raw_pointer_cast(d_columns_int[columnRef.top()].data()));
                    thrust::transform(src, src + mRecCount - 1, src+1, d_group, thrust::not_equal_to<unsigned int>());
                }
                else {
                    thrust::device_ptr<unsigned int> src((unsigned int*)thrust::raw_pointer_cast(d_columns_float[columnRef.top()].data()));
                    thrust::transform(src, src + mRecCount - 1, src+1, d_group, thrust::not_equal_to<unsigned int>());
                };
            }
            else {
                // Full-width comparison (floats use an epsilon-style functor).
                if (type[columnRef.top()] != 1) { // int_type
                    thrust::transform(d_columns_int[columnRef.top()].begin(), d_columns_int[columnRef.top()].begin() + mRecCount - 1,
                                      d_columns_int[columnRef.top()].begin()+1, d_group, thrust::not_equal_to<int_type>());
                }
                else {
                    thrust::transform(d_columns_float[columnRef.top()].begin(), d_columns_float[columnRef.top()].begin() + mRecCount - 1,
                                      d_columns_float[columnRef.top()].begin()+1, d_group, f_not_equal_to());
                };
            }
        };
        // Accumulate this column's boundaries into the overall group mask.
        thrust::transform(d_group, d_group+mRecCount, grp.begin(), grp.begin(), thrust::logical_or<bool>());
    };
    grp_count = thrust::count(grp.begin(), grp.begin()+mRecCount, 1);
    cout << "grp count " << grp_count << endl;
};
// Registers (or grows) an integer column and fills it from a device buffer.
// The fresh device data is also mirrored into the host copy.
void CudaSet::addDeviceColumn(int_type* col, string colname, size_t recCount)
{
    const bool is_new =
        std::find(columnNames.begin(), columnNames.end(), colname) == columnNames.end();

    if (is_new) {
        columnNames.push_back(colname);
        type[colname] = 0;
        d_columns_int[colname] = thrust::device_vector<int_type>(recCount);
        h_columns_int[colname] = thrust::host_vector<int_type, uninitialized_host_allocator<int_type> >(recCount);
    }
    else {
        // Column exists already; grow its storage if needed.
        if (d_columns_int[colname].size() < recCount)
            d_columns_int[colname].resize(recCount);
        if (h_columns_int[colname].size() < recCount)
            h_columns_int[colname].resize(recCount);
    }

    // Device -> column vector, then device -> host mirror.
    thrust::device_ptr<int_type> src(col);
    thrust::copy(src, src + recCount, d_columns_int[colname].begin());
    thrust::copy(d_columns_int[colname].begin(), d_columns_int[colname].begin() + recCount,
                 h_columns_int[colname].begin());
}
// Registers (or grows) a float column, records its decimal flag, and fills
// the device vector from a device buffer (no host mirror — unlike the
// int_type overload).
void CudaSet::addDeviceColumn(float_type* col, string colname, size_t recCount, bool is_decimal)
{
    const bool is_new =
        std::find(columnNames.begin(), columnNames.end(), colname) == columnNames.end();

    if (is_new) {
        columnNames.push_back(colname);
        type[colname] = 1;
        d_columns_float[colname] = thrust::device_vector<float_type>(recCount);
        h_columns_float[colname] = thrust::host_vector<float_type, uninitialized_host_allocator<float_type> >(recCount);
    }
    else {
        // Column exists already; grow its storage if needed.
        if (d_columns_float[colname].size() < recCount)
            d_columns_float[colname].resize(recCount);
        if (h_columns_float[colname].size() < recCount)
            h_columns_float[colname].resize(recCount);
    }

    decimal[colname] = is_decimal;
    thrust::device_ptr<float_type> src(col);
    thrust::copy(src, src + recCount, d_columns_float[colname].begin());
}
// Builds a row permutation for sorting the set by the columns queued in `sf`,
// starting from the identity and refining it once per column via
// update_permutation (ascending order). Int/float columns are handled on the
// device; char columns round-trip the permutation through the host.
void CudaSet::gpu_perm(queue<string> sf, thrust::device_vector<unsigned int>& permutation) {
    permutation.resize(mRecCount);
    thrust::sequence(permutation.begin(), permutation.begin() + mRecCount,0,1);
    unsigned int* raw_ptr = thrust::raw_pointer_cast(permutation.data());
    void* temp;
    // Scratch for update_permutation: 8 bytes per record (widest of
    // int_type/float_type).
    CUDA_SAFE_CALL(cudaMalloc((void **) &temp, mRecCount*8));
    string sort_type = "ASC";
    while(!sf.empty()) {
        if (type[sf.front()] == 0) {
            update_permutation(d_columns_int[sf.front()], raw_ptr, mRecCount, sort_type, (int_type*)temp, 64);
        }
        else if (type[sf.front()] == 1) {
            update_permutation(d_columns_float[sf.front()], raw_ptr, mRecCount, sort_type, (float_type*)temp, 64);
        }
        else {
            // Char column: copy the permutation to the host, update it there,
            // then copy it back to the device.
            thrust::host_vector<unsigned int> permutation_h = permutation;
            char* temp1 = new char[char_size[sf.front()]*mRecCount];
            update_permutation_char_host(h_columns_char[sf.front()], permutation_h.data(), mRecCount, sort_type, temp1, char_size[sf.front()]);
            delete [] temp1;
            permutation = permutation_h;
        };
        sf.pop();
    };
    cudaFree(temp);
}
// Writes mCount records (starting at `offset`) of every column to disk as one
// or more compressed segment files named "<file_name>.<col>.<seg>".
// - When appending, the existing header of the first column supplies the old
//   record/segment counts so the new segments extend the table.
// - If "<file_name>.interval" and ".key" exist, interval bookkeeping is run
//   via calc_intervals and columns are compressed from their device copies.
// - If op_sort is non-empty the segment is first sorted by those columns
//   (gpu_perm) and may be split into partition_count sub-segments.
// Int and decimal-float columns go through pfor_compress; plain floats are
// written raw (comp_type 3); char columns go through compress_char, with a
// hash of pre-existing strings rebuilt when appending.
void CudaSet::compress(string file_name, size_t offset, unsigned int check_type, unsigned int check_val, size_t mCount, const bool append)
{
    string str(file_name);
    thrust::device_vector<unsigned int> permutation;
    long long int oldCount;
    bool int_check = 0;
    void* d;   // device staging buffer, large enough for one column's values
    CUDA_SAFE_CALL(cudaMalloc((void **) &d, mCount*float_size));
    total_count = total_count + mCount;
    if (mCount > total_max && op_sort.empty()) {
        total_max = mCount;
    };
    // On first append, read back the existing header to carry the old totals.
    if(!total_segments && append) {
        string s= file_name + "." + columnNames[0] + ".header";
        ifstream binary_file(s.c_str(),ios::binary);
        if(binary_file) {
            binary_file.read((char *)&oldCount, 8);
            binary_file.read((char *)&total_segments, 4);
            binary_file.read((char *)&maxRecs, 4);
            if(total_max < maxRecs)
                total_max = maxRecs;
            binary_file.close();
            total_count = oldCount + mCount;
        };
    };
    // Interval tables: ".interval" holds "start|end" column names, ".key" the
    // index column; when both exist, run interval bookkeeping first.
    string s = file_name + ".interval";
    ifstream f(s.c_str());
    if (f.good()) {
        f.seekg (0, f.end);
        int length = f.tellg();
        f.seekg (0, f.beg);
        char* buff = new char[length];
        f.read(buff, length);
        f.close();
        char* p = strtok(buff, "|");
        string s1(p);
        p = strtok(NULL, "|");
        string s2(p);
        delete [] buff;
        s = file_name + ".key";
        ifstream f1(s.c_str());
        if (f1.good()) {
            f1.seekg (0, f1.end);
            length = f1.tellg();
            f1.seekg (0, f1.beg);
            buff = new char[length+1];
            buff[length] = 0;
            f1.read(buff, length);
            f1.close();
            string s3(buff);
            delete [] buff;
            load_file_name = file_name;
            calc_intervals(s1, s2, s3, total_segments, append);
            int_check = 1;   // int columns will be compressed from device copies
        };
    };
    if(!op_sort.empty()) { //sort the segment
        gpu_perm(op_sort, permutation);
    };
    // here we need to check for partitions and if partition_count > 0 -> create partitions
    if(mCount < partition_count || partition_count == 0)
        partition_count = 1;
    unsigned int partition_recs = mCount/partition_count;
    if(!op_sort.empty()) {
        if(total_max < partition_recs)
            total_max = partition_recs;
    };
    total_segments++;
    old_segments = total_segments;
    size_t new_offset;
    for(unsigned int i = 0; i < columnNames.size(); i++) {
        std::clock_t start1 = std::clock();
        string colname = columnNames[i];
        str = file_name + "." + colname;
        curr_file = str;
        str += "." + to_string(total_segments-1);
        new_offset = 0;
        if(type[colname] == 0) {
            // Integer column.
            thrust::device_ptr<int_type> d_col((int_type*)d);
            if(!op_sort.empty()) {
                // Sorted path: gather by the permutation, then write each
                // partition as its own segment.
                thrust::gather(permutation.begin(), permutation.end(), d_columns_int[colname].begin(), d_col);
                for(unsigned int p = 0; p < partition_count; p++) {
                    str = file_name + "." + colname;
                    curr_file = str;
                    str += "." + to_string(total_segments-1);
                    if (p < partition_count - 1) {
                        pfor_compress( (int_type*)d + new_offset, partition_recs*int_size, str, h_columns_int[colname], 0);
                    }
                    else {
                        // Last partition takes the remainder.
                        pfor_compress( (int_type*)d + new_offset, (mCount - partition_recs*p)*int_size, str, h_columns_int[colname], 0);
                    };
                    new_offset = new_offset + partition_recs;
                    total_segments++;
                };
            }
            else {
                if(!int_check) {
                    thrust::copy(h_columns_int[colname].begin() + offset, h_columns_int[colname].begin() + offset + mCount, d_col);
                    pfor_compress( d, mCount*int_size, str, h_columns_int[colname], 0);
                }
                else {
                    // calc_intervals already left the data on the device.
                    pfor_compress( thrust::raw_pointer_cast(d_columns_int[colname].data()), mCount*int_size, str, h_columns_int[colname], 0);
                };
            };
        }
        else if(type[colname] == 1) {
            if(decimal[colname]) {
                // Decimal float: convert to scaled long long, then pfor-compress.
                thrust::device_ptr<float_type> d_col((float_type*)d);
                if(!op_sort.empty()) {
                    thrust::gather(permutation.begin(), permutation.end(), d_columns_float[colname].begin(), d_col);
                    thrust::device_ptr<long long int> d_col_dec((long long int*)d);
                    thrust::transform(d_col,d_col+mCount,d_col_dec, float_to_long());
                    for(unsigned int p = 0; p < partition_count; p++) {
                        str = file_name + "." + colname;
                        curr_file = str;
                        str += "." + to_string(total_segments-1);
                        if (p < partition_count - 1)
                            pfor_compress( (int_type*)d + new_offset, partition_recs*float_size, str, h_columns_float[colname], 1);
                        else
                            pfor_compress( (int_type*)d + new_offset, (mCount - partition_recs*p)*float_size, str, h_columns_float[colname], 1);
                        new_offset = new_offset + partition_recs;
                        total_segments++;
                    };
                }
                else {
                    thrust::copy(h_columns_float[colname].begin() + offset, h_columns_float[colname].begin() + offset + mCount, d_col);
                    thrust::device_ptr<long long int> d_col_dec((long long int*)d);
                    thrust::transform(d_col,d_col+mCount,d_col_dec, float_to_long());
                    pfor_compress( d, mCount*float_size, str, h_columns_float[colname], 1);
                };
            }
            else { // do not compress -- float
                // Raw write: [count:4][values][comp_type=3:4], appended.
                thrust::device_ptr<float_type> d_col((float_type*)d);
                if(!op_sort.empty()) {
                    thrust::gather(permutation.begin(), permutation.end(), d_columns_float[colname].begin(), d_col);
                    thrust::copy(d_col, d_col+mRecCount, h_columns_float[colname].begin());
                    for(unsigned int p = 0; p < partition_count; p++) {
                        str = file_name + "." + colname;
                        curr_file = str;
                        str += "." + to_string(total_segments-1);
                        unsigned int curr_cnt;
                        if (p < partition_count - 1)
                            curr_cnt = partition_recs;
                        else
                            curr_cnt = mCount - partition_recs*p;
                        fstream binary_file(str.c_str(),ios::out|ios::binary|fstream::app);
                        binary_file.write((char *)&curr_cnt, 4);
                        binary_file.write((char *)(h_columns_float[colname].data() + new_offset),curr_cnt*float_size);
                        new_offset = new_offset + partition_recs;
                        unsigned int comp_type = 3;
                        binary_file.write((char *)&comp_type, 4);
                        binary_file.close();
                    };
                }
                else {
                    fstream binary_file(str.c_str(),ios::out|ios::binary|fstream::app);
                    binary_file.write((char *)&mCount, 4);
                    binary_file.write((char *)(h_columns_float[colname].data() + offset),mCount*float_size);
                    unsigned int comp_type = 3;
                    binary_file.write((char *)&comp_type, 4);
                    binary_file.close();
                };
            };
        }
        else { //char
            //populate char_hash
            // When appending to an existing table, rebuild the string-hash map
            // from the strings already on disk so duplicates get the same ids.
            // NOTE(review): oldCount is only set when the header file existed
            // on the first append — assumed to hold here; verify.
            if(append && total_segments == 1) {
                string s= file_name + "." + colname;
                ifstream binary_file(s.c_str(),ios::binary);
                if(binary_file) {
                    char* strings = new char[oldCount*char_size[colname]];
                    binary_file.read(strings, oldCount*char_size[colname]);
                    binary_file.close();
                    unsigned int ind = std::find(columnNames.begin(), columnNames.end(), colname) - columnNames.begin();
                    for (unsigned int z = 0 ; z < oldCount; z++) {
                        char_hash[ind][MurmurHash64A(&strings[z*char_size[colname]], char_size[colname], hash_seed)/2] = z;
                    };
                    delete [] strings;
                };
            };
            if(!op_sort.empty()) {
                // Apply the sort permutation to the host-side char data.
                unsigned int* h_permutation = new unsigned int[mRecCount];
                thrust::copy(permutation.begin(), permutation.end(), h_permutation);
                char* t = new char[char_size[colname]*mRecCount];
                apply_permutation_char_host(h_columns_char[colname], h_permutation, mRecCount, t, char_size[colname]);
                delete [] h_permutation;
                thrust::copy(t, t+ char_size[colname]*mRecCount, h_columns_char[colname]);
                delete [] t;
                for(unsigned int p = 0; p < partition_count; p++) {
                    str = file_name + "." + colname;
                    curr_file = str;
                    str += "." + to_string(total_segments-1);
                    if (p < partition_count - 1)
                        compress_char(str, colname, partition_recs, new_offset, total_segments-1);
                    else
                        compress_char(str, colname, mCount - partition_recs*p, new_offset, total_segments-1);
                    new_offset = new_offset + partition_recs;
                    total_segments++;
                };
            }
            else {
                compress_char(str, colname, mCount, offset, total_segments-1);
            };
        };
        // Refresh the per-column header once the segment data is on disk.
        if((check_type == 1 && fact_file_loaded) || (check_type == 1 && check_val == 0)) {
            if(!op_sort.empty())
                writeHeader(file_name, colname, total_segments-1);
            else {
                writeHeader(file_name, colname, total_segments);
            };
        };
        // Reset so every column starts numbering from the same base segment.
        total_segments = old_segments;
    };
    cudaFree(d);
    if(!op_sort.empty()) {
        total_segments = (old_segments-1)+partition_count;
    };
    permutation.resize(0);
    permutation.shrink_to_fit();
}
// Sort the set on (dt1, index), derive interval end-points into dt2 on the
// GPU, and — when 'append' is set — merge the freshly computed intervals
// into the already-stored segments of column dt2 on disk.
// NOTE(review): fopen/fread results below are not checked; a missing
// ".hash" file would crash here — confirm upstream guarantees it exists.
void CudaSet::calc_intervals(string dt1, string dt2, string index, unsigned int total_segs, bool append) {
alloced_switch = 1;
not_compressed = 1;
thrust::device_vector<unsigned int> permutation;
thrust::device_vector<int_type> stencil(maxRecs);
thrust::device_vector<int_type> d_dt2(maxRecs);
thrust::device_vector<int_type> d_index(maxRecs);
phase_copy = 0;
// Build a GPU permutation that sorts on dt1 first, then on index.
queue<string> sf;
sf.push(dt1);
sf.push(index);
gpu_perm(sf, permutation);
// Apply that permutation to every column: integer columns are permuted on
// the device, string columns on the host via a temporary buffer.
for(unsigned int i = 0; i < columnNames.size(); i++) {
if(type[columnNames[i]] == 0)
apply_permutation(d_columns_int[columnNames[i]], thrust::raw_pointer_cast(permutation.data()), mRecCount, (int_type*)thrust::raw_pointer_cast(stencil.data()), 0);
else {
unsigned int* h_permutation = new unsigned int[mRecCount];
thrust::copy(permutation.begin(), permutation.end(), h_permutation);
char* t = new char[char_size[columnNames[i]]*mRecCount];
apply_permutation_char_host(h_columns_char[columnNames[i]], h_permutation, mRecCount, t, char_size[columnNames[i]]);
delete [] h_permutation;
thrust::copy(t, t+ char_size[columnNames[i]]*mRecCount, h_columns_char[columnNames[i]]);
delete [] t;
};
};
// A char-typed index is replaced by 64-bit Murmur hashes (halved) so it can
// be compared as an integer column on the device.
if(type[index] == 2) {
d_columns_int[index] = thrust::device_vector<int_type>(mRecCount);
h_columns_int[index] = thrust::host_vector<int_type>(mRecCount);
for(int i = 0; i < mRecCount; i++)
h_columns_int[index][i] = MurmurHash64A(&h_columns_char[index][i*char_size[index]], char_size[index], hash_seed)/2;
d_columns_int[index] = h_columns_int[index];
};
// gpu_interval fills dt2 from the following row that shares the same index
// key (one functor invocation per adjacent row pair).
thrust::counting_iterator<unsigned int> begin(0);
gpu_interval ff(thrust::raw_pointer_cast(d_columns_int[dt1].data()), thrust::raw_pointer_cast(d_columns_int[dt2].data()), thrust::raw_pointer_cast(d_columns_int[index].data()));
thrust::for_each(begin, begin + mRecCount - 1, ff);
auto stack_count = mRecCount;
if(append) {
not_compressed = 0;
// Size a shared scratch buffer large enough for either the int column or
// the widest char column; reuse alloced_tmp across calls when possible.
size_t mysz = 8;
if(char_size[index] > int_size)
mysz = char_size[index];
if(mysz*maxRecs > alloced_sz) {
if(alloced_sz) {
cudaFree(alloced_tmp);
};
cudaMalloc((void **) &alloced_tmp, mysz*maxRecs);
alloced_sz = mysz*maxRecs;
}
thrust::device_ptr<int_type> d_col((int_type*)alloced_tmp);
d_columns_int[dt2].resize(0);
thrust::device_vector<unsigned int> output(stack_count);
// Walk each stored segment; only segments containing open intervals
// (a zero in dt2) need to be patched and re-compressed.
for(int i = 0; i < total_segments; i++) {
CopyColumnToGpu(dt2, i, 0);
if(thrust::count(d_col, d_col+mRecCount,0)) {
thrust::copy(d_col, d_col+mRecCount, d_dt2.begin());
if(type[index] == 2) {
// Char index: read the pre-computed hash sidecar file for this segment.
string f1 = load_file_name + "." + index + "." + to_string(i) + ".hash";
FILE* f = fopen(f1.c_str(), "rb" );
unsigned int cnt;
fread(&cnt, 4, 1, f);
unsigned long long int* buff = new unsigned long long int[cnt];
fread(buff, cnt*8, 1, f);
fclose(f);
thrust::copy(buff, buff + cnt, d_index.begin());
delete [] buff;
}
else {
CopyColumnToGpu(index, i, 0);
thrust::copy(d_col, d_col+mRecCount, d_index.begin());
};
// Match each stored row's key against the sorted in-memory keys, then
// let gpu_interval_set close its interval with the new data.
thrust::lower_bound(d_columns_int[index].begin(), d_columns_int[index].begin()+stack_count, d_index.begin(), d_index.begin() + mRecCount, output.begin());
gpu_interval_set f(thrust::raw_pointer_cast(d_columns_int[dt1].data()), thrust::raw_pointer_cast(d_dt2.data()),
thrust::raw_pointer_cast(d_index.data()), thrust::raw_pointer_cast(d_columns_int[index].data()),
thrust::raw_pointer_cast(output.data()));
thrust::for_each(begin, begin + mRecCount, f);
string str = load_file_name + "." + dt2 + "." + to_string(i);;
pfor_compress( thrust::raw_pointer_cast(d_dt2.data()), mRecCount*int_size, str, h_columns_int[dt2], 0);
};
};
}
};
// Write the "<file>.<col>.header" metadata file for a column:
// total record count (8 bytes), segment count (4), maximum segment size (4)
// and the compression-count entry for this column (4). The field widths are
// part of the on-disk format and must not change.
void CudaSet::writeHeader(string file_name, string colname, unsigned int tot_segs) {
    string base = file_name + "." + colname;
    string header_path = base + ".header";
    fstream hdr(header_path.c_str(), ios::out|ios::binary|ios::trunc);
    hdr.write((char *)&total_count, 8);
    hdr.write((char *)&tot_segs, 4);
    hdr.write((char *)&total_max, 4);
    hdr.write((char *)&cnt_counts[base], 4);
    hdr.close();
};
// Rewrite a column's header file with an updated record count and maximum
// segment size (used after appends/updates). Note that only the low 4 bytes
// of maxRecs1 are stored, matching the existing on-disk layout.
void CudaSet::reWriteHeader(string file_name, string colname, unsigned int tot_segs, size_t newRecs, size_t maxRecs1) {
    string header_path = file_name + "." + colname + ".header";
    fstream hdr(header_path.c_str(), ios::out|ios::binary|ios::trunc);
    hdr.write((char *)&newRecs, 8);
    hdr.write((char *)&tot_segs, 4);
    hdr.write((char *)&maxRecs1, 4);
    hdr.close();
};
// Persist the sort/presort metadata for this set.
// Format of "<file>.sort" / "<file>.presort": a 4-byte column count, then for
// each column a 4-byte name length followed by the raw name bytes. When the
// corresponding queue is empty, any stale metadata file is removed instead.
// Fix: the verbose trace previously printed 'idx' — which at that point held
// the column count (first iteration) or the previous name's length — instead
// of the sort column's name.
void CudaSet::writeSortHeader(string file_name)
{
    string str(file_name);
    unsigned int idx;
    if(!op_sort.empty()) {
        str += ".sort";
        fstream binary_file(str.c_str(),ios::out|ios::binary|ios::trunc);
        idx = (unsigned int)op_sort.size();
        binary_file.write((char *)&idx, 4);
        queue<string> os(op_sort);
        while(!os.empty()) {
            if(verbose)
                cout << "sorted on " << os.front() << endl; // log the column name, not a stale count
            idx = os.front().size();
            binary_file.write((char *)&idx, 4);
            binary_file.write(os.front().data(), idx);
            os.pop();
        };
        binary_file.close();
    }
    else {
        str += ".sort";
        remove(str.c_str());
    };
    str = file_name;
    if(!op_presort.empty()) {
        str += ".presort";
        fstream binary_file(str.c_str(),ios::out|ios::binary|ios::trunc);
        idx = (unsigned int)op_presort.size();
        binary_file.write((char *)&idx, 4);
        queue<string> os(op_presort);
        while(!os.empty()) {
            idx = os.front().size();
            binary_file.write((char *)&idx, 4);
            binary_file.write(os.front().data(), idx);
            os.pop();
        };
        binary_file.close();
    }
    else {
        str += ".presort";
        remove(str.c_str());
    };
}
using namespace mgpu;
// Stream up to 'limit' records (0 = all) through the row_cb callback, one
// record at a time, formatting every column into a fixed per-column text
// field. Handles both the fully-decompressed in-host case and the segmented
// case where columns are staged through the GPU segment by segment.
// NOTE(review): fields are MAXFIELDSIZE bytes and sprintf/fread into them is
// unchecked — confirm no column can exceed 1400 bytes.
void CudaSet::Display(unsigned int limit, bool binary, bool term)
{
#define MAXCOLS 128
#define MAXFIELDSIZE 1400
//-- This should/will be converted to an array holding pointers of malloced sized structures--
char bigbuf[MAXCOLS * MAXFIELDSIZE];
memset(bigbuf, 0, MAXCOLS * MAXFIELDSIZE);
char *fields[MAXCOLS];
const char *dcolumns[MAXCOLS];
size_t mCount; // num records in play
bool print_all = 0;
string ss, str;
int rows = 0;
if(limit != 0 && limit < mRecCount)
mCount = limit;
else {
mCount = mRecCount;
print_all = 1;
};
cout << "mRecCount=" << mRecCount << " mcount = " << mCount << " term " << term << " limit=" << limit << " print_all=" << print_all << endl;
unsigned int cc =0;
// Open each dictionary-backed string column's data file once up front and
// remember its fixed record length.
unordered_map<string, FILE*> file_map;
unordered_map<string, unsigned int> len_map;
for(unsigned int i = 0; i < columnNames.size(); i++)
{
fields[cc] = &(bigbuf[cc*MAXFIELDSIZE]); // a hack to avoid malloc overheads - refine later
dcolumns[cc++] = columnNames[i].c_str();
if(string_map.find(columnNames[i]) != string_map.end()) {
auto s = string_map[columnNames[i]];
auto pos = s.find_first_of(".");
auto len = data_dict[s.substr(0, pos)][s.substr(pos+1)].col_length;
FILE *f;
f = fopen(string_map[columnNames[i]].c_str(), "rb");
file_map[string_map[columnNames[i]]] = f;
len_map[string_map[columnNames[i]]] = len;
};
};
// The goal here is to loop fast and avoid any double handling of outgoing data - pointers are good.
// Fast path: data is already decompressed on the host and unfiltered.
if(not_compressed && prm_d.size() == 0) {
for(unsigned int i=0; i < mCount; i++) { // for each record
for(unsigned int j=0; j < columnNames.size(); j++) { // for each col
if (type[columnNames[j]] != 1) {
if(string_map.find(columnNames[j]) == string_map.end()) {
if(decimal_zeroes[columnNames[j]]) {
// Fixed-point integer: re-insert the decimal point before printing.
str = std::to_string(h_columns_int[columnNames[j]][i]);
//cout << "decimals " << columnNames[j] << " " << decimal_zeroes[columnNames[j]] << " " << h_columns_int[columnNames[j]][i] << endl;
while(str.length() <= decimal_zeroes[columnNames[j]])
str = '0' + str;
str.insert(str.length()- decimal_zeroes[columnNames[j]], ".");
sprintf(fields[j], "%s", str.c_str());
}
else {
if(!ts_cols[columnNames[j]])
sprintf(fields[j], "%lld", (h_columns_int[columnNames[j]])[i] );
else {
// Timestamp column stored as milliseconds since epoch (UTC via gmtime).
time_t ts = (h_columns_int[columnNames[j]][i])/1000;
auto ti = gmtime(&ts);
char buffer[30];
auto rem = (h_columns_int[columnNames[j]][i])%1000;
strftime(buffer,30,"%Y-%m-%d %H.%M.%S", ti);
//fprintf(file_pr, "%s", buffer);
//fprintf(file_pr, ".%d", rem);
// NOTE(review): rem is int_type arithmetic (likely 64-bit) printed
// with "%d" — confirm int_type width or this is a format mismatch.
sprintf(fields[j], "%s.%d", buffer,rem);
/*time_t tt = h_columns_int[columnNames[j]][i];
auto ti = localtime(&tt);
char buffer[10];
strftime(buffer,80,"%Y-%m-%d", ti);
sprintf(fields[j], "%s", buffer);
*/
};
};
}
else {
// Dictionary string: the stored int is a record index into the dictionary file.
fseek(file_map[string_map[columnNames[j]]], h_columns_int[columnNames[j]][i] * len_map[string_map[columnNames[j]]], SEEK_SET);
fread(fields[j], 1, len_map[string_map[columnNames[j]]], file_map[string_map[columnNames[j]]]);
fields[j][len_map[string_map[columnNames[j]]]] ='\0'; // zero terminate string
};
}
else
sprintf(fields[j], "%.2f", (h_columns_float[columnNames[j]])[i] );
};
row_cb(mColumnCount, (char **)fields, (char **)dcolumns);
rows++;
};
}
else {
// Segmented path: pull each segment through the GPU, copy to host, emit.
queue<string> op_vx;
for(unsigned int i = 0; i < columnNames.size(); i++)
op_vx.push(columnNames[i]);
if(prm_d.size() || source) {
allocColumns(this, op_vx);
};
unsigned int curr_seg = 0;
size_t cnt = 0;
size_t curr_count, sum_printed = 0;
resize(maxRecs);
while(sum_printed < mCount || print_all) {
if(prm_d.size() || source) { // if host arrays are empty
copyColumns(this, op_vx, curr_seg, cnt);
size_t olRecs = mRecCount;
mRecCount = olRecs;
CopyToHost(0,mRecCount);
if(sum_printed + mRecCount <= mCount || print_all)
curr_count = mRecCount;
else
curr_count = mCount - sum_printed;
}
else
curr_count = mCount;
sum_printed = sum_printed + mRecCount;
for(unsigned int i=0; i < curr_count; i++) {
for(unsigned int j=0; j < columnNames.size(); j++) {
if (type[columnNames[j]] != 1) {
if(string_map.find(columnNames[j]) == string_map.end())
sprintf(fields[j], "%lld", (h_columns_int[columnNames[j]])[i] );
else {
fseek(file_map[string_map[columnNames[j]]], h_columns_int[columnNames[j]][i] * len_map[string_map[columnNames[j]]], SEEK_SET);
fread(fields[j], 1, len_map[string_map[columnNames[j]]], file_map[string_map[columnNames[j]]]);
fields[j][len_map[string_map[columnNames[j]]]] ='\0'; // zero terminate string
};
}
else
sprintf(fields[j], "%.2f", (h_columns_float[columnNames[j]])[i] );
};
row_cb(mColumnCount, (char **)fields, (char**)dcolumns);
rows++;
};
curr_seg++;
if(curr_seg == segCount)
print_all = 0;
};
}; // end else
// Close the dictionary files opened above.
for(auto it = file_map.begin(); it != file_map.end(); it++)
fclose(it->second);
}
// Write up to 'limit' records (0 = all) either as delimited text
// (binary == 0; to 'file_name' or stdout when 'term') or as the engine's
// binary column format (binary == 1; via compress()/compress_char()).
// An empty set with binary output only refreshes the column headers ("tails").
void CudaSet::Store(const string file_name, const char* sep, const unsigned int limit, const bool binary, const bool append, const bool term)
{
if (mRecCount == 0 && binary == 1 && !term) { // write tails
for(unsigned int j=0; j < columnNames.size(); j++) {
writeHeader(file_name, columnNames[j], total_segments);
};
return;
};
size_t mCount;
bool print_all = 0;
string str;
if(limit != 0 && limit < mRecCount)
mCount = limit;
else {
mCount = mRecCount;
print_all = 1;
};
if(binary == 0) {
// ---- Text export path ----
// Open each dictionary-backed string column's data file once and record
// its fixed record length; 'bf' is scratch for dictionary lookups.
unordered_map<string, FILE*> file_map;
unordered_map<string, unsigned int> len_map;
string bf;
unsigned int max_len = 0;
for(unsigned int j=0; j < columnNames.size(); j++) {
if(string_map.find(columnNames[j]) != string_map.end()) {
auto s = string_map[columnNames[j]];
auto pos = s.find_first_of(".");
auto len = data_dict[s.substr(0, pos)][s.substr(pos+1)].col_length;
if(len > max_len)
max_len = len;
FILE *f;
f = fopen(string_map[columnNames[j]].c_str(), "rb");
file_map[string_map[columnNames[j]]] = f;
len_map[string_map[columnNames[j]]] = len;
};
};
// NOTE(review): reserve() does not set the string's size, so fread into
// &bf[0] writes into unsized capacity — technically UB; resize() would be
// the safe form. Confirm before relying on this.
bf.reserve(max_len);
FILE *file_pr;
if(!term) {
file_pr = fopen(file_name.c_str(), "w");
if (!file_pr)
cout << "Could not open file " << file_name << endl;
}
else
file_pr = stdout;
if(not_compressed && prm_d.size() == 0) {
// Fast path: rows already decompressed on the host and unfiltered.
for(unsigned int i=0; i < mCount; i++) {
for(unsigned int j=0; j < columnNames.size(); j++) {
if (type[columnNames[j]] != 1 ) {
if(string_map.find(columnNames[j]) == string_map.end()) {
if(decimal_zeroes[columnNames[j]]) {
// Fixed-point integer: re-insert the decimal point.
str = std::to_string(h_columns_int[columnNames[j]][i]);
//cout << "decimals " << columnNames[j] << " " << decimal_zeroes[columnNames[j]] << " " << h_columns_int[columnNames[j]][i] << endl;
while(str.length() <= decimal_zeroes[columnNames[j]])
str = '0' + str;
str.insert(str.length()- decimal_zeroes[columnNames[j]], ".");
fprintf(file_pr, "%s", str.c_str());
}
else {
if(!ts_cols[columnNames[j]]) {
fprintf(file_pr, "%lld", (h_columns_int[columnNames[j]])[i]);
}
else {
// Millisecond-epoch timestamp, rendered in UTC.
// NOTE(review): rem is likely 64-bit but printed with "%d" —
// confirm int_type width.
time_t ts = (h_columns_int[columnNames[j]][i])/1000;
auto ti = gmtime(&ts);
char buffer[30];
auto rem = (h_columns_int[columnNames[j]][i])%1000;
strftime(buffer,30,"%Y-%m-%d %H.%M.%S", ti);
fprintf(file_pr, "%s", buffer);
fprintf(file_pr, ".%d", rem);
};
};
}
else {
//fprintf(file_pr, "%.*s", string_hash[columnNames[j]][h_columns_int[columnNames[j]][i]].size(), string_hash[columnNames[j]][h_columns_int[columnNames[j]][i]].c_str());
fseek(file_map[string_map[columnNames[j]]], h_columns_int[columnNames[j]][i] * len_map[string_map[columnNames[j]]], SEEK_SET);
fread(&bf[0], 1, len_map[string_map[columnNames[j]]], file_map[string_map[columnNames[j]]]);
fprintf(file_pr, "%.*s", len_map[string_map[columnNames[j]]], bf.c_str());
};
fputs(sep, file_pr);
}
else {
fprintf(file_pr, "%.2f", (h_columns_float[columnNames[j]])[i]);
fputs(sep, file_pr);
}
};
if (i != mCount -1 )
fputs("\n",file_pr);
};
if(!term)
fclose(file_pr);
}
else {
// Segmented path: stage each segment through the GPU, copy to host, print.
queue<string> op_vx;
string ss;
for(unsigned int j=0; j < columnNames.size(); j++)
op_vx.push(columnNames[j]);
if(prm_d.size() || source) {
allocColumns(this, op_vx);
};
unsigned int curr_seg = 0;
size_t cnt = 0;
size_t curr_count, sum_printed = 0;
mRecCount = 0;
resize(maxRecs);
while(sum_printed < mCount || print_all) {
if(prm_d.size() || source) {
copyColumns(this, op_vx, curr_seg, cnt);
if(curr_seg == 0) {
// Record count only becomes known after the first segment is copied.
if(limit != 0 && limit < mRecCount) {
mCount = limit;
print_all = 0;
}
else {
mCount = mRecCount;
print_all = 1;
};
};
// if host arrays are empty
size_t olRecs = mRecCount;
mRecCount = olRecs;
CopyToHost(0,mRecCount);
//cout << "start " << sum_printed << " " << mRecCount << " " << mCount << endl;
if(sum_printed + mRecCount <= mCount || print_all) {
curr_count = mRecCount;
}
else {
curr_count = mCount - sum_printed;
};
}
else {
curr_count = mCount;
};
sum_printed = sum_printed + mRecCount;
//cout << "sum printed " << sum_printed << " " << curr_count << " " << curr_seg << endl;
for(unsigned int i=0; i < curr_count; i++) {
for(unsigned int j=0; j < columnNames.size(); j++) {
if (type[columnNames[j]] != 1) {
if(string_map.find(columnNames[j]) == string_map.end()) {
cout << "here3 " << endl;
if(decimal_zeroes[columnNames[j]]) {
str = std::to_string(h_columns_int[columnNames[j]][i]);
//cout << "decimals " << columnNames[j] << " " << decimal_zeroes[columnNames[j]] << " " << h_columns_int[columnNames[j]][i] << endl;
while(str.length() <= decimal_zeroes[columnNames[j]])
str = '0' + str;
str.insert(str.length()- decimal_zeroes[columnNames[j]], ".");
fprintf(file_pr, "%s", str.c_str());
}
else {
if(!ts_cols[columnNames[j]]) {
fprintf(file_pr, "%lld", (h_columns_int[columnNames[j]])[i]);
}
else {
time_t ts = (h_columns_int[columnNames[j]][i])/1000;
auto ti = gmtime(&ts);
char buffer[30];
auto rem = (h_columns_int[columnNames[j]][i])%1000;
strftime(buffer,30,"%Y-%m-%d %H.%M.%S", ti);
fprintf(file_pr, "%s", buffer);
fprintf(file_pr, ".%d", rem);
};
};
}
else {
fseek(file_map[string_map[columnNames[j]]], h_columns_int[columnNames[j]][i] * len_map[string_map[columnNames[j]]], SEEK_SET);
fread(&bf[0], 1, len_map[string_map[columnNames[j]]], file_map[string_map[columnNames[j]]]);
fprintf(file_pr, "%.*s", len_map[string_map[columnNames[j]]], bf.c_str());
};
fputs(sep, file_pr);
}
else {
fprintf(file_pr, "%.2f", (h_columns_float[columnNames[j]])[i]);
fputs(sep, file_pr);
};
};
if (i != mCount -1 && (curr_seg != segCount || i < curr_count))
fputs("\n",file_pr);
};
curr_seg++;
if(curr_seg == segCount)
print_all = 0;
};
if(!term) {
fclose(file_pr);
};
};
for(auto it = file_map.begin(); it != file_map.end(); it++)
fclose(it->second);
}
else {
// ---- Binary export path ----
//lets update the data dictionary
for(unsigned int j=0; j < columnNames.size(); j++) {
data_dict[file_name][columnNames[j]].col_type = type[columnNames[j]];
if(type[columnNames[j]] != 2) {
if(decimal[columnNames[j]])
data_dict[file_name][columnNames[j]].col_length = decimal_zeroes[columnNames[j]];
else if (ts_cols[columnNames[j]])
data_dict[file_name][columnNames[j]].col_length = UINT_MAX;
else
data_dict[file_name][columnNames[j]].col_length = 0;
}
else
data_dict[file_name][columnNames[j]].col_length = char_size[columnNames[j]];
};
save_dict = 1;
if(text_source) { //writing a binary file using a text file as a source
compress(file_name, 0, 1, 0, mCount, append);
for(unsigned int i = 0; i< columnNames.size(); i++)
if(type[columnNames[i]] == 2)
deAllocColumnOnDevice(columnNames[i]);
}
else { //writing a binary file using a binary file as a source
fact_file_loaded = 1;
size_t offset = 0;
if(!not_compressed) { // records are compressed, for example after filter op.
//decompress to host
queue<string> op_vx;
for(unsigned int i = 0; i< columnNames.size(); i++) {
op_vx.push(columnNames[i]);
};
allocColumns(this, op_vx);
size_t oldCnt = mRecCount;
mRecCount = 0;
resize(oldCnt);
mRecCount = oldCnt;
for(unsigned int i = 0; i < segCount; i++) {
size_t cnt = 0;
copyColumns(this, op_vx, i, cnt);
CopyToHost(0, mRecCount);
offset = offset + mRecCount;
compress(file_name, 0, 0, i - (segCount-1), mRecCount, append);
};
}
else {
// now we have decompressed records on the host
//call setSegments and compress columns in every segment
segCount = (mRecCount/process_count + 1);
offset = 0;
for(unsigned int z = 0; z < segCount; z++) {
if(z < segCount-1) {
if(mRecCount < process_count) {
mCount = mRecCount;
}
else {
mCount = process_count;
}
}
else {
mCount = mRecCount - (segCount-1)*process_count;
};
compress(file_name, offset, 0, z - (segCount-1), mCount, append);
offset = offset + mCount;
};
};
};
};
}
// Dictionary-encode one segment of a char column:
//  - "<base>"          : the dictionary file (unique strings, appended to)
//  - "<base>.<seg>.hash": 4-byte count, then one 8-byte Murmur hash per row
//  - "<base>.<seg>.idx" : pfor-compressed dictionary indices, one per row
// The per-column dictionary (char_hash[ind]) persists across segments so
// indices stay stable. NOTE(review): only the low 4 bytes of the size_t
// mCount are written to the hash file — confirm this matches the reader.
void CudaSet::compress_char(const string file_name, const string colname, const size_t mCount, const size_t offset, const unsigned int segment)
{
unsigned int len = char_size[colname];
string h_name, i_name, file_no_seg = file_name.substr(0, file_name.find_last_of("."));
i_name = file_no_seg + "." + to_string(segment) + ".idx";
h_name = file_no_seg + "." + to_string(segment) + ".hash";
fstream b_file_str, loc_hashes;
fstream binary_file_h(h_name.c_str(),ios::out|ios::binary|ios::trunc);
binary_file_h.write((char *)&mCount, 4);
// Segment 0 starts a fresh dictionary file; later segments append to it.
if(segment == 0) {
b_file_str.open(file_no_seg.c_str(),ios::out|ios::binary|ios::trunc);
}
else {
b_file_str.open(file_no_seg.c_str(),ios::out|ios::binary|ios::app);
};
// Ensure host/device int vectors exist and are large enough to hold the
// per-row dictionary indices.
if(h_columns_int.find(colname) == h_columns_int.end()) {
h_columns_int[colname] = thrust::host_vector<int_type >(mCount);
}
else {
if(h_columns_int[colname].size() < mCount)
h_columns_int[colname].resize(mCount);
};
if(d_columns_int.find(colname) == d_columns_int.end()) {
d_columns_int[colname] = thrust::device_vector<int_type >(mCount);
}
else {
if(d_columns_int[colname].size() < mCount)
d_columns_int[colname].resize(mCount);
};
size_t cnt;
long long int* hash_array = new long long int[mCount];
map<unsigned long long int, size_t>::iterator iter;
// char_hash is indexed by the column's position in columnNames.
unsigned int ind = std::find(columnNames.begin(), columnNames.end(), colname) - columnNames.begin();
for (unsigned int i = 0 ; i < mCount; i++) {
// Hash halved to keep it in signed 64-bit range.
hash_array[i] = MurmurHash64A(h_columns_char[colname] + (i+offset)*len, len, hash_seed)/2;
iter = char_hash[ind].find(hash_array[i]);
if(iter == char_hash[ind].end()) {
// First occurrence: append the raw string to the dictionary file and
// assign it the next index.
cnt = char_hash[ind].size();
char_hash[ind][hash_array[i]] = cnt;
b_file_str.write((char *)h_columns_char[colname] + (i+offset)*len, len);
h_columns_int[colname][i] = cnt;
}
else {
h_columns_int[colname][i] = iter->second;
};
};
binary_file_h.write((char *)hash_array, 8*mCount);
delete [] hash_array;
// Compress the index column on the GPU and write it to the .idx file.
thrust::device_vector<int_type> d_col(mCount);
thrust::copy(h_columns_int[colname].begin(), h_columns_int[colname].begin() + mCount, d_col.begin());
pfor_compress(thrust::raw_pointer_cast(d_col.data()), mCount*int_size, i_name, h_columns_int[colname], 0);
binary_file_h.close();
b_file_str.close();
};
// Dictionary-plus-bit-pack compress an integer column to 'file_name'.
// On-disk layout: 4-byte dictionary size, the sorted distinct values
// (int_size bytes each), 4-byte fit_count (values per 64-bit word), 4-byte
// bits_encoded, 4-byte packed-word count, 4-byte real value count, then the
// packed 64-bit words. Values are replaced by their rank in sorted order.
void CudaSet::compress_int(const string file_name, const string colname, const size_t mCount)
{
std::vector<unsigned int> dict_val;
unsigned int bits_encoded;
set<int_type> dict_s;
map<int_type, unsigned int> d_ordered;
// Collect distinct values; the set keeps them sorted.
for (unsigned int i = 0 ; i < mCount; i++) {
int_type f = h_columns_int[colname][i];
dict_s.insert(f);
};
unsigned int i = 0;
// Rank each distinct value by its sorted position.
for (auto it = dict_s.begin(); it != dict_s.end(); it++) {
d_ordered[*it] = i++;
};
// Re-encode the column as ranks.
for (unsigned int i = 0 ; i < mCount; i++) {
int_type f = h_columns_int[colname][i];
dict_val.push_back(d_ordered[f]);
};
bits_encoded = (unsigned int)ceil(log2(double(d_ordered.size()+1)));
//cout << "bits " << bits_encoded << endl;
unsigned int sz = (unsigned int)d_ordered.size();
// write to a file
fstream binary_file(file_name.c_str(),ios::out|ios::binary|ios::trunc);
binary_file.write((char *)&sz, 4);
for (auto it = d_ordered.begin(); it != d_ordered.end(); it++) {
binary_file.write((char*)(&(it->first)), int_size);
};
unsigned int fit_count = 64/bits_encoded;
unsigned long long int val = 0;
binary_file.write((char *)&fit_count, 4);
binary_file.write((char *)&bits_encoded, 4);
unsigned int curr_cnt = 1;
unsigned int vals_count = (unsigned int)dict_val.size()/fit_count;
if(!vals_count || dict_val.size()%fit_count)
vals_count++;
binary_file.write((char *)&vals_count, 4);
unsigned int real_count = (unsigned int)dict_val.size();
binary_file.write((char *)&real_count, 4);
// Pack 'fit_count' ranks per 64-bit word, left-shifting after each value;
// a final partial word is padded by shifting the remaining bit positions.
for(unsigned int i = 0; i < dict_val.size(); i++) {
val = val | dict_val[i];
if(curr_cnt < fit_count)
val = val << bits_encoded;
if( (curr_cnt == fit_count) || (i == (dict_val.size() - 1)) ) {
if (curr_cnt < fit_count) {
// Partial final word: align the packed values to the top.
val = val << ((fit_count-curr_cnt)-1)*bits_encoded;
};
curr_cnt = 1;
binary_file.write((char *)&val, int_size);
val = 0;
}
else
curr_cnt = curr_cnt + 1;
};
binary_file.close();
};
// State shared across successive CudaSet::LoadBigFile() calls (the loader
// is invoked once per chunk of the input file):
bool first_time = 1;   // true until the first chunk has been parsed
size_t rec_sz = 0;     // records per full chunk, fixed by the first read
size_t process_piece;  // bytes of the input file consumed per iteration
// Read and parse one chunk of a delimited text file into this set's columns.
// The raw bytes are copied to the GPU, split at newlines, and each field is
// converted on the device (int/decimal/date/timestamp/float/char) into the
// per-column device vectors, then mirrored to the host. Returns true when
// the end of the file has been reached. First call sizes the read buffer
// and allocates per-column conversion buffers; state persists in the
// first_time/rec_sz/process_piece globals and the 'readbuff' member.
bool CudaSet::LoadBigFile(FILE* file_p, thrust::device_vector<char>& d_readbuff, thrust::device_vector<char*>& dest,
thrust::device_vector<unsigned int>& ind, thrust::device_vector<unsigned int>& dest_len)
{
const char* sep = separator.c_str();
unsigned int maxx = cols.rbegin()->first;
map<unsigned int, string>::iterator it;
bool done = 0;
std::clock_t start1 = std::clock();
// Build a 1-based column-position -> type table and the list of used
// source-file column indices.
vector<int> types;
vector<int> cl;
types.push_back(0);
for(int i = 0; i < maxx; i++) {
auto iter = cols.find(i+1);
if(iter != cols.end()) {
types.push_back(type[iter->second]);
cl.push_back(iter->first-1);
}
else
types.push_back(0);
};
if(first_time) {
// Size the chunk to fit in a quarter of free GPU memory.
if(process_count*4 > getFreeMem()) {
process_piece = getFreeMem()/4;
}
else
process_piece = process_count;
readbuff = new char[process_piece+1];
d_readbuff.resize(process_piece+1);
cout << "set a piece to " << process_piece << " " << getFreeMem() << endl;
};
thrust::device_vector<unsigned int> ind_cnt(1);
thrust::device_vector<char> sepp(1);
sepp[0] = *sep;
long long int total_processed = 0;
size_t recs_processed = 0;
bool finished = 0;
thrust::device_vector<long long int> dev_pos;
long long int offset;
unsigned int cnt = 1;
const unsigned int max_len = 23;
while(!done) {
auto rb = fread(readbuff, 1, process_piece, file_p);
// Guarantee the chunk ends with a newline so line splitting sees the
// final record (readbuff has one spare byte for this).
if(readbuff[rb-1] != '\n') {
rb++;
readbuff[rb-1] = '\n';
};
if(rb < process_piece) {
done = 1;
finished = 1;
fclose(file_p);
};
if(total_processed >= process_count)
done = 1;
thrust::fill(d_readbuff.begin(), d_readbuff.end(),0);
thrust::copy(readbuff, readbuff+rb, d_readbuff.begin());
auto curr_cnt = thrust::count(d_readbuff.begin(), d_readbuff.begin() + rb, '\n') - 1;
if(recs_processed == 0 && first_time) {
rec_sz = curr_cnt;
if(finished)
rec_sz++;
total_max = curr_cnt;
};
//cout << "curr_cnt " << curr_cnt << " Memory: " << getFreeMem() << endl;
if(first_time) {
// First pass: grow the column vectors and allocate device buffers for
// the fixed-width text fields produced by the parse functor.
for(unsigned int i=0; i < columnNames.size(); i++) {
auto colname = columnNames[i];
if (type[colname] == 0) {
d_columns_int[colname].resize(d_columns_int[colname].size() + rec_sz);
h_columns_int[colname].resize(h_columns_int[colname].size() + rec_sz);
}
else if (type[colname] == 1) {
d_columns_float[colname].resize(d_columns_float[colname].size() + rec_sz);
h_columns_float[colname].resize(h_columns_float[colname].size() + rec_sz);
}
else {
// Char columns: grow the flat host buffer by hand, preserving
// already-parsed records.
char* c = new char[cnt*rec_sz*char_size[columnNames[i]]];
if(recs_processed > 0) {
memcpy(c, h_columns_char[columnNames[i]], recs_processed*char_size[columnNames[i]]);
delete [] h_columns_char[columnNames[i]];
};
h_columns_char[columnNames[i]] = c;
if(recs_processed == 0) {
void* temp;
CUDA_SAFE_CALL(cudaMalloc((void **) &temp, char_size[columnNames[i]]*rec_sz));
cudaMemset(temp,0,char_size[columnNames[i]]*rec_sz);
d_columns_char[columnNames[i]] = (char*)temp;
};
};
if(recs_processed == 0) {
ind[i] = cl[i];
void* temp;
if(type[columnNames[i]] != 2) {
if(!ts_cols[columnNames[i]]) {
CUDA_SAFE_CALL(cudaMalloc((void **) &temp, max_len*rec_sz));
dest_len[i] = max_len;
}
else {
CUDA_SAFE_CALL(cudaMalloc((void **) &temp, 23*rec_sz));
dest_len[i] = 23;
}
}
else {
CUDA_SAFE_CALL(cudaMalloc((void **) &temp, char_size[columnNames[i]]*rec_sz));
dest_len[i] = char_size[columnNames[i]];
};
dest[i] = (char*)temp;
};
};
};
// Clear the field buffers before each parse pass.
for(unsigned int i=0; i < columnNames.size(); i++) {
if(type[columnNames[i]] != 2) {
cudaMemset(dest[i],0,max_len*rec_sz);
}
else {
cudaMemset(dest[i],0,char_size[columnNames[i]]*rec_sz);
};
};
if(dev_pos.size() < curr_cnt+1)
dev_pos.resize(curr_cnt+1); //avoiding the unnecessary allocs
// dev_pos[k] holds the byte offset just before record k (hence the -1
// sentinel for record 0); filled by scanning for newlines on the device.
dev_pos[0] = -1;
thrust::copy_if(thrust::make_counting_iterator((unsigned long long int)0), thrust::make_counting_iterator((unsigned long long int)rb-1),
d_readbuff.begin(), dev_pos.begin()+1, _1 == '\n');
if(!finished) {
// Rewind the file to the start of the first unconsumed record so the
// next chunk begins on a record boundary.
if(curr_cnt < rec_sz) {
offset = (dev_pos[curr_cnt] - rb)+1;
//cout << "PATH 1 " << dev_pos[curr_cnt] << " " << offset << endl;
fseek(file_p, offset, SEEK_CUR);
total_processed = total_processed + rb + offset;
mRecCount = curr_cnt;
}
else {
offset = (dev_pos[rec_sz] - rb)+1;
//cout << "PATH 2 " << dev_pos[rec_sz] << " " << offset << endl;
fseek(file_p, offset, SEEK_CUR);
total_processed = total_processed + rb + offset;
mRecCount = rec_sz;
};
}
else {
mRecCount = curr_cnt + 1;
};
// Split every record into per-column fixed-width text fields on the GPU.
thrust::counting_iterator<unsigned int> begin(0);
ind_cnt[0] = mColumnCount;
parse_functor ff((const char*)thrust::raw_pointer_cast(d_readbuff.data()),(char**)thrust::raw_pointer_cast(dest.data()), thrust::raw_pointer_cast(ind.data()),
thrust::raw_pointer_cast(ind_cnt.data()), thrust::raw_pointer_cast(sepp.data()), thrust::raw_pointer_cast(dev_pos.data()), thrust::raw_pointer_cast(dest_len.data()));
thrust::for_each(begin, begin + mRecCount, ff);
ind_cnt[0] = max_len;
// Convert each column's text fields to its typed representation.
for(int i =0; i < mColumnCount; i++) {
if(type[columnNames[i]] == 0) { //int
thrust::device_ptr<char> p1((char*)dest[i]);
// Heuristic: a '-' at position 4 of the first field means a date
// string (YYYY-MM-DD...), not a plain integer.
if(p1[4] == '-') { //date
if(!ts_cols[columnNames[i]]) {
gpu_date date_ff((const char*)dest[i],(long long int*)thrust::raw_pointer_cast(d_columns_int[columnNames[i]].data()) + recs_processed);
thrust::for_each(begin, begin + mRecCount, date_ff);
}
else {
gpu_tdate date_ff((const char*)dest[i],(long long int*)thrust::raw_pointer_cast(d_columns_int[columnNames[i]].data()) + recs_processed);
thrust::for_each(begin, begin + mRecCount, date_ff);
}
}
else { //int
if(decimal[columnNames[i]]) {
thrust::device_vector<unsigned int> scale(1);
scale[0] = decimal_zeroes[columnNames[i]];
gpu_atold atold((const char*)dest[i],(long long int*)thrust::raw_pointer_cast(d_columns_int[columnNames[i]].data()) + recs_processed,
thrust::raw_pointer_cast(ind_cnt.data()), thrust::raw_pointer_cast(scale.data()));
thrust::for_each(begin, begin + mRecCount, atold);
}
else {
gpu_atoll atoll_ff((const char*)dest[i],(long long int*)thrust::raw_pointer_cast(d_columns_int[columnNames[i]].data()) + recs_processed,
thrust::raw_pointer_cast(ind_cnt.data()));
thrust::for_each(begin, begin + mRecCount, atoll_ff);
};
};
thrust::copy(d_columns_int[columnNames[i]].begin() + recs_processed, d_columns_int[columnNames[i]].begin()+recs_processed+mRecCount, h_columns_int[columnNames[i]].begin() + recs_processed);
}
else if(type[columnNames[i]] == 1) {
gpu_atof atof_ff((const char*)dest[i],(double*)thrust::raw_pointer_cast(d_columns_float[columnNames[i]].data()) + recs_processed,
thrust::raw_pointer_cast(ind_cnt.data()));
thrust::for_each(begin, begin + mRecCount, atof_ff);
thrust::copy(d_columns_float[columnNames[i]].begin() + recs_processed, d_columns_float[columnNames[i]].begin()+recs_processed+mRecCount, h_columns_float[columnNames[i]].begin() + recs_processed);
}
else {//char is already done
thrust::device_ptr<char> p1((char*)dest[i]);
cudaMemcpy( h_columns_char[columnNames[i]] + char_size[columnNames[i]]*recs_processed, (void *)dest[i] , char_size[columnNames[i]]*mRecCount, cudaMemcpyDeviceToHost);
};
};
recs_processed = recs_processed + mRecCount;
cnt++;
};
if(finished) {
// End of file: release the per-column conversion buffers and read buffer.
for(int i =0; i < mColumnCount; i++) {
if(dest[i]) {
cudaFree(dest[i]);
dest[i] = nullptr;
};
};
delete [] readbuff;
};
cout << "processed recs " << recs_processed << " " << getFreeMem() << endl;
first_time = 0;
mRecCount = recs_processed;
return finished;
};
// Release host-side column storage and the active filter, then free the
// device-side copies.
// Fix: the old else-branch shrank h_columns_float for every non-int case,
// which default-constructed spurious empty entries for char columns (and
// for int columns whose vector happened to be empty). Each map is now only
// touched for its own column type.
void CudaSet::free() {
    for(unsigned int i = 0; i < columnNames.size(); i++ ) {
        if(type[columnNames[i]] == 0) {
            if(h_columns_int[columnNames[i]].size()) {
                h_columns_int[columnNames[i]].resize(0);
                h_columns_int[columnNames[i]].shrink_to_fit();
            };
        }
        else if(type[columnNames[i]] == 1) {
            h_columns_float[columnNames[i]].resize(0);
            h_columns_float[columnNames[i]].shrink_to_fit();
        };
        // type 2 (char) columns live in raw h_columns_char buffers, which
        // are managed elsewhere.
    };
    if(prm_d.size()) {
        prm_d.resize(0);
        prm_d.shrink_to_fit();
    };
    deAllocOnDevice();
};
// Reserve an 8-byte-per-record device scratch buffer and register it in the
// global allocation pool (alloced_mem).
void alloc_pool(unsigned int maxRecs) {
    void* buf = nullptr;
    CUDA_SAFE_CALL(cudaMalloc((void **) &buf, 8*maxRecs));
    alloced_mem.push_back(buf);
};
// Element-wise AND of two device boolean columns of length mRecCount.
// The result is written in place into column1; column2 is freed here
// (it was allocated with thrust::device_malloc by compare()).
bool* CudaSet::logical_and(bool* column1, bool* column2)
{
    thrust::device_ptr<bool> lhs(column1);
    thrust::device_ptr<bool> rhs(column2);
    thrust::transform(lhs, lhs + mRecCount, rhs, lhs, thrust::logical_and<bool>());
    thrust::device_free(rhs);
    return column1;
}
// Element-wise OR of two device boolean columns of length mRecCount.
// The result is written in place into column1; column2 is freed here
// (it was allocated with thrust::device_malloc by compare()).
bool* CudaSet::logical_or(bool* column1, bool* column2)
{
    thrust::device_ptr<bool> lhs(column1);
    thrust::device_ptr<bool> rhs(column2);
    thrust::transform(lhs, lhs + mRecCount, rhs, lhs, thrust::logical_or<bool>());
    thrust::device_free(rhs);
    return column1;
}
// Evaluate a scalar integer comparison (d <op> s) once on the host and
// broadcast the boolean result into a freshly allocated device column of
// mRecCount entries (caller owns the returned pointer).
// op_type codes: 1 '<', 2 '>', 4 '=', 5 '<=', 6 '>=', anything else '!='.
bool* CudaSet::compare(int_type s, int_type d, int_type op_type)
{
    bool res;
    switch (op_type) {
    case 2:  res = (d > s);  break;  // >
    case 1:  res = (d < s);  break;  // <
    case 6:  res = (d >= s); break;  // >=
    case 5:  res = (d <= s); break;  // <=
    case 4:  res = (d == s); break;  // =
    default: res = (d != s); break;  // !=
    }
    thrust::device_ptr<bool> p = thrust::device_malloc<bool>(mRecCount);
    thrust::sequence(p, p+mRecCount, res, (bool)0);  // step 0 == constant fill
    return thrust::raw_pointer_cast(p);
};
// Evaluate a scalar float comparison (d <op> s) with EPSILON tolerance and
// broadcast the boolean result into a freshly allocated device column of
// mRecCount entries (caller owns the returned pointer).
// op_type codes: 1 '<', 2 '>', 4 '=', 5 '<=', 6 '>=', anything else '!='.
bool* CudaSet::compare(float_type s, float_type d, int_type op_type)
{
    // Tolerance-based equality: |d - s| < EPSILON.
    const bool eq = ((d-s) < EPSILON) && ((d-s) > -EPSILON);
    bool res;
    switch (op_type) {
    case 2:  res = ((d-s) > EPSILON);       break;  // >
    case 1:  res = ((s-d) > EPSILON);       break;  // <
    case 6:  res = ((d-s) > EPSILON) || eq; break;  // >=
    case 5:  res = ((s-d) > EPSILON) || eq; break;  // <=
    case 4:  res = eq;                      break;  // =
    default: res = !eq;                     break;  // !=
    }
    thrust::device_ptr<bool> p = thrust::device_malloc<bool>(mRecCount);
    thrust::sequence(p, p+mRecCount, res, (bool)0);  // step 0 == constant fill
    return thrust::raw_pointer_cast(p);
}
// Element-wise comparison of a device float column against the constant d,
// using the project's epsilon-aware float functors (f_greater etc.).
// Returns a new device mask of mRecCount bools owned by the caller.
bool* CudaSet::compare(float_type* column1, float_type d, int_type op_type)
{
    thrust::device_ptr<bool> mask = thrust::device_malloc<bool>(mRecCount);
    thrust::device_ptr<float_type> col(column1);
    auto cval = thrust::make_constant_iterator(d);
    switch(op_type) {
    case 2:  thrust::transform(col, col + mRecCount, cval, mask, f_greater()); break;          // >
    case 1:  thrust::transform(col, col + mRecCount, cval, mask, f_less()); break;             // <
    case 6:  thrust::transform(col, col + mRecCount, cval, mask, f_greater_equal_to()); break; // >=
    case 5:  thrust::transform(col, col + mRecCount, cval, mask, f_less_equal()); break;       // <=
    case 4:  thrust::transform(col, col + mRecCount, cval, mask, f_equal_to()); break;         // =
    default: thrust::transform(col, col + mRecCount, cval, mask, f_not_equal_to()); break;     // !=
    };
    return thrust::raw_pointer_cast(mask);
}
// Element-wise comparison of a device int column against the constant d.
// p1/p2 are decimal scale exponents: the column is multiplied by 10^p1
// on the fly (power_functor) and d is pre-multiplied by 10^p2, so both
// sides compare at the same decimal precision. Returns a newly
// device_malloc'ed bool mask of mRecCount entries owned by the caller.
// NOTE(review): (unsigned int)pow(10, p2) truncates to 32 bits -- assumes
// p2 is small enough that the factor fits; confirm against callers.
bool* CudaSet::compare(int_type* column1, int_type d, int_type op_type, unsigned int p1, unsigned int p2)
{
    thrust::device_ptr<bool> temp = thrust::device_malloc<bool>(mRecCount);
    thrust::device_ptr<int_type> dev_ptr(column1);
    // align the constant to the column's decimal scale
    if(p2)
        d = d*(unsigned int)pow(10, p2);
    if (op_type == 2) // >
        if(!p1)
            thrust::transform(dev_ptr, dev_ptr+mRecCount, thrust::make_constant_iterator(d), temp, thrust::greater<int_type>());
        else
            thrust::transform(thrust::make_transform_iterator(dev_ptr, power_functor<int_type>(p1)), thrust::make_transform_iterator(dev_ptr+mRecCount, power_functor<int_type>(p1)), thrust::make_constant_iterator(d), temp, thrust::greater<int_type>());
    else if (op_type == 1) // <
        if(!p1)
            thrust::transform(dev_ptr, dev_ptr+mRecCount, thrust::make_constant_iterator(d), temp, thrust::less<int_type>());
        else
            thrust::transform(thrust::make_transform_iterator(dev_ptr, power_functor<int_type>(p1)), thrust::make_transform_iterator(dev_ptr+mRecCount, power_functor<int_type>(p1)), thrust::make_constant_iterator(d), temp, thrust::less<int_type>());
    else if (op_type == 6) // >=
        if(!p1)
            thrust::transform(dev_ptr, dev_ptr+mRecCount, thrust::make_constant_iterator(d), temp, thrust::greater_equal<int_type>());
        else
            thrust::transform(thrust::make_transform_iterator(dev_ptr, power_functor<int_type>(p1)), thrust::make_transform_iterator(dev_ptr+mRecCount, power_functor<int_type>(p1)), thrust::make_constant_iterator(d), temp, thrust::greater_equal<int_type>());
    else if (op_type == 5) // <=
        if(!p1)
            thrust::transform(dev_ptr, dev_ptr+mRecCount, thrust::make_constant_iterator(d), temp, thrust::less_equal<int_type>());
        else
            thrust::transform(thrust::make_transform_iterator(dev_ptr, power_functor<int_type>(p1)), thrust::make_transform_iterator(dev_ptr+mRecCount, power_functor<int_type>(p1)), thrust::make_constant_iterator(d), temp, thrust::less_equal<int_type>());
    else if (op_type == 4)// =
        if(!p1)
            thrust::transform(dev_ptr, dev_ptr+mRecCount, thrust::make_constant_iterator(d), temp, thrust::equal_to<int_type>());
        else
            thrust::transform(thrust::make_transform_iterator(dev_ptr, power_functor<int_type>(p1)), thrust::make_transform_iterator(dev_ptr+mRecCount, power_functor<int_type>(p1)), thrust::make_constant_iterator(d), temp, thrust::equal_to<int_type>());
    else // !=
        if(!p1)
            thrust::transform(dev_ptr, dev_ptr+mRecCount, thrust::make_constant_iterator(d), temp, thrust::not_equal_to<int_type>());
        else
            thrust::transform(thrust::make_transform_iterator(dev_ptr, power_functor<int_type>(p1)), thrust::make_transform_iterator(dev_ptr+mRecCount, power_functor<int_type>(p1)), thrust::make_constant_iterator(d), temp, thrust::not_equal_to<int_type>());
    return thrust::raw_pointer_cast(temp);
}
// Element-wise comparison of two device int columns. p1/p2 are decimal
// scale exponents: when non-zero, the corresponding column is multiplied
// by 10^p1 / 10^p2 on the fly (power_functor) so both sides compare at
// the same decimal precision. Returns a newly device_malloc'ed bool mask
// of mRecCount entries; the caller owns it.
// FIX: thrust::transform(first1, last1, first2, result, op) expects
// first2 to be the BEGINNING of the second input range. The original
// passed dev_ptr2+mRecCount (the end of column2) as the start whenever a
// transform iterator wrapped the second column, reading one full column
// past the end of the buffer. All second ranges now start at dev_ptr2.
bool* CudaSet::compare(int_type* column1, int_type* column2, int_type op_type, unsigned int p1, unsigned int p2)
{
    thrust::device_ptr<int_type> dev_ptr1(column1);
    thrust::device_ptr<int_type> dev_ptr2(column2);
    thrust::device_ptr<bool> temp = thrust::device_malloc<bool>(mRecCount);
    if (op_type == 2) // >
        if(!p1 && !p2) {
            thrust::transform(dev_ptr1, dev_ptr1+mRecCount, dev_ptr2, temp, thrust::greater<int_type>());
        }
        else if(p1 && p2)
            thrust::transform(thrust::make_transform_iterator(dev_ptr1, power_functor<int_type>(p1)), thrust::make_transform_iterator(dev_ptr1+mRecCount, power_functor<int_type>(p1)), thrust::make_transform_iterator(dev_ptr2, power_functor<int_type>(p2)), temp, thrust::greater<int_type>());
        else if(p1)
            thrust::transform(thrust::make_transform_iterator(dev_ptr1, power_functor<int_type>(p1)), thrust::make_transform_iterator(dev_ptr1+mRecCount, power_functor<int_type>(p1)), dev_ptr2, temp, thrust::greater<int_type>());
        else
            thrust::transform(dev_ptr1, dev_ptr1+mRecCount, thrust::make_transform_iterator(dev_ptr2, power_functor<int_type>(p2)), temp, thrust::greater<int_type>());
    else if (op_type == 1) // <
        if(!p1 && !p2)
            thrust::transform(dev_ptr1, dev_ptr1+mRecCount, dev_ptr2, temp, thrust::less<int_type>());
        else if(p1 && p2)
            thrust::transform(thrust::make_transform_iterator(dev_ptr1, power_functor<int_type>(p1)), thrust::make_transform_iterator(dev_ptr1+mRecCount, power_functor<int_type>(p1)), thrust::make_transform_iterator(dev_ptr2, power_functor<int_type>(p2)), temp, thrust::less<int_type>());
        else if(p1)
            thrust::transform(thrust::make_transform_iterator(dev_ptr1, power_functor<int_type>(p1)), thrust::make_transform_iterator(dev_ptr1+mRecCount, power_functor<int_type>(p1)), dev_ptr2, temp, thrust::less<int_type>());
        else
            thrust::transform(dev_ptr1, dev_ptr1+mRecCount, thrust::make_transform_iterator(dev_ptr2, power_functor<int_type>(p2)), temp, thrust::less<int_type>());
    else if (op_type == 6) // >=
        if(!p1 && !p2)
            thrust::transform(dev_ptr1, dev_ptr1+mRecCount, dev_ptr2, temp, thrust::greater_equal<int_type>());
        else if(p1 && p2)
            thrust::transform(thrust::make_transform_iterator(dev_ptr1, power_functor<int_type>(p1)), thrust::make_transform_iterator(dev_ptr1+mRecCount, power_functor<int_type>(p1)), thrust::make_transform_iterator(dev_ptr2, power_functor<int_type>(p2)), temp, thrust::greater_equal<int_type>());
        else if(p1)
            thrust::transform(thrust::make_transform_iterator(dev_ptr1, power_functor<int_type>(p1)), thrust::make_transform_iterator(dev_ptr1+mRecCount, power_functor<int_type>(p1)), dev_ptr2, temp, thrust::greater_equal<int_type>());
        else
            thrust::transform(dev_ptr1, dev_ptr1+mRecCount, thrust::make_transform_iterator(dev_ptr2, power_functor<int_type>(p2)), temp, thrust::greater_equal<int_type>());
    else if (op_type == 5) // <=
        if(!p1 && !p2)
            thrust::transform(dev_ptr1, dev_ptr1+mRecCount, dev_ptr2, temp, thrust::less_equal<int_type>());
        else if(p1 && p2)
            thrust::transform(thrust::make_transform_iterator(dev_ptr1, power_functor<int_type>(p1)), thrust::make_transform_iterator(dev_ptr1+mRecCount, power_functor<int_type>(p1)), thrust::make_transform_iterator(dev_ptr2, power_functor<int_type>(p2)), temp, thrust::less_equal<int_type>());
        else if(p1)
            thrust::transform(thrust::make_transform_iterator(dev_ptr1, power_functor<int_type>(p1)), thrust::make_transform_iterator(dev_ptr1+mRecCount, power_functor<int_type>(p1)), dev_ptr2, temp, thrust::less_equal<int_type>());
        else
            thrust::transform(dev_ptr1, dev_ptr1+mRecCount, thrust::make_transform_iterator(dev_ptr2, power_functor<int_type>(p2)), temp, thrust::less_equal<int_type>());
    else if (op_type == 4)// =
        if(!p1 && !p2)
            thrust::transform(dev_ptr1, dev_ptr1+mRecCount, dev_ptr2, temp, thrust::equal_to<int_type>());
        else if(p1 && p2)
            thrust::transform(thrust::make_transform_iterator(dev_ptr1, power_functor<int_type>(p1)), thrust::make_transform_iterator(dev_ptr1+mRecCount, power_functor<int_type>(p1)), thrust::make_transform_iterator(dev_ptr2, power_functor<int_type>(p2)), temp, thrust::equal_to<int_type>());
        else if(p1)
            thrust::transform(thrust::make_transform_iterator(dev_ptr1, power_functor<int_type>(p1)), thrust::make_transform_iterator(dev_ptr1+mRecCount, power_functor<int_type>(p1)), dev_ptr2, temp, thrust::equal_to<int_type>());
        else
            thrust::transform(dev_ptr1, dev_ptr1+mRecCount, thrust::make_transform_iterator(dev_ptr2, power_functor<int_type>(p2)), temp, thrust::equal_to<int_type>());
    else // !=
        if(!p1 && !p2)
            thrust::transform(dev_ptr1, dev_ptr1+mRecCount, dev_ptr2, temp, thrust::not_equal_to<int_type>());
        else if(p1 && p2)
            thrust::transform(thrust::make_transform_iterator(dev_ptr1, power_functor<int_type>(p1)), thrust::make_transform_iterator(dev_ptr1+mRecCount, power_functor<int_type>(p1)), thrust::make_transform_iterator(dev_ptr2, power_functor<int_type>(p2)), temp, thrust::not_equal_to<int_type>());
        else if(p1)
            thrust::transform(thrust::make_transform_iterator(dev_ptr1, power_functor<int_type>(p1)), thrust::make_transform_iterator(dev_ptr1+mRecCount, power_functor<int_type>(p1)), dev_ptr2, temp, thrust::not_equal_to<int_type>());
        else
            thrust::transform(dev_ptr1, dev_ptr1+mRecCount, thrust::make_transform_iterator(dev_ptr2, power_functor<int_type>(p2)), temp, thrust::not_equal_to<int_type>());
    return thrust::raw_pointer_cast(temp);
}
// Element-wise comparison of two device float columns using the
// epsilon-aware float functors. Returns a new device mask of mRecCount
// bools owned by the caller.
bool* CudaSet::compare(float_type* column1, float_type* column2, int_type op_type)
{
    thrust::device_ptr<float_type> lhs(column1);
    thrust::device_ptr<float_type> rhs(column2);
    thrust::device_ptr<bool> mask = thrust::device_malloc<bool>(mRecCount);
    switch(op_type) {
    case 2:  thrust::transform(lhs, lhs + mRecCount, rhs, mask, f_greater()); break;          // >
    case 1:  thrust::transform(lhs, lhs + mRecCount, rhs, mask, f_less()); break;             // <
    case 6:  thrust::transform(lhs, lhs + mRecCount, rhs, mask, f_greater_equal_to()); break; // >=
    case 5:  thrust::transform(lhs, lhs + mRecCount, rhs, mask, f_less_equal()); break;       // <=
    case 4:  thrust::transform(lhs, lhs + mRecCount, rhs, mask, f_equal_to()); break;         // =
    default: thrust::transform(lhs, lhs + mRecCount, rhs, mask, f_not_equal_to()); break;     // !=
    };
    return thrust::raw_pointer_cast(mask);
}
// Compares a float column against an int column: the int column is first
// promoted to float in a temporary device buffer (freed before return),
// then compared element-wise with the epsilon-aware functors. Returns a
// new device mask of mRecCount bools owned by the caller.
bool* CudaSet::compare(float_type* column1, int_type* column2, int_type op_type)
{
    thrust::device_ptr<float_type> lhs(column1);
    thrust::device_ptr<int_type> ints(column2);
    thrust::device_ptr<float_type> promoted = thrust::device_malloc<float_type>(mRecCount);
    thrust::device_ptr<bool> mask = thrust::device_malloc<bool>(mRecCount);
    thrust::transform(ints, ints + mRecCount, promoted, long_to_float_type());
    switch(op_type) {
    case 2:  thrust::transform(lhs, lhs + mRecCount, promoted, mask, f_greater()); break;          // >
    case 1:  thrust::transform(lhs, lhs + mRecCount, promoted, mask, f_less()); break;             // <
    case 6:  thrust::transform(lhs, lhs + mRecCount, promoted, mask, f_greater_equal_to()); break; // >=
    case 5:  thrust::transform(lhs, lhs + mRecCount, promoted, mask, f_less_equal()); break;       // <=
    case 4:  thrust::transform(lhs, lhs + mRecCount, promoted, mask, f_equal_to()); break;         // =
    default: thrust::transform(lhs, lhs + mRecCount, promoted, mask, f_not_equal_to()); break;     // !=
    };
    thrust::device_free(promoted);
    return thrust::raw_pointer_cast(mask);
}
// Arithmetic between an int column and a float column. The int column is
// promoted to float into a pooled scratch buffer, which also receives the
// result and is handed to the caller (popped from the pool). 'reverse'
// swaps operand order, which matters for MINUS and DIV.
float_type* CudaSet::op(int_type* column1, float_type* column2, string op_type, bool reverse)
{
    if(alloced_mem.empty()) {
        alloc_pool(maxRecs);
    };
    thrust::device_ptr<float_type> scratch((float_type*)alloced_mem.back());
    thrust::device_ptr<int_type> ints(column1);
    thrust::transform(ints, ints + mRecCount, scratch, long_to_float_type());
    thrust::device_ptr<float_type> floats(column2);
    // reverse==0 means column2 OP promoted(column1); reverse==1 flips it.
    thrust::device_ptr<float_type> lhs = reverse ? scratch : floats;
    thrust::device_ptr<float_type> rhs = reverse ? floats : scratch;
    if (op_type.compare("MUL") == 0)
        thrust::transform(lhs, lhs + mRecCount, rhs, scratch, thrust::multiplies<float_type>());
    else if (op_type.compare("ADD") == 0)
        thrust::transform(lhs, lhs + mRecCount, rhs, scratch, thrust::plus<float_type>());
    else if (op_type.compare("MINUS") == 0)
        thrust::transform(lhs, lhs + mRecCount, rhs, scratch, thrust::minus<float_type>());
    else
        thrust::transform(lhs, lhs + mRecCount, rhs, scratch, thrust::divides<float_type>());
    alloced_mem.pop_back();
    return thrust::raw_pointer_cast(scratch);
}
// Arithmetic between an int column and the constant d. p1 scales the
// column by 10^p1 on the fly and p2 scales the constant by 10^p2
// (decimal alignment); 'reverse' makes the constant the left operand.
// The result lives in a pooled device buffer handed to the caller.
// FIX: d is scaled by 10^p2 exactly once, up front. The original's
// ADD/MINUS/DIV branches with !p1 multiplied the already-scaled d by
// 10^p2 a second time, yielding a 10^(2*p2) factor whenever p2 > 0.
// NOTE(review): 'unsigned int d1 = d' truncates int_type to 32 bits --
// confirm constants fit; kept as-is since only MUL uses the unscaled d1.
int_type* CudaSet::op(int_type* column1, int_type d, string op_type, bool reverse, unsigned int p1, unsigned int p2)
{
    if(alloced_mem.empty()) {
        alloc_pool(maxRecs);
    };
    //cout << "OP " << d << " " << op_type << " " << p1 << " " << p2 << endl;
    thrust::device_ptr<int_type> temp((int_type*)alloced_mem.back());
    thrust::device_ptr<int_type> dev_ptr1(column1);
    unsigned int d1 = d;   // unscaled constant, used only by MUL
    if(p2)
        d = d*(unsigned int)pow(10, p2);   // scale once, here only
    if(reverse == 0) {
        if (op_type.compare("MUL") == 0) {
            thrust::transform(dev_ptr1, dev_ptr1+mRecCount, thrust::make_constant_iterator(d1), temp, thrust::multiplies<int_type>());
        }
        else if (op_type.compare("ADD") == 0) {
            if(!p1)
                thrust::transform(dev_ptr1, dev_ptr1+mRecCount, thrust::make_constant_iterator(d), temp, thrust::plus<int_type>());
            else
                thrust::transform(thrust::make_transform_iterator(dev_ptr1, power_functor<int_type>(p1)), thrust::make_transform_iterator(dev_ptr1+mRecCount, power_functor<int_type>(p1)), thrust::make_constant_iterator(d), temp, thrust::plus<int_type>());
        }
        else if (op_type.compare("MINUS") == 0) {
            if(!p1)
                thrust::transform(dev_ptr1, dev_ptr1+mRecCount, thrust::make_constant_iterator(d), temp, thrust::minus<int_type>());
            else
                thrust::transform(thrust::make_transform_iterator(dev_ptr1, power_functor<int_type>(p1)), thrust::make_transform_iterator(dev_ptr1+mRecCount, power_functor<int_type>(p1)), thrust::make_constant_iterator(d), temp, thrust::minus<int_type>());
        }
        else {
            if(!p1)
                thrust::transform(dev_ptr1, dev_ptr1+mRecCount, thrust::make_constant_iterator(d), temp, thrust::divides<int_type>());
            else
                thrust::transform(thrust::make_transform_iterator(dev_ptr1, power_functor<int_type>(p1)), thrust::make_transform_iterator(dev_ptr1+mRecCount, power_functor<int_type>(p1)), thrust::make_constant_iterator(d), temp, thrust::divides<int_type>());
        }
    }
    else {
        if (op_type.compare("MUL") == 0) {
            if(!p1)
                thrust::transform(thrust::make_constant_iterator(d), thrust::make_constant_iterator(d) + mRecCount, dev_ptr1, temp, thrust::multiplies<int_type>());
            else
                thrust::transform(thrust::make_constant_iterator(d), thrust::make_constant_iterator(d) + mRecCount, thrust::make_transform_iterator(dev_ptr1, power_functor<int_type>(p1)), temp, thrust::multiplies<int_type>());
        }
        else if (op_type.compare("ADD") == 0) {
            if(!p1)
                thrust::transform(thrust::make_constant_iterator(d), thrust::make_constant_iterator(d) + mRecCount, dev_ptr1, temp, thrust::plus<int_type>());
            else
                thrust::transform(thrust::make_constant_iterator(d), thrust::make_constant_iterator(d) + mRecCount, thrust::make_transform_iterator(dev_ptr1, power_functor<int_type>(p1)), temp, thrust::plus<int_type>());
        }
        else if (op_type.compare("MINUS") == 0) {
            if(!p1)
                thrust::transform(thrust::make_constant_iterator(d), thrust::make_constant_iterator(d) + mRecCount, dev_ptr1, temp, thrust::minus<int_type>());
            else
                thrust::transform(thrust::make_constant_iterator(d), thrust::make_constant_iterator(d) + mRecCount, thrust::make_transform_iterator(dev_ptr1, power_functor<int_type>(p1)), temp, thrust::minus<int_type>());
        }
        else {
            if(!p1)
                thrust::transform(thrust::make_constant_iterator(d), thrust::make_constant_iterator(d) + mRecCount, dev_ptr1, temp, thrust::divides<int_type>());
            else
                thrust::transform(thrust::make_constant_iterator(d), thrust::make_constant_iterator(d) + mRecCount, thrust::make_transform_iterator(dev_ptr1, power_functor<int_type>(p1)), temp, thrust::divides<int_type>());
        };
    };
    alloced_mem.pop_back();
    return thrust::raw_pointer_cast(temp);
}
// Arithmetic between two int columns. p1/p2 scale column1/column2 by
// 10^p1 / 10^p2 on the fly (decimal alignment); 'reverse' swaps operand
// order (column2 OP column1), which matters for MINUS and DIV. The
// result is written into a pooled device buffer that is popped from the
// pool and handed to the caller.
int_type* CudaSet::op(int_type* column1, int_type* column2, string op_type, bool reverse, unsigned int p1, unsigned int p2)
{
    if(alloced_mem.empty()) {
        alloc_pool(maxRecs);
    };
    thrust::device_ptr<int_type> temp((int_type*)alloced_mem.back());
    thrust::device_ptr<int_type> dev_ptr1(column1);
    thrust::device_ptr<int_type> dev_ptr2(column2);
    //cout << "OP " << op_type << " " << p1 << " " << p2 << " " << reverse << endl;
    if(reverse == 0) {
        // column1 OP column2 (MUL ignores the scale factors)
        if (op_type.compare("MUL") == 0) {
            thrust::transform(dev_ptr1, dev_ptr1+mRecCount, dev_ptr2, temp, thrust::multiplies<int_type>());
        }
        else if (op_type.compare("ADD") == 0) {
            if(!p1 && !p2)
                thrust::transform(dev_ptr1, dev_ptr1+mRecCount, dev_ptr2, temp, thrust::plus<int_type>());
            else if(p1 && p2) {
                thrust::transform(thrust::make_transform_iterator(dev_ptr1, power_functor<int_type>(p1)), thrust::make_transform_iterator(dev_ptr1+mRecCount, power_functor<int_type>(p1)), thrust::make_transform_iterator(dev_ptr2, power_functor<int_type>(p2)), temp, thrust::plus<int_type>());
            }
            else if (p1)
                thrust::transform(thrust::make_transform_iterator(dev_ptr1, power_functor<int_type>(p1)), thrust::make_transform_iterator(dev_ptr1+mRecCount, power_functor<int_type>(p1)), dev_ptr2, temp, thrust::plus<int_type>());
            else
                thrust::transform(dev_ptr1, dev_ptr1+mRecCount, thrust::make_transform_iterator(dev_ptr2, power_functor<int_type>(p2)), temp, thrust::plus<int_type>());
        }
        else if (op_type.compare("MINUS") == 0) {
            if(!p1 && !p2)
                thrust::transform(dev_ptr1, dev_ptr1+mRecCount, dev_ptr2, temp, thrust::minus<int_type>());
            else if(p1 && p2)
                thrust::transform(thrust::make_transform_iterator(dev_ptr1, power_functor<int_type>(p1)), thrust::make_transform_iterator(dev_ptr1+mRecCount, power_functor<int_type>(p1)), thrust::make_transform_iterator(dev_ptr2, power_functor<int_type>(p2)), temp, thrust::minus<int_type>());
            else if (p1)
                thrust::transform(thrust::make_transform_iterator(dev_ptr1, power_functor<int_type>(p1)), thrust::make_transform_iterator(dev_ptr1+mRecCount, power_functor<int_type>(p1)), dev_ptr2, temp, thrust::minus<int_type>());
            else
                thrust::transform(dev_ptr1, dev_ptr1+mRecCount, thrust::make_transform_iterator(dev_ptr2, power_functor<int_type>(p2)), temp, thrust::minus<int_type>());
        }
        else {
            if(!p1 && !p2)
                thrust::transform(dev_ptr1, dev_ptr1+mRecCount, dev_ptr2, temp, thrust::divides<int_type>());
            else if(p1 && p2)
                thrust::transform(thrust::make_transform_iterator(dev_ptr1, power_functor<int_type>(p1)), thrust::make_transform_iterator(dev_ptr1+mRecCount, power_functor<int_type>(p1)), thrust::make_transform_iterator(dev_ptr2, power_functor<int_type>(p2)), temp, thrust::divides<int_type>());
            else if (p1)
                thrust::transform(thrust::make_transform_iterator(dev_ptr1, power_functor<int_type>(p1)), thrust::make_transform_iterator(dev_ptr1+mRecCount, power_functor<int_type>(p1)), dev_ptr2, temp, thrust::divides<int_type>());
            else
                thrust::transform(dev_ptr1, dev_ptr1+mRecCount, thrust::make_transform_iterator(dev_ptr2, power_functor<int_type>(p2)), temp, thrust::divides<int_type>());
        }
    }
    else {
        // reversed operand order: column2 OP column1
        if (op_type.compare("MUL") == 0) {
            thrust::transform(dev_ptr2, dev_ptr2+mRecCount, dev_ptr1, temp, thrust::multiplies<int_type>());
        }
        else if (op_type.compare("ADD") == 0) {
            if(!p1 && !p2)
                thrust::transform(dev_ptr2, dev_ptr2+mRecCount, dev_ptr1, temp, thrust::plus<int_type>());
            else if(p1 && p2)
                thrust::transform(thrust::make_transform_iterator(dev_ptr2, power_functor<int_type>(p2)), thrust::make_transform_iterator(dev_ptr2+mRecCount, power_functor<int_type>(p2)), thrust::make_transform_iterator(dev_ptr1, power_functor<int_type>(p1)), temp, thrust::plus<int_type>());
            else if (p1)
                thrust::transform(dev_ptr2, dev_ptr2+mRecCount, thrust::make_transform_iterator(dev_ptr1, power_functor<int_type>(p1)), temp, thrust::plus<int_type>());
            else
                thrust::transform(thrust::make_transform_iterator(dev_ptr2, power_functor<int_type>(p2)), thrust::make_transform_iterator(dev_ptr2+mRecCount, power_functor<int_type>(p2)), dev_ptr1, temp, thrust::plus<int_type>());
        }
        else if (op_type.compare("MINUS") == 0) {
            if(!p1 && !p2)
                thrust::transform(dev_ptr2, dev_ptr2+mRecCount, dev_ptr1, temp, thrust::minus<int_type>());
            else if(p1 && p2)
                thrust::transform(thrust::make_transform_iterator(dev_ptr2, power_functor<int_type>(p2)), thrust::make_transform_iterator(dev_ptr2+mRecCount, power_functor<int_type>(p2)), thrust::make_transform_iterator(dev_ptr1, power_functor<int_type>(p1)), temp, thrust::minus<int_type>());
            else if (p1)
                thrust::transform(dev_ptr2, dev_ptr2+mRecCount, thrust::make_transform_iterator(dev_ptr1, power_functor<int_type>(p1)), temp, thrust::minus<int_type>());
            else
                thrust::transform(thrust::make_transform_iterator(dev_ptr2, power_functor<int_type>(p2)), thrust::make_transform_iterator(dev_ptr2+mRecCount, power_functor<int_type>(p2)), dev_ptr1, temp, thrust::minus<int_type>());
        }
        else {
            if(!p1 && !p2)
                thrust::transform(dev_ptr2, dev_ptr2+mRecCount, dev_ptr1, temp, thrust::divides<int_type>());
            else if(p1 && p2)
                thrust::transform(thrust::make_transform_iterator(dev_ptr2, power_functor<int_type>(p2)), thrust::make_transform_iterator(dev_ptr2+mRecCount, power_functor<int_type>(p2)), thrust::make_transform_iterator(dev_ptr1, power_functor<int_type>(p1)), temp, thrust::divides<int_type>());
            else if (p1)
                thrust::transform(dev_ptr2, dev_ptr2+mRecCount, thrust::make_transform_iterator(dev_ptr1, power_functor<int_type>(p1)), temp, thrust::divides<int_type>());
            else
                thrust::transform(thrust::make_transform_iterator(dev_ptr2, power_functor<int_type>(p2)), thrust::make_transform_iterator(dev_ptr2+mRecCount, power_functor<int_type>(p2)), dev_ptr1, temp, thrust::divides<int_type>());
        }
    }
    alloced_mem.pop_back();
    return thrust::raw_pointer_cast(temp);
}
// Arithmetic between two float columns. The result is written into a
// pooled device buffer handed to the caller; 'reverse' swaps operand
// order (column2 OP column1), relevant for MINUS and DIV.
float_type* CudaSet::op(float_type* column1, float_type* column2, string op_type, bool reverse)
{
    if(alloced_mem.empty()) {
        alloc_pool(maxRecs);
    };
    thrust::device_ptr<float_type> res((float_type*)alloced_mem.back());
    thrust::device_ptr<float_type> a(column1);
    thrust::device_ptr<float_type> b(column2);
    thrust::device_ptr<float_type> lhs = reverse ? b : a;
    thrust::device_ptr<float_type> rhs = reverse ? a : b;
    if (op_type.compare("MUL") == 0)
        thrust::transform(lhs, lhs + mRecCount, rhs, res, thrust::multiplies<float_type>());
    else if (op_type.compare("ADD") == 0)
        thrust::transform(lhs, lhs + mRecCount, rhs, res, thrust::plus<float_type>());
    else if (op_type.compare("MINUS") == 0)
        thrust::transform(lhs, lhs + mRecCount, rhs, res, thrust::minus<float_type>());
    else
        thrust::transform(lhs, lhs + mRecCount, rhs, res, thrust::divides<float_type>());
    alloced_mem.pop_back();
    return thrust::raw_pointer_cast(res);
}
// Arithmetic between an int column and a float constant. The constant is
// broadcast into the pooled result buffer, the int column is promoted to
// float in a temporary buffer (freed before return), and the operation
// writes back into the pooled buffer handed to the caller. 'reverse'
// puts the constant on the left.
float_type* CudaSet::op(int_type* column1, float_type d, string op_type, bool reverse)
{
    if(alloced_mem.empty()) {
        alloc_pool(maxRecs);
    };
    thrust::device_ptr<float_type> res((float_type*)alloced_mem.back());
    thrust::fill(res, res + mRecCount, d);
    thrust::device_ptr<int_type> ints(column1);
    thrust::device_ptr<float_type> promoted = thrust::device_malloc<float_type>(mRecCount);
    thrust::transform(ints, ints + mRecCount, promoted, long_to_float_type());
    // reverse==0: column OP d; reverse==1: d OP column
    thrust::device_ptr<float_type> lhs = reverse ? res : promoted;
    thrust::device_ptr<float_type> rhs = reverse ? promoted : res;
    if (op_type.compare("MUL") == 0)
        thrust::transform(lhs, lhs + mRecCount, rhs, res, thrust::multiplies<float_type>());
    else if (op_type.compare("ADD") == 0)
        thrust::transform(lhs, lhs + mRecCount, rhs, res, thrust::plus<float_type>());
    else if (op_type.compare("MINUS") == 0)
        thrust::transform(lhs, lhs + mRecCount, rhs, res, thrust::minus<float_type>());
    else
        thrust::transform(lhs, lhs + mRecCount, rhs, res, thrust::divides<float_type>());
    thrust::device_free(promoted);
    alloced_mem.pop_back();
    return thrust::raw_pointer_cast(res);
}
// Arithmetic between a float column and the constant d. Mirrors the
// int-column overload above: the constant is broadcast into the pooled
// result buffer, then combined element-wise with the column; 'reverse'
// puts the constant on the left (relevant for MINUS and DIV).
float_type* CudaSet::op(float_type* column1, float_type d, string op_type,bool reverse)
{
    if(alloced_mem.empty()) {
        alloc_pool(maxRecs);
    };
    thrust::device_ptr<float_type> res((float_type*)alloced_mem.back());
    thrust::fill(res, res + mRecCount, d);
    thrust::device_ptr<float_type> col(column1);
    thrust::device_ptr<float_type> lhs = reverse ? res : col;
    thrust::device_ptr<float_type> rhs = reverse ? col : res;
    if (op_type.compare("MUL") == 0)
        thrust::transform(lhs, lhs + mRecCount, rhs, res, thrust::multiplies<float_type>());
    else if (op_type.compare("ADD") == 0)
        thrust::transform(lhs, lhs + mRecCount, rhs, res, thrust::plus<float_type>());
    else if (op_type.compare("MINUS") == 0)
        thrust::transform(lhs, lhs + mRecCount, rhs, res, thrust::minus<float_type>());
    else
        thrust::transform(lhs, lhs + mRecCount, rhs, res, thrust::divides<float_type>());
    alloced_mem.pop_back();
    return (float_type*)thrust::raw_pointer_cast(res);
}
// Loads a compressed join-index segment file "<index_name>.<segment>".
// Populates idx_dictionary_int (value -> ordinal) and uploads the packed
// value block to the device as idx_vals[index_name]; sets mRecCount to
// the segment's real record count and returns the trailing flag byte.
// Assumed file layout (inferred from the reads below -- TODO confirm
// against the writer): [u32 sz][sz x i64 dictionary keys][u32 fit_count]
// [u32 bits_encoded][u32 vals_count][u32 real_count][vals_count x u64]
// [1 flag byte].
// NOTE(review): fopen results are not checked here -- a missing segment
// file would crash; confirm callers guarantee existence.
char CudaSet::loadIndex(const string index_name, const unsigned int segment)
{
    FILE* f;
    unsigned int bits_encoded, fit_count, vals_count, sz, real_count;
    void* d_str;
    string f1 = index_name + "." + to_string(segment);
    char res;
    if(interactive) {
        // interactive mode: cache the whole file in pinned host memory so
        // repeated queries can re-parse it without touching disk
        if(index_buffers.find(f1) == index_buffers.end()) {
            f = fopen (f1.c_str(), "rb" );
            fseek(f, 0, SEEK_END);
            long fileSize = ftell(f);
            char* buff;
            cudaHostAlloc(&buff, fileSize, cudaHostAllocDefault);
            fseek(f, 0, SEEK_SET);
            fread(buff, fileSize, 1, f);
            fclose(f);
            index_buffers[f1] = buff;
        };
        sz = ((unsigned int*)index_buffers[f1])[0];
        idx_dictionary_int[index_name].clear();
        for(unsigned int i = 0; i < sz; i++) {
            idx_dictionary_int[index_name][((int_type*)(index_buffers[f1]+4+8*i))[0]] = i;
        };
        vals_count = ((unsigned int*)(index_buffers[f1]+4 +8*sz))[2];
        real_count = ((unsigned int*)(index_buffers[f1]+4 +8*sz))[3];
        mRecCount = real_count;
        res = (index_buffers[f1]+4 +8*sz + (vals_count+2)*int_size)[0];
        cudaMalloc((void **) &d_str, (vals_count+2)*int_size);
        cudaMemcpy( d_str, (void *) &((index_buffers[f1]+4 +8*sz)[0]), (vals_count+2)*int_size, cudaMemcpyHostToDevice);
        // replace any previously loaded segment's device copy
        if(idx_vals.count(index_name))
            cudaFree(idx_vals[index_name]);
        idx_vals[index_name] = (unsigned long long int*)d_str;
    }
    else {
        // batch mode: stream the file directly, no host-side cache
        f = fopen (f1.c_str(), "rb" );
        fread(&sz, 4, 1, f);
        int_type* d_array = new int_type[sz];
        idx_dictionary_int[index_name].clear();
        fread((void*)d_array, sz*int_size, 1, f);
        for(unsigned int i = 0; i < sz; i++) {
            idx_dictionary_int[index_name][d_array[i]] = i;
            //cout << index_name << " " << d_array[i] << " " << i << endl;
        };
        delete [] d_array;
        fread(&fit_count, 4, 1, f);
        fread(&bits_encoded, 4, 1, f);
        fread(&vals_count, 4, 1, f);
        fread(&real_count, 4, 1, f);
        mRecCount = real_count;
        unsigned long long int* int_array = new unsigned long long int[vals_count+2];
        // rewind over the four 4-byte counters so they are re-read as the
        // head of the packed value block
        fseek ( f , -16 , SEEK_CUR );
        fread((void*)int_array, 1, vals_count*8 + 16, f);
        fread(&res, 1, 1, f);
        fclose(f);
        void* d_str;
        cudaMalloc((void **) &d_str, (vals_count+2)*int_size);
        cudaMemcpy( d_str, (void *) int_array, (vals_count+2)*int_size, cudaMemcpyHostToDevice);
        if(idx_vals.count(index_name))
            cudaFree(idx_vals[index_name]);
        idx_vals[index_name] = (unsigned long long int*)d_str;
    }
    return res;
}
// Initializes this set's column metadata from an on-disk (compressed)
// table: reads optional ".sort"/".presort" ordering descriptors, then
// registers each column's type, decimal scale, and host/device vectors.
// The four queues are consumed in lockstep (name, type, size, position).
// Type codes used throughout: 0 = int/decimal/timestamp, 1 = float,
// 2 = char.
void CudaSet::initialize(queue<string> &nameRef, queue<string> &typeRef, queue<int> &sizeRef, queue<int> &colsRef, size_t Recs, string file_name) // compressed data for DIM tables
{
    mColumnCount = (unsigned int)nameRef.size();
    FILE* f;
    string f1;
    unsigned int cnt;
    char buffer[4000];
    string str;
    not_compressed = 0;
    mRecCount = Recs;
    hostRecCount = Recs;
    totalRecs = Recs;
    load_file_name = file_name;
    // ".sort" lists the fields each segment is sorted on
    f1 = file_name + ".sort";
    f = fopen (f1.c_str() , "rb" );
    if(f) {
        unsigned int sz, idx;
        fread((char *)&sz, 4, 1, f);
        for(unsigned int j = 0; j < sz; j++) {
            fread((char *)&idx, 4, 1, f);
            fread(buffer, idx, 1, f);
            str.assign(buffer, idx);
            sorted_fields.push(str);
            if(verbose)
                cout << "segment sorted on " << str << endl;
        };
        fclose(f);
    };
    // ".presort" has the same layout for presorted fields
    f1 = file_name + ".presort";
    f = fopen (f1.c_str() , "rb" );
    if(f) {
        unsigned int sz, idx;
        fread((char *)&sz, 4, 1, f);
        for(unsigned int j = 0; j < sz; j++) {
            fread((char *)&idx, 4, 1, f);
            fread(buffer, idx, 1, f);
            str.assign(buffer, idx);
            presorted_fields.push(str);
            if(verbose)
                cout << "presorted on " << str << endl;
        };
        fclose(f);
    };
    tmp_table = 0;
    filtered = 0;
    for(unsigned int i=0; i < mColumnCount; i++) {
        //f1 = file_name + "." + nameRef.front() + ".0";
        //f = fopen (f1.c_str() , "rb" );
        //fread((char *)&bytes, 4, 1, f); //need to read metadata such as type and length
        //fclose(f);
        columnNames.push_back(nameRef.front());
        cols[colsRef.front()] = nameRef.front();
        // for numeric columns, peek at segment 0's header to learn the
        // compression type (6th 4-byte word of the ".0" file)
        if (((typeRef.front()).compare("decimal") == 0) || ((typeRef.front()).compare("int") == 0)) {
            f1 = file_name + "." + nameRef.front() + ".0";
            f = fopen (f1.c_str() , "rb" );
            if(!f) {
                cout << "Couldn't find field " << nameRef.front() << endl;
                exit(0);
            };
            for(unsigned int j = 0; j < 6; j++)
                fread((char *)&cnt, 4, 1, f);
            fclose(f);
            compTypes[nameRef.front()] = cnt;
        };
        if((typeRef.front()).compare("timestamp") == 0)
            ts_cols[nameRef.front()] = 1;
        else
            ts_cols[nameRef.front()] = 0;
        if ((typeRef.front()).compare("int") == 0 || (typeRef.front()).compare("timestamp") == 0) {
            type[nameRef.front()] = 0;
            decimal[nameRef.front()] = 0;
            decimal_zeroes[nameRef.front()] = 0;
            h_columns_int[nameRef.front()] = thrust::host_vector<int_type, pinned_allocator<int_type> >();
            d_columns_int[nameRef.front()] = thrust::device_vector<int_type>();
        }
        else if ((typeRef.front()).compare("float") == 0) {
            type[nameRef.front()] = 1;
            decimal[nameRef.front()] = 0;
            h_columns_float[nameRef.front()] = thrust::host_vector<float_type, pinned_allocator<float_type> >();
            d_columns_float[nameRef.front()] = thrust::device_vector<float_type >();
        }
        else if ((typeRef.front()).compare("decimal") == 0) {
            // decimals are stored as scaled integers; sizeRef carries the scale
            type[nameRef.front()] = 0;
            decimal[nameRef.front()] = 1;
            decimal_zeroes[nameRef.front()] = sizeRef.front();
            h_columns_int[nameRef.front()] = thrust::host_vector<int_type, pinned_allocator<int_type> >();
            d_columns_int[nameRef.front()] = thrust::device_vector<int_type>();
        }
        else {
            // char column: buffers are allocated lazily; sizeRef carries
            // the fixed string width, string_map the dictionary file stem
            type[nameRef.front()] = 2;
            decimal[nameRef.front()] = 0;
            h_columns_char[nameRef.front()] = nullptr;
            d_columns_char[nameRef.front()] = nullptr;
            char_size[nameRef.front()] = sizeRef.front();
            string_map[nameRef.front()] = file_name + "." + nameRef.front();
        };
        nameRef.pop();
        typeRef.pop();
        sizeRef.pop();
        colsRef.pop();
    };
};
// Build an empty CudaSet from column-metadata queues (name, type string,
// size/scale, column ordinal). All four queues are consumed. Recs is the
// host-side record count; no data is loaded here.
void CudaSet::initialize(queue<string> &nameRef, queue<string> &typeRef, queue<int> &sizeRef, queue<int> &colsRef, size_t Recs)
{
    mColumnCount = (unsigned int)nameRef.size();
    tmp_table = 0;
    filtered = 0;
    mRecCount = 0;
    hostRecCount = Recs;
    segCount = 0;
    for(unsigned int i = 0; i < mColumnCount; i++) {
        const string col = nameRef.front();
        const string col_type = typeRef.front();
        columnNames.push_back(col);
        cols[colsRef.front()] = col;
        // remember which columns hold timestamps
        ts_cols[col] = (col_type.compare("timestamp") == 0) ? 1 : 0;
        if (col_type.compare("int") == 0 || col_type.compare("timestamp") == 0) {
            // integer (or timestamp) column stored in the int vectors
            type[col] = 0;
            decimal[col] = 0;
            decimal_zeroes[col] = 0;
            h_columns_int[col] = thrust::host_vector<int_type, pinned_allocator<int_type> >();
            d_columns_int[col] = thrust::device_vector<int_type>();
        }
        else if (col_type.compare("float") == 0) {
            // floating point column
            type[col] = 1;
            decimal[col] = 0;
            h_columns_float[col] = thrust::host_vector<float_type, pinned_allocator<float_type> >();
            d_columns_float[col] = thrust::device_vector<float_type>();
        }
        else if (col_type.compare("decimal") == 0) {
            // fixed-point decimal kept in the integer vectors; the size queue
            // carries the scale (number of decimal zeroes)
            type[col] = 0;
            decimal[col] = 1;
            decimal_zeroes[col] = sizeRef.front();
            h_columns_int[col] = thrust::host_vector<int_type, pinned_allocator<int_type> >();
            d_columns_int[col] = thrust::device_vector<int_type>();
        }
        else {
            // character column; buffers are allocated lazily, only the width is kept
            type[col] = 2;
            decimal[col] = 0;
            h_columns_char[col] = nullptr;
            d_columns_char[col] = nullptr;
            char_size[col] = sizeRef.front();
        };
        nameRef.pop();
        typeRef.pop();
        sizeRef.pop();
        colsRef.pop();
    };
};
// Minimal initializer: record the dimensions of a result set.
// No column metadata or storage is set up here.
void CudaSet::initialize(const size_t RecordCount, const unsigned int ColumnCount)
{
    filtered = 0;
    mColumnCount = ColumnCount;
    hostRecCount = RecordCount;
    mRecCount = RecordCount;
};
// Build a projection set: one column per name in op_sel, with metadata
// (type, decimal info, char width) copied from whichever registered table
// in the global varNames map owns that column. Host vectors here are
// intentionally NOT pinned (see commented-out pinned_allocator lines).
void CudaSet::initialize(queue<string> op_sel, const queue<string> op_sel_as)
{
    mRecCount = 0;
    mColumnCount = (unsigned int)op_sel.size();
    segCount = 1;
    not_compressed = 1;
    filtered = 0;
    col_aliases = op_sel_as;
    unsigned int i = 0;
    CudaSet *a;
    while(!op_sel.empty()) {
        // find the table that owns this column
        // NOTE(review): if no registered table contains the column, `a` keeps
        // the last table checked (or is uninitialized when varNames is empty)
        // and the lookups below read wrong metadata -- verify callers guarantee
        // the column exists somewhere.
        for(auto it = varNames.begin(); it != varNames.end(); it++) {
            a = it->second;
            if(std::find(a->columnNames.begin(), a->columnNames.end(), op_sel.front()) != a->columnNames.end())
                break;
        };
        // copy the owning table's metadata for this column
        type[op_sel.front()] = a->type[op_sel.front()];
        cols[i] = op_sel.front();
        decimal[op_sel.front()] = a->decimal[op_sel.front()];
        decimal_zeroes[op_sel.front()] = a->decimal_zeroes[op_sel.front()];
        columnNames.push_back(op_sel.front());
        if (a->type[op_sel.front()] == 0) {
            // integer / decimal column
            d_columns_int[op_sel.front()] = thrust::device_vector<int_type>();
            //h_columns_int[op_sel.front()] = thrust::host_vector<int_type, pinned_allocator<int_type> >();
            h_columns_int[op_sel.front()] = thrust::host_vector<int_type>();
        }
        else if (a->type[op_sel.front()] == 1) {
            // float column
            d_columns_float[op_sel.front()] = thrust::device_vector<float_type>();
            //h_columns_float[op_sel.front()] = thrust::host_vector<float_type, pinned_allocator<float_type> >();
            h_columns_float[op_sel.front()] = thrust::host_vector<float_type>();
        }
        else {
            // char column: carry the width, allocate buffers lazily
            h_columns_char[op_sel.front()] = nullptr;
            d_columns_char[op_sel.front()] = nullptr;
            char_size[op_sel.front()] = a->char_size[op_sel.front()];
        };
        i++;
        op_sel.pop();
    };
}
// Build the result set of a join between tables a and b: for each selected
// column, metadata is copied from whichever side owns it (a takes precedence).
// Columns requested but present in neither input are silently dropped.
void CudaSet::initialize(CudaSet* a, CudaSet* b, queue<string> op_sel, queue<string> op_sel_as)
{
    mRecCount = 0;
    mColumnCount = 0;
    // first pass: count how many requested columns actually exist in a or b
    queue<string> q_cnt(op_sel);
    unsigned int i = 0;
    set<string> field_names;
    while(!q_cnt.empty()) {
        if( std::find(a->columnNames.begin(), a->columnNames.end(), q_cnt.front()) != a->columnNames.end() ||
                std::find(b->columnNames.begin(), b->columnNames.end(), q_cnt.front()) != b->columnNames.end()) {
            field_names.insert(q_cnt.front());
        };
        q_cnt.pop();
    }
    mColumnCount = (unsigned int)field_names.size();
    maxRecs = b->maxRecs;
    segCount = 1;
    filtered = 0;
    not_compressed = 1;
    col_aliases = op_sel_as;
    i = 0;
    // second pass: copy per-column metadata, skipping duplicates
    while(!op_sel.empty()) {
        if(std::find(columnNames.begin(), columnNames.end(), op_sel.front()) == columnNames.end()) {
            if(std::find(a->columnNames.begin(), a->columnNames.end(), op_sel.front()) != a->columnNames.end()) {
                // column comes from the left table a
                cols[i] = op_sel.front();
                decimal[op_sel.front()] = a->decimal[op_sel.front()];
                columnNames.push_back(op_sel.front());
                type[op_sel.front()] = a->type[op_sel.front()];
                ts_cols[op_sel.front()] = a->ts_cols[op_sel.front()];
                if (a->type[op_sel.front()] == 0) {
                    // integer / decimal (possibly dictionary-encoded string)
                    d_columns_int[op_sel.front()] = thrust::device_vector<int_type>();
                    h_columns_int[op_sel.front()] = thrust::host_vector<int_type, pinned_allocator<int_type> >();
                    if(a->string_map.find(op_sel.front()) != a->string_map.end()) {
                        string_map[op_sel.front()] = a->string_map[op_sel.front()];
                    };
                    decimal[op_sel.front()] = a->decimal[op_sel.front()];
                    decimal_zeroes[op_sel.front()] = a->decimal_zeroes[op_sel.front()];
                }
                else if (a->type[op_sel.front()] == 1) {
                    // float
                    d_columns_float[op_sel.front()] = thrust::device_vector<float_type>();
                    h_columns_float[op_sel.front()] = thrust::host_vector<float_type, pinned_allocator<float_type> >();
                }
                else {
                    // char
                    h_columns_char[op_sel.front()] = nullptr;
                    d_columns_char[op_sel.front()] = nullptr;
                    char_size[op_sel.front()] = a->char_size[op_sel.front()];
                    string_map[op_sel.front()] = a->string_map[op_sel.front()];
                };
                i++;
            }
            else if(std::find(b->columnNames.begin(), b->columnNames.end(), op_sel.front()) != b->columnNames.end()) {
                // column comes from the right table b (same scheme as above)
                columnNames.push_back(op_sel.front());
                cols[i] = op_sel.front();
                decimal[op_sel.front()] = b->decimal[op_sel.front()];
                type[op_sel.front()] = b->type[op_sel.front()];
                ts_cols[op_sel.front()] = b->ts_cols[op_sel.front()];
                if (b->type[op_sel.front()] == 0) {
                    d_columns_int[op_sel.front()] = thrust::device_vector<int_type>();
                    h_columns_int[op_sel.front()] = thrust::host_vector<int_type, pinned_allocator<int_type> >();
                    if(b->string_map.find(op_sel.front()) != b->string_map.end()) {
                        string_map[op_sel.front()] = b->string_map[op_sel.front()];
                    };
                    decimal[op_sel.front()] = b->decimal[op_sel.front()];
                    decimal_zeroes[op_sel.front()] = b->decimal_zeroes[op_sel.front()];
                }
                else if (b->type[op_sel.front()] == 1) {
                    d_columns_float[op_sel.front()] = thrust::device_vector<float_type>();
                    h_columns_float[op_sel.front()] = thrust::host_vector<float_type, pinned_allocator<float_type> >();
                }
                else {
                    h_columns_char[op_sel.front()] = nullptr;
                    d_columns_char[op_sel.front()] = nullptr;
                    char_size[op_sel.front()] = b->char_size[op_sel.front()];
                    string_map[op_sel.front()] = b->string_map[op_sel.front()];
                };
                i++;
            }
        }
        op_sel.pop();
    };
};
// Mirror a comparison operator for when its operands are swapped:
// '<' (1) <-> '>' (2) and '<=' (5) <-> '>=' (6); anything else (e.g.
// equality-style ops) maps to itself.
int_type reverse_op(int_type op_type)
{
    switch (op_type) {
    case 2:  return 1;       // >  becomes <
    case 1:  return 2;       // <  becomes >
    case 6:  return 5;       // >= becomes <=
    case 5:  return 6;       // <= becomes >=
    default: return op_type; // symmetric operators are unchanged
    };
}
size_t getFreeMem()
{
size_t available, total;
cudaMemGetInfo(&available, &total);
return available;
} ;
// Ensure device-side storage exists for the requested columns of a.
// Filtered sets stage data through the shared scratch buffer alloced_tmp,
// which is (re)grown here to fit the source table's largest segment;
// unfiltered sets get their columns allocated directly on the device.
// Fix: removed a redundant nested `if(a->filtered)` whose else-branch
// (`t = a;`) was unreachable inside the outer `if(a->filtered)`.
void allocColumns(CudaSet* a, queue<string> fields)
{
    if(a->filtered) {
        // the data really lives in the source table the filter refers to
        CudaSet* t = varNames[a->source_name];
        if(int_size*t->maxRecs > alloced_sz) {
            // grow the shared staging buffer to the largest segment size
            if(alloced_sz) {
                cudaFree(alloced_tmp);
            };
            cudaMalloc((void **) &alloced_tmp, int_size*t->maxRecs);
            alloced_sz = int_size*t->maxRecs;
        }
    }
    else {
        while(!fields.empty()) {
            if(var_exists(a, fields.front()) && !a->onDevice(fields.front())) {
                a->allocColumnOnDevice(fields.front(), a->maxRecs);
            }
            fields.pop();
        };
    };
}
// Move one column of source table t into filtered set a for the current
// segment: when a->prm_index is 'R' the filter selected specific rows and we
// gather them through a's row permutation; otherwise the whole segment
// qualifies and is copied as-is. `count` is the running destination offset.
void gatherColumns(CudaSet* a, CudaSet* t, string field, unsigned int segment, size_t& count)
{
    if(!a->onDevice(field)) {
        a->allocColumnOnDevice(field, a->maxRecs);
    };
    if(a->prm_index == 'R') {
        // gather only the rows selected by the filter permutation
        mygather(field, a, t, count, a->mRecCount);
    }
    else {
        // whole segment qualifies: straight copy, row count follows the source
        mycopy(field, a, t, count, t->mRecCount);
        a->mRecCount = t->mRecCount;
    };
}
// Widen packed columns of `a` back to 64 bits and add back the per-column
// base value recorded in cpy_init_val (cpy_bits holds the packed width set
// during the copy phase). Columns marked as timestamps are only touched when
// `ts` is set. The global `scratch` buffer is used as a 64-bit temporary;
// each column is processed at most once per call.
void copyFinalize(CudaSet* a, queue<string> fields, bool ts)
{
    set<string> uniques;
    // make sure the scratch area can hold mRecCount 8-byte values
    if(scratch.size() < a->mRecCount*8)
        scratch.resize(a->mRecCount*8);
    thrust::device_ptr<int_type> tmp((int_type*)thrust::raw_pointer_cast(scratch.data()));
    while(!fields.empty()) {
        if (uniques.count(fields.front()) == 0 && var_exists(a, fields.front()) && cpy_bits.find(fields.front()) != cpy_bits.end() && (!a->ts_cols[fields.front()] || ts)) {
            if(cpy_bits[fields.front()] == 8) {
                // widen packed 8-bit values into the 64-bit scratch buffer;
                // non-float columns live in d_columns_int, floats in d_columns_float
                if(a->type[fields.front()] != 1) {
                    thrust::device_ptr<unsigned char> src((unsigned char*)thrust::raw_pointer_cast(a->d_columns_int[fields.front()].data()));
                    thrust::transform(src, src+a->mRecCount, tmp, to_int64<unsigned char>());
                }
                else {
                    thrust::device_ptr<unsigned char> src((unsigned char*)thrust::raw_pointer_cast(a->d_columns_float[fields.front()].data()));
                    thrust::transform(src, src+a->mRecCount, tmp, to_int64<unsigned char>());
                };
            }
            else if(cpy_bits[fields.front()] == 16) {
                // widen packed 16-bit values
                if(a->type[fields.front()] != 1) {
                    thrust::device_ptr<unsigned short int> src((unsigned short int*)thrust::raw_pointer_cast(a->d_columns_int[fields.front()].data()));
                    thrust::transform(src, src+a->mRecCount, tmp, to_int64<unsigned short>());
                }
                else {
                    thrust::device_ptr<unsigned short int> src((unsigned short int*)thrust::raw_pointer_cast(a->d_columns_float[fields.front()].data()));
                    thrust::transform(src, src+a->mRecCount, tmp, to_int64<unsigned short>());
                };
            }
            else if(cpy_bits[fields.front()] == 32) {
                // widen packed 32-bit values
                if(a->type[fields.front()] != 1) {
                    thrust::device_ptr<unsigned int> src((unsigned int*)thrust::raw_pointer_cast(a->d_columns_int[fields.front()].data()));
                    thrust::transform(src, src+a->mRecCount, tmp, to_int64<unsigned int>());
                }
                else {
                    thrust::device_ptr<unsigned int> src((unsigned int*)thrust::raw_pointer_cast(a->d_columns_float[fields.front()].data()));
                    thrust::transform(src, src+a->mRecCount, tmp, to_int64<unsigned int>());
                };
            }
            else {
                // already 64-bit: plain copy into scratch
                if(a->type[fields.front()] != 1) {
                    thrust::device_ptr<int_type> src((int_type*)thrust::raw_pointer_cast(a->d_columns_int[fields.front()].data()));
                    thrust::copy(src, src+a->mRecCount, tmp);
                }
                else {
                    thrust::device_ptr<int_type> src((int_type*)thrust::raw_pointer_cast(a->d_columns_float[fields.front()].data()));
                    thrust::copy(src, src+a->mRecCount, tmp);
                };
            };
            // add back the per-column base value
            thrust::constant_iterator<int_type> iter(cpy_init_val[fields.front()]);
            if(a->type[fields.front()] != 1) {
                thrust::transform(tmp, tmp + a->mRecCount, iter, a->d_columns_int[fields.front()].begin(), thrust::plus<int_type>());
            }
            else {
                // float columns: add in integer space, then convert the
                // 64-bit result back to float via long_to_float
                thrust::device_ptr<int_type> dest((int_type*)thrust::raw_pointer_cast(a->d_columns_float[fields.front()].data()));
                thrust::transform(tmp, tmp + a->mRecCount, iter, dest, thrust::plus<int_type>());
                thrust::transform(dest, dest+a->mRecCount, a->d_columns_float[fields.front()].begin(), long_to_float());
            };
        };
        uniques.insert(fields.front());
        fields.pop();
    };
}
// Copy segment `segment` of the requested columns of `a` to the GPU.
// For filtered sets this first (optionally) runs the filter op, resizes
// destination columns to fit the accumulated rows, then pulls each source
// column through the staging buffer and gathers the qualifying rows.
// `count` is the running destination offset across segments; `rsz` grows
// device columns; `flt` triggers filter_op for this segment.
// Fix: removed the unused local `auto f(fields);` copy.
void copyColumns(CudaSet* a, queue<string> fields, unsigned int segment, size_t& count, bool rsz, bool flt)
{
    set<string> uniques;
    if(a->filtered) { //filter the segment
        if(flt) {
            filter_op(a->fil_s, a->fil_f, segment);
        };
        if(rsz && a->mRecCount) {
            // grow every destination column to hold the rows gathered so far
            queue<string> fields1(fields);
            while(!fields1.empty()) {
                a->resizeDeviceColumn(a->devRecCount + a->mRecCount, fields1.front());
                fields1.pop();
            };
            a->devRecCount = a->devRecCount + a->mRecCount;
        };
    };
    // per-call state used by mygather/mycopy/copyFinalize
    cpy_bits.clear();
    cpy_init_val.clear();
    while(!fields.empty()) {
        // each column is copied at most once, and only if it exists
        if (uniques.count(fields.front()) == 0 && var_exists(a, fields.front())) {
            if(a->filtered) {
                if(a->mRecCount) {
                    // stage the source segment, then gather selected rows into a
                    CudaSet *t = varNames[a->source_name];
                    alloced_switch = 1;
                    t->CopyColumnToGpu(fields.front(), segment);
                    gatherColumns(a, t, fields.front(), segment, count);
                    alloced_switch = 0;
                };
            }
            else {
                if(a->mRecCount) {
                    a->CopyColumnToGpu(fields.front(), segment, count);
                };
            };
            uniques.insert(fields.front());
        };
        fields.pop();
    };
}
// Gather the rows selected by a->prm_d from the staging buffer alloced_tmp
// into a's device column `colname`, writing at element offset `offset`.
// When cpy_bits records a packed width (8/16/32 bits) the gather stays in
// the packed representation (offsets are in packed-element units);
// copyFinalize later widens the data and re-applies the base value.
// g_size is the number of rows to gather.
void mygather(string colname, CudaSet* a, CudaSet* t, size_t offset, size_t g_size)
{
    if(t->type[colname] != 1 ) {
        // integer / decimal column (destination: d_columns_int)
        if(cpy_bits.find(colname) != cpy_bits.end()) { // non-delta compression
            if(cpy_bits[colname] == 8) {
                thrust::device_ptr<unsigned char> d_col_source((unsigned char*)alloced_tmp);
                thrust::device_ptr<unsigned char> d_col_dest((unsigned char*)thrust::raw_pointer_cast(a->d_columns_int[colname].data()));
                thrust::gather(a->prm_d.begin(), a->prm_d.begin() + g_size, d_col_source, d_col_dest + offset);
            }
            else if(cpy_bits[colname] == 16) {
                thrust::device_ptr<unsigned short int> d_col_source((unsigned short int*)alloced_tmp);
                thrust::device_ptr<unsigned short int> d_col_dest((unsigned short int*)thrust::raw_pointer_cast(a->d_columns_int[colname].data()));
                thrust::gather(a->prm_d.begin(), a->prm_d.begin() + g_size, d_col_source, d_col_dest + offset);
            }
            else if(cpy_bits[colname] == 32) {
                thrust::device_ptr<unsigned int> d_col_source((unsigned int*)alloced_tmp);
                thrust::device_ptr<unsigned int> d_col_dest((unsigned int*)thrust::raw_pointer_cast(a->d_columns_int[colname].data()));
                thrust::gather(a->prm_d.begin(), a->prm_d.begin() + g_size, d_col_source, d_col_dest + offset);
            }
            else if(cpy_bits[colname] == 64) {
                thrust::device_ptr<int_type> d_col((int_type*)alloced_tmp);
                thrust::gather(a->prm_d.begin(), a->prm_d.begin() + g_size, d_col, a->d_columns_int[colname].begin() + offset);
            };
        }
        else {
            // uncompressed path: plain 64-bit gather
            thrust::device_ptr<int_type> d_col((int_type*)alloced_tmp);
            thrust::gather(a->prm_d.begin(), a->prm_d.begin() + g_size, d_col, a->d_columns_int[colname].begin() + offset);
        };
    }
    else {
        // float column (destination: d_columns_float), same scheme as above
        if(cpy_bits.find(colname) != cpy_bits.end()) { // non-delta compression
            if(cpy_bits[colname] == 8) {
                thrust::device_ptr<unsigned char> d_col_source((unsigned char*)alloced_tmp);
                thrust::device_ptr<unsigned char> d_col_dest((unsigned char*)thrust::raw_pointer_cast(a->d_columns_float[colname].data()));
                thrust::gather(a->prm_d.begin(), a->prm_d.begin() + g_size, d_col_source, d_col_dest + offset);
            }
            else if(cpy_bits[colname] == 16) {
                thrust::device_ptr<unsigned short int> d_col_source((unsigned short int*)alloced_tmp);
                thrust::device_ptr<unsigned short int> d_col_dest((unsigned short int*)thrust::raw_pointer_cast(a->d_columns_float[colname].data()));
                thrust::gather(a->prm_d.begin(), a->prm_d.begin() + g_size, d_col_source, d_col_dest + offset);
            }
            else if(cpy_bits[colname] == 32) {
                thrust::device_ptr<unsigned int> d_col_source((unsigned int*)alloced_tmp);
                thrust::device_ptr<unsigned int> d_col_dest((unsigned int*)thrust::raw_pointer_cast(a->d_columns_float[colname].data()));
                thrust::gather(a->prm_d.begin(), a->prm_d.begin() + g_size, d_col_source, d_col_dest + offset);
            }
            else if(cpy_bits[colname] == 64) {
                thrust::device_ptr<int_type> d_col((int_type*)alloced_tmp);
                thrust::gather(a->prm_d.begin(), a->prm_d.begin() + g_size, d_col, a->d_columns_float[colname].begin() + offset);
            };
        }
        else {
            thrust::device_ptr<float_type> d_col((float_type*)alloced_tmp);
            thrust::gather(a->prm_d.begin(), a->prm_d.begin() + g_size, d_col, a->d_columns_float[colname].begin() + offset);
        };
    }
};
// Copy g_size elements of column `colname` from the staging buffer
// alloced_tmp into a's device column at element offset `offset`.
// When cpy_bits records a packed width (8/16/32 bits) the copy stays in the
// packed representation and `offset` is measured in packed elements;
// copyFinalize later widens the data and re-applies the base value.
// Fix: the 16-bit branches previously applied `offset` twice -- once in
// int_type/float_type units inside raw_pointer_cast(...data() + offset) and
// again in short units in the thrust::copy -- and used signed `short int`.
// They now apply the offset once after the cast, with unsigned elements,
// matching the 8/32/64-bit branches and mygather.
void mycopy(string colname, CudaSet* a, CudaSet* t, size_t offset, size_t g_size)
{
    if(t->type[colname] != 1) {
        // integer / decimal column (destination: d_columns_int)
        if(cpy_bits.find(colname) != cpy_bits.end()) { // non-delta compression
            if(cpy_bits[colname] == 8) {
                thrust::device_ptr<unsigned char> d_col_source((unsigned char*)alloced_tmp);
                thrust::device_ptr<unsigned char> d_col_dest((unsigned char*)thrust::raw_pointer_cast(a->d_columns_int[colname].data()));
                thrust::copy(d_col_source, d_col_source + g_size, d_col_dest + offset);
            }
            else if(cpy_bits[colname] == 16) {
                thrust::device_ptr<unsigned short int> d_col_source((unsigned short int*)alloced_tmp);
                thrust::device_ptr<unsigned short int> d_col_dest((unsigned short int*)thrust::raw_pointer_cast(a->d_columns_int[colname].data()));
                thrust::copy(d_col_source, d_col_source + g_size, d_col_dest + offset);
            }
            else if(cpy_bits[colname] == 32) {
                thrust::device_ptr<unsigned int> d_col_source((unsigned int*)alloced_tmp);
                thrust::device_ptr<unsigned int> d_col_dest((unsigned int*)thrust::raw_pointer_cast(a->d_columns_int[colname].data()));
                thrust::copy(d_col_source, d_col_source + g_size, d_col_dest + offset);
            }
            else if(cpy_bits[colname] == 64) {
                thrust::device_ptr<int_type> d_col_source((int_type*)alloced_tmp);
                thrust::copy(d_col_source, d_col_source + g_size, a->d_columns_int[colname].begin() + offset);
            };
        }
        else {
            // uncompressed path: plain 64-bit copy
            thrust::device_ptr<int_type> d_col((int_type*)alloced_tmp);
            thrust::copy(d_col, d_col + g_size, a->d_columns_int[colname].begin() + offset);
        };
    }
    else {
        // float column (destination: d_columns_float), same scheme as above
        if(cpy_bits.find(colname) != cpy_bits.end()) { // non-delta compression
            if(cpy_bits[colname] == 8) {
                thrust::device_ptr<unsigned char> d_col_source((unsigned char*)alloced_tmp);
                thrust::device_ptr<unsigned char> d_col_dest((unsigned char*)thrust::raw_pointer_cast(a->d_columns_float[colname].data()));
                thrust::copy(d_col_source, d_col_source + g_size, d_col_dest + offset);
            }
            else if(cpy_bits[colname] == 16) {
                thrust::device_ptr<unsigned short int> d_col_source((unsigned short int*)alloced_tmp);
                thrust::device_ptr<unsigned short int> d_col_dest((unsigned short int*)thrust::raw_pointer_cast(a->d_columns_float[colname].data()));
                thrust::copy(d_col_source, d_col_source + g_size, d_col_dest + offset);
            }
            else if(cpy_bits[colname] == 32) {
                thrust::device_ptr<unsigned int> d_col_source((unsigned int*)alloced_tmp);
                thrust::device_ptr<unsigned int> d_col_dest((unsigned int*)thrust::raw_pointer_cast(a->d_columns_float[colname].data()));
                thrust::copy(d_col_source, d_col_source + g_size, d_col_dest + offset);
            }
            else if(cpy_bits[colname] == 64) {
                thrust::device_ptr<int_type> d_col_source((int_type*)alloced_tmp);
                thrust::copy(d_col_source, d_col_source + g_size, a->d_columns_float[colname].begin() + offset);
            };
        }
        else {
            thrust::device_ptr<float_type> d_col((float_type*)alloced_tmp);
            thrust::copy(d_col, d_col + g_size, a->d_columns_float[colname].begin() + offset);
        };
    };
};
// Load the listed columns of `right` (key column f2 pushed last) for
// segments [start_segment, end_segment). Columns not present in `right`
// are dropped. rcount returns right->maxRecs (per-segment row capacity);
// the return value is the total row count loaded. rsz/flt are forwarded
// to copyColumns (resize device columns / run the filter op).
size_t load_queue(queue<string> c1, CudaSet* right, string f2, size_t &rcount,
                  unsigned int start_segment, unsigned int end_segment, bool rsz, bool flt)
{
    queue<string> cc;
    // keep only columns that exist in `right`, deferring the key f2 to the end
    while(!c1.empty()) {
        if(std::find(right->columnNames.begin(), right->columnNames.end(), c1.front()) != right->columnNames.end()) {
            if(f2 != c1.front() ) {
                cc.push(c1.front());
            };
        };
        c1.pop();
    };
    if(std::find(right->columnNames.begin(), right->columnNames.end(), f2) != right->columnNames.end()) {
        cc.push(f2);
    };
    if(right->filtered) {
        allocColumns(right, cc);
    };
    rcount = right->maxRecs;
    queue<string> ct(cc);
    while(!ct.empty()) {
        if(right->filtered && rsz) {
            // filtered + resizable: rows accumulate segment by segment
            right->mRecCount = 0;
        }
        else {
            // otherwise allocate room for every segment up front
            right->allocColumnOnDevice(ct.front(), rcount*right->segCount);
        };
        ct.pop();
    };
    size_t cnt_r = 0;
    right->devRecCount = 0;
    // copy each segment, accumulating the loaded row count
    for(unsigned int i = start_segment; i < end_segment; i++) {
        if(!right->filtered)
            copyColumns(right, cc, i, cnt_r, rsz, 0);
        else
            copyColumns(right, cc, i, cnt_r, rsz, flt);
        cnt_r = cnt_r + right->mRecCount;
    };
    right->mRecCount = cnt_r;
    return cnt_r;
}
// Widest character column in `a`, in bytes, including dictionary-encoded
// string columns (type 0 with a string_map entry), whose width is looked up
// in the global data dictionary. Never returns less than 8.
size_t max_char(CudaSet* a)
{
    size_t widest = 8;
    for(const auto& col : a->columnNames) {
        if(a->type[col] == 2) {
            // plain char column: width is stored directly
            if (a->char_size[col] > widest)
                widest = a->char_size[col];
        }
        else if(a->type[col] == 0 && a->string_map.find(col) != a->string_map.end()) {
            // dictionary-encoded string: split "table.column" and consult data_dict
            auto s = a->string_map[col];
            auto pos = s.find_first_of(".");
            auto len = data_dict[s.substr(0, pos)][s.substr(pos+1)].col_length;
            if (len > widest)
                widest = len;
        };
    };
    return widest;
};
// Widest character column among the queued field names, in bytes.
// Never returns less than 8. (Local renamed so it no longer shadows
// the function name.)
size_t max_char(CudaSet* a, queue<string> field_names)
{
    size_t widest = 8;
    while (!field_names.empty()) {
        if (a->type[field_names.front()] == 2 && a->char_size[field_names.front()] > widest)
            widest = a->char_size[field_names.front()];
        field_names.pop();
    };
    return widest;
};
// Decide how many segments `a` must be split into so the listed columns fit
// in free GPU memory: if one copy of all rows would exceed a third of free
// memory, pick a segment count targeting a fifth of free memory per segment.
void setSegments(CudaSet* a, queue<string> cols)
{
    const size_t mem_available = getFreeMem();
    // bytes needed per row across the requested columns
    size_t row_bytes = 0;
    while(!cols.empty()) {
        row_bytes += (a->type[cols.front()] != 2) ? int_size : a->char_size[cols.front()];
        cols.pop();
    };
    if(a->mRecCount*row_bytes > mem_available/3) { //default is 3
        a->segCount = (a->mRecCount*row_bytes)/(mem_available/5) + 1;
        a->maxRecs = (a->mRecCount/a->segCount)+1;
    };
};
// Host-side permutation update for a fixed-width char sort key: gather the
// key through the current permutation into tmp, then stable-sort tmp
// (descending when SortType is "DESC"), updating `permutation` in the process.
void update_permutation_char_host(char* key, unsigned int* permutation, size_t RecCount, string SortType, char* tmp, unsigned int len)
{
    str_gather_host(permutation, RecCount, (void*)key, (void*)tmp, len);
    // third flag selects sort direction: 1 = descending, 0 = ascending
    const int descending = (SortType.compare("DESC") == 0) ? 1 : 0;
    str_sort_host(tmp, RecCount, permutation, descending, len);
}
// Permute a device-side fixed-width char column in place: the keys are
// first copied to the device scratch buffer `tmp`, then gathered back into
// `key` through `permutation`. `len` is the fixed width of each entry.
// NOTE(review): the cudaMemcpy return value is not checked.
void apply_permutation_char(char* key, unsigned int* permutation, size_t RecCount, char* tmp, unsigned int len)
{
    // copy keys to temporary vector
    cudaMemcpy( (void*)tmp, (void*) key, RecCount*len, cudaMemcpyDeviceToDevice);
    // permute the keys
    str_gather((void*)permutation, RecCount, (void*)tmp, (void*)key, len);
}
// Host-side variant: gather fixed-width char entries from `key` through
// `permutation` into `res` (out-of-place; `key` is left untouched).
void apply_permutation_char_host(char* key, unsigned int* permutation, size_t RecCount, char* res, unsigned int len)
{
    str_gather_host(permutation, RecCount, (void*)key, (void*)res, len);
}
// Apply the filter expression of variable `s` to table `f`'s segment
// `segment`. The result set b (varNames[s]) ends up with either a row
// permutation in prm_d (prm_index 'R'), an "all rows match" marker ('A'),
// or an empty result, depending on the zone-map check.
void filter_op(const char *s, const char *f, unsigned int segment)
{
    CudaSet *a, *b;
    a = varNames.find(f)->second;
    a->name = f;
    //std::clock_t start1 = std::clock();
    if(a->mRecCount == 0 && !a->filtered) {
        // nothing to filter: produce an empty result set
        b = new CudaSet(0,1);
    }
    else {
        if(verbose)
            cout << "FILTER " << s << " " << f << " " << getFreeMem() << '\xd';
        b = varNames[s];
        b->name = s;
        b->string_map = a->string_map;
        size_t cnt = 0;
        b->sorted_fields = a->sorted_fields;
        b->ts_cols = a->ts_cols;
        allocColumns(a, b->fil_value);
        if (b->prm_d.size() == 0) {
            b->prm_d.resize(a->maxRecs);
        };
        //cout << endl << "MAP CHECK start " << segment << endl;
        // zone maps can prove a segment matches fully ('A'), not at all,
        // or needs a row-level scan ('R')
        char map_check = zone_map_check(b->fil_type,b->fil_value,b->fil_nums, b->fil_nums_f, b->fil_nums_precision, a, segment);
        //char map_check = 'R';
        cout << endl << "MAP CHECK segment " << segment << " " << map_check << endl;
        if(map_check == 'R') {
            // row-level filter: copy the predicate's columns (with phase_copy
            // temporarily disabled), evaluate it, then compact qualifying
            // row numbers into b->prm_d
            auto old_ph = phase_copy;
            phase_copy = 0;
            copyColumns(a, b->fil_value, segment, cnt);
            phase_copy = old_ph;
            bool* res = filter(b->fil_type,b->fil_value,b->fil_nums, b->fil_nums_f, b->fil_nums_precision, a, segment);
            thrust::device_ptr<bool> bp((bool*)res);
            b->prm_index = 'R';
            b->mRecCount = thrust::count(bp, bp + (unsigned int)a->mRecCount, 1);
            thrust::copy_if(thrust::make_counting_iterator((unsigned int)0), thrust::make_counting_iterator((unsigned int)a->mRecCount),
                            bp, b->prm_d.begin(), thrust::identity<bool>());
            cudaFree(res);
        }
        else {
            // zone map decided for the whole segment: 'A' keeps every row,
            // anything else keeps none
            b->prm_index = map_check;
            if(map_check == 'A')
                b->mRecCount = a->mRecCount;
            else
                b->mRecCount = 0;
        };
        // after the last segment, release the source's device memory
        if(segment == a->segCount-1)
            a->deAllocOnDevice();
    }
    if(verbose)
        cout << endl << "filter result " << b->mRecCount << endl;
}
// Load the right-hand table's columns for a join over segments
// [start_seg, end_seg). For uncompressed tables the join key f2 is loaded
// first (with resize + filter enabled), then the remaining requested
// columns that exist in `right` (without resize/filter, reusing the rows
// already gathered); compressed tables go through a single load_queue call.
// Returns the number of rows loaded. (op_g is currently unused here.)
size_t load_right(CudaSet* right, string f2, queue<string> op_g, queue<string> op_alt, size_t& rcount, unsigned int start_seg, unsigned int end_seg) {
    size_t cnt_r = 0;
    //if join is on strings then add integer columns to left and right tables and modify colInd1 and colInd2
    // need to allocate all right columns
    if(right->not_compressed) {
        // load the join key column on its own first
        queue<string> op_alt1;
        op_alt1.push(f2);
        cnt_r = load_queue(op_alt1, right, "", rcount, start_seg, end_seg, 1, 1);
        // then the other requested columns present in `right`
        queue<string> op_alt2;
        while(!op_alt.empty()) {
            if(f2.compare(op_alt.front())) {
                if (std::find(right->columnNames.begin(), right->columnNames.end(), op_alt.front()) != right->columnNames.end()) {
                    op_alt2.push(op_alt.front());
                };
            };
            op_alt.pop();
        };
        if(!op_alt2.empty())
            cnt_r = load_queue(op_alt2, right, "", rcount, start_seg, end_seg, 0, 0);
    }
    else {
        cnt_r = load_queue(op_alt, right, f2, rcount, start_seg, end_seg, 1, 1);
    };
    return cnt_r;
};
// Append the records of set `s` (source, bound to `a`) into set `f`
// (destination, bound to `b`). Three paths depending on where each side
// lives: disk->disk (segment files copied/merged), memory->memory (host
// vectors appended), memory->disk (segments compressed and appended).
void insert_records(const char* f, const char* s) {
    char buf[4096];
    size_t size, maxRecs, cnt = 0;
    string str_s, str_d;
    if(varNames.find(s) == varNames.end()) {
        process_error(3, "couldn't find " + string(s) );
    };
    CudaSet *a;
    a = varNames.find(s)->second;
    a->name = s;
    if(varNames.find(f) == varNames.end()) {
        process_error(3, "couldn't find " + string(f) );
    };
    CudaSet *b;
    b = varNames.find(f)->second;
    b->name = f;
    // if both source and destination are on disk
    cout << "SOURCES " << a->source << ":" << b->source << endl;
    if(a->source && b->source) {
        // disk -> disk: place a's segment files after b's existing segments
        for(unsigned int i = 0; i < a->segCount; i++) {
            for(unsigned int z = 0; z < a->columnNames.size(); z++) {
                if(a->type[a->columnNames[z]] != 2) {
                    // numeric column: raw byte copy of the segment file
                    // NOTE(review): fopen results are not checked for NULL
                    str_s = a->load_file_name + "." + a->columnNames[z] + "." + to_string(i);
                    str_d = b->load_file_name + "." + a->columnNames[z] + "." + to_string(b->segCount + i);
                    cout << str_s << " " << str_d << endl;
                    FILE* source = fopen(str_s.c_str(), "rb");
                    FILE* dest = fopen(str_d.c_str(), "wb");
                    while (size = fread(buf, 1, BUFSIZ, source)) {
                        fwrite(buf, 1, size, dest);
                    }
                    fclose(source);
                    fclose(dest);
                }
                else { //merge strings
                    //read b's strings
                    // build string -> index map of the destination dictionary
                    str_s = b->load_file_name + "." + b->columnNames[z];
                    FILE* dest = fopen(str_s.c_str(), "rb");
                    auto len = b->char_size[b->columnNames[z]];
                    map<string, unsigned long long int> map_d;
                    buf[len] = 0;
                    unsigned long long cnt = 0;
                    while (fread(buf, len, 1, dest)) {
                        map_d[buf] = cnt;
                        cnt++;
                    };
                    fclose(dest);
                    unsigned long long int cct = cnt;
                    // copy a's per-segment hash file to the destination slot
                    str_s = a->load_file_name + "." + a->columnNames[z] + "." + to_string(i) + ".hash";
                    str_d = b->load_file_name + "." + b->columnNames[z] + "." + to_string(b->segCount + i) + ".hash";
                    FILE* source = fopen(str_s.c_str(), "rb");
                    dest = fopen(str_d.c_str(), "wb");
                    while (size = fread(buf, 1, BUFSIZ, source)) {
                        fwrite(buf, 1, size, dest);
                    }
                    fclose(source);
                    fclose(dest);
                    // build index -> string map of the source dictionary
                    str_s = a->load_file_name + "." + a->columnNames[z];
                    source = fopen(str_s.c_str(), "rb");
                    map<unsigned long long int, string> map_s;
                    buf[len] = 0;
                    cnt = 0;
                    while (fread(buf, len, 1, source)) {
                        map_s[cnt] = buf;
                        cnt++;
                    };
                    fclose(source);
                    // pull a's encoded column to the host so indexes can be remapped
                    queue<string> op_vx;
                    op_vx.push(a->columnNames[z]);
                    allocColumns(a, op_vx);
                    a->resize(a->maxRecs);
                    a->CopyColumnToGpu(a->columnNames[z], z, 0);
                    a->CopyColumnToHost(a->columnNames[z]);
                    // append unseen strings to b's dictionary, remap a's codes
                    str_d = b->load_file_name + "." + b->columnNames[z];
                    fstream f_file;
                    f_file.open(str_d.c_str(), ios::out|ios::app|ios::binary);
                    for(auto j = 0; j < a->mRecCount; j++) {
                        auto ss = map_s[a->h_columns_int[a->columnNames[z]][j]];
                        if(map_d.find(ss) == map_d.end()) { //add
                            f_file.write((char *)ss.c_str(), len);
                            a->h_columns_int[a->columnNames[z]][j] = cct;
                            cct++;
                        }
                        else {
                            a->h_columns_int[a->columnNames[z]][j] = map_d[ss];
                        };
                    };
                    f_file.close();
                    // recompress the remapped index column into the new segment
                    thrust::device_vector<int_type> d_col(a->mRecCount);
                    thrust::copy(a->h_columns_int[a->columnNames[z]].begin(), a->h_columns_int[a->columnNames[z]].begin() + a->mRecCount, d_col.begin());
                    auto i_name = b->load_file_name + "." + b->columnNames[z] + "." + to_string(b->segCount + i) + ".idx";
                    pfor_compress(thrust::raw_pointer_cast(d_col.data()), a->mRecCount*int_size, i_name, a->h_columns_int[a->columnNames[z]], 0);
                };
            };
        };
        // update headers with the combined segment and record counts
        if(a->maxRecs > b->maxRecs)
            maxRecs = a->maxRecs;
        else
            maxRecs = b->maxRecs;
        for(unsigned int i = 0; i < b->columnNames.size(); i++) {
            b->reWriteHeader(b->load_file_name, b->columnNames[i], a->segCount + b->segCount, a->totalRecs + b->totalRecs, maxRecs);
        };
    }
    else if(!a->source && !b->source) { //if both source and destination are in memory
        // memory -> memory: grow b's host columns and append a's rows
        size_t oldCount = b->mRecCount;
        b->resize(a->mRecCount);
        for(unsigned int z = 0; z< b->mColumnCount; z++) {
            if(b->type[a->columnNames[z]] == 0) {
                thrust::copy(a->h_columns_int[a->columnNames[z]].begin(), a->h_columns_int[a->columnNames[z]].begin() + a->mRecCount, b->h_columns_int[b->columnNames[z]].begin() + oldCount);
            }
            else if(b->type[a->columnNames[z]] == 1) {
                thrust::copy(a->h_columns_float[a->columnNames[z]].begin(), a->h_columns_float[a->columnNames[z]].begin() + a->mRecCount, b->h_columns_float[b->columnNames[z]].begin() + oldCount);
            }
            else {
                // fixed-width char data: raw host-to-host byte copy
                cudaMemcpy(b->h_columns_char[b->columnNames[z]] + b->char_size[b->columnNames[z]]*oldCount, a->h_columns_char[a->columnNames[z]], a->char_size[a->columnNames[z]]*a->mRecCount, cudaMemcpyHostToHost);
            };
        };
    }
    else if(!a->source && b->source) {
        // memory -> disk: compress a's segments and append to b's files
        total_segments = b->segCount;
        total_count = b->mRecCount;
        total_max = b->maxRecs;;
        queue<string> op_vx;
        for(unsigned int i=0; i < a->columnNames.size(); i++)
            op_vx.push(a->columnNames[i]);
        allocColumns(a, op_vx);
        a->resize(a->maxRecs);
        for(unsigned int i = 0; i < a->segCount; i++) {
            if (a->filtered) {
                copyColumns(a, op_vx, i, cnt);
                a->CopyToHost(0, a->mRecCount);
            };
            a->compress(b->load_file_name, 0, 1, i - (a->segCount-1), a->mRecCount, 0);
        };
        for(unsigned int i = 0; i < b->columnNames.size(); i++) {
            b->writeHeader(b->load_file_name, b->columnNames[i], total_segments);
        };
    };
};
void delete_records(const char* f) {
CudaSet *a;
a = varNames.find(f)->second;
a->name = f;
size_t totalRemoved = 0;
size_t maxRecs = 0;
if(!a->keep) { // temporary variable
process_error(2, "Delete operator is only applicable to disk based sets\nfor deleting records from derived sets please use filter operator ");
}
else { // read matching segments, delete, compress and write on a disk replacing the original segments
string str, str_old;
queue<string> op_vx;
size_t cnt;
for ( auto it=data_dict[a->load_file_name].begin() ; it != data_dict[a->load_file_name].end(); ++it ) {
op_vx.push((*it).first);
if (std::find(a->columnNames.begin(), a->columnNames.end(), (*it).first) == a->columnNames.end()) {
if ((*it).second.col_type == 0) {
a->type[(*it).first] = 0;
a->decimal[(*it).first] = 0;
//a->h_columns_int[(*it).first] = thrust::host_vector<int_type, pinned_allocator<int_type> >();
a->h_columns_int[(*it).first] = thrust::host_vector<int_type>();
a->d_columns_int[(*it).first] = thrust::device_vector<int_type>();
}
else if((*it).second.col_type == 1) {
a->type[(*it).first] = 1;
a->decimal[(*it).first] = 0;
//a->h_columns_float[(*it).first] = thrust::host_vector<float_type, pinned_allocator<float_type> >();
a->h_columns_float[(*it).first] = thrust::host_vector<float_type>();
a->d_columns_float[(*it).first] = thrust::device_vector<float_type>();
}
else if ((*it).second.col_type == 3) {
a->type[(*it).first] = 1;
a->decimal[(*it).first] = 1;
//a->h_columns_float[(*it).first] = thrust::host_vector<float_type, pinned_allocator<float_type> >();
a->h_columns_float[(*it).first] = thrust::host_vector<float_type>();
a->d_columns_float[(*it).first] = thrust::device_vector<float_type>();
}
else {
a->type[(*it).first] = 2;
a->decimal[(*it).first] = 0;
a->h_columns_char[(*it).first] = nullptr;
a->d_columns_char[(*it).first] = nullptr;
a->char_size[(*it).first] = (*it).second.col_length;
};
a->columnNames.push_back((*it).first);
}
};
allocColumns(a, op_vx);
a->resize(a->maxRecs);
a->prm_d.resize(a->maxRecs);
size_t cc = a->mRecCount;
size_t tmp;
void* d;
CUDA_SAFE_CALL(cudaMalloc((void **) &d, a->maxRecs*float_size));
unsigned int new_seg_count = 0;
char map_check;
for(unsigned int i = 0; i < a->segCount; i++) {
map_check = zone_map_check(op_type,op_value,op_nums, op_nums_f, op_nums_precision, a, i);
if(verbose)
cout << "MAP CHECK segment " << i << " " << map_check << endl;
if(map_check != 'N') {
cnt = 0;
copyColumns(a, op_vx, i, cnt);
tmp = a->mRecCount;
if(a->mRecCount) {
bool* res = filter(op_type,op_value,op_nums, op_nums_f, op_nums_precision, a, i);
thrust::device_ptr<bool> bp((bool*)res);
thrust::copy_if(thrust::make_counting_iterator((unsigned int)0), thrust::make_counting_iterator((unsigned int)a->mRecCount),
bp, a->prm_d.begin(), thrust::logical_not<bool>());
a->mRecCount = thrust::count(bp, bp + (unsigned int)a->mRecCount, 0);
cudaFree(res);
// cout << "Remained recs count " << a->mRecCount << endl;
if(a->mRecCount > maxRecs)
maxRecs = a->mRecCount;
if (a->mRecCount) {
totalRemoved = totalRemoved + (tmp - a->mRecCount);
if (a->mRecCount == tmp) { //none deleted
if(new_seg_count != i) {
for (auto it=data_dict[a->load_file_name].begin() ; it != data_dict[a->load_file_name].end(); ++it ) {
auto colname = (*it).first;
str_old = a->load_file_name + "." + colname + "." + to_string(i);
str = a->load_file_name + "." + colname + "." + to_string(new_seg_count);
remove(str.c_str());
rename(str_old.c_str(), str.c_str());
};
};
new_seg_count++;
}
else { //some deleted
//cout << "writing segment " << new_seg_count << endl;
map<string, col_data> s = data_dict[a->load_file_name];
for ( map<string, col_data>::iterator it=s.begin() ; it != s.end(); ++it ) {
string colname = (*it).first;
str = a->load_file_name + "." + colname + "." + to_string(new_seg_count);
if(a->type[colname] == 0) {
thrust::device_ptr<int_type> d_col((int_type*)d);
thrust::gather(a->prm_d.begin(), a->prm_d.begin() + a->mRecCount, a->d_columns_int[colname].begin(), d_col);
pfor_compress( d, a->mRecCount*int_size, str, a->h_columns_int[colname], 0);
}
else if(a->type[colname] == 1) {
thrust::device_ptr<float_type> d_col((float_type*)d);
if(a->decimal[colname]) {
thrust::gather(a->prm_d.begin(), a->prm_d.begin() + a->mRecCount, a->d_columns_float[colname].begin(), d_col);
thrust::device_ptr<long long int> d_col_dec((long long int*)d);
thrust::transform(d_col,d_col+a->mRecCount, d_col_dec, float_to_long());
pfor_compress( d, a->mRecCount*float_size, str, a->h_columns_float[colname], 1);
}
else {
thrust::gather(a->prm_d.begin(), a->prm_d.begin() + a->mRecCount, a->d_columns_float[colname].begin(), d_col);
thrust::copy(d_col, d_col + a->mRecCount, a->h_columns_float[colname].begin());
fstream binary_file(str.c_str(),ios::out|ios::binary);
binary_file.write((char *)&a->mRecCount, 4);
binary_file.write((char *)(a->h_columns_float[colname].data()),a->mRecCount*float_size);
unsigned int comp_type = 3;
binary_file.write((char *)&comp_type, 4);
binary_file.close();
};
}
else {
thrust::device_ptr<int_type> d_col((int_type*)d);
thrust::gather(a->prm_d.begin(), a->prm_d.begin() + a->mRecCount, a->d_columns_int[colname].begin(), d_col);
pfor_compress( d, a->mRecCount*int_size, str + ".hash", a->h_columns_int[colname], 0);
};
};
new_seg_count++;
};
}
else {
totalRemoved = totalRemoved + tmp;
};
}
}
else {
if(new_seg_count != i) {
for(unsigned int z = 0; z < a->columnNames.size(); z++) {
str_old = a->load_file_name + "." + a->columnNames[z] + "." + to_string(i);
str = a->load_file_name + "." + a->columnNames[z] + "." + to_string(new_seg_count);
remove(str.c_str());
rename(str_old.c_str(), str.c_str());
};
};
new_seg_count++;
maxRecs = a->maxRecs;
};
};
if (new_seg_count < a->segCount) {
for(unsigned int i = new_seg_count; i < a->segCount; i++) {
//cout << "delete segment " << i << endl;
for(unsigned int z = 0; z < a->columnNames.size(); z++) {
str = a->load_file_name + "." + a->columnNames[z];
str += "." + to_string(i);
remove(str.c_str());
};
};
};
for(unsigned int i = new_seg_count; i < a->segCount; i++) {
a->reWriteHeader(a->load_file_name, a->columnNames[i], new_seg_count, a->totalRecs-totalRemoved, maxRecs);
};
a->mRecCount = cc;
a->prm_d.resize(0);
a->segCount = new_seg_count;
a->deAllocOnDevice();
cudaFree(d);
};
};
// Serializes the two-level data dictionary (load-file name -> column name ->
// col_data) to `file_name` in binary form, truncating any existing file.
//
// On-disk layout (counts and string lengths are written as raw 8-byte size_t,
// so the format assumes a 64-bit build with matching endianness on read-back):
//   [table count]
//   per table:  [name length][name bytes][column count]
//   per column: [name length][name bytes][col_type:4][col_length:4]
// The inverse operation is load_col_data().
void save_col_data(map<string, map<string, col_data> >& data_dict, string file_name)
{
size_t str_len;
fstream binary_file(file_name.c_str(),ios::out|ios::binary|ios::trunc);
size_t len = data_dict.size();
binary_file.write((char *)&len, 8);
for (auto it=data_dict.begin() ; it != data_dict.end(); ++it ) {
// table (load file) name
str_len = (*it).first.size();
binary_file.write((char *)&str_len, 8);
binary_file.write((char *)(*it).first.data(), str_len);
map<string, col_data> s = (*it).second;
size_t len1 = s.size();
binary_file.write((char *)&len1, 8);
for (auto sit=s.begin() ; sit != s.end(); ++sit ) {
// column name, then its 4-byte type code and 4-byte length
str_len = (*sit).first.size();
binary_file.write((char *)&str_len, 8);
binary_file.write((char *)(*sit).first.data(), str_len);
binary_file.write((char *)&(*sit).second.col_type, 4);
binary_file.write((char *)&(*sit).second.col_length, 4);
};
};
binary_file.close();
}
// Loads the binary data dictionary written by save_col_data() into
// `data_dict` (load-file name -> column name -> col_data).
//
// Layout mirrors save_col_data(): 8-byte counts/lengths, raw name bytes,
// then 4-byte col_type and 4-byte col_length per column.
// A missing file is reported to stdout and left as an empty dictionary.
//
// Fix vs. original: names were read into a fixed 4000-byte stack buffer with
// no bounds check on str_len, so a long or corrupt entry could smash the
// stack. Reading through std::string handles any length safely, and the
// loops now stop if the stream goes bad mid-file instead of spinning on
// stale values.
void load_col_data(map<string, map<string, col_data> >& data_dict, string file_name)
{
size_t str_len, recs, len1;
string str1, str2;
unsigned int col_type, col_length;

fstream binary_file;
binary_file.open(file_name.c_str(),ios::in|ios::binary);
if(binary_file.is_open()) {
binary_file.read((char*)&recs, 8);
for(unsigned int i = 0; i < recs && binary_file.good(); i++) {
binary_file.read((char*)&str_len, 8);
str1.resize(str_len);
binary_file.read(&str1[0], str_len);
binary_file.read((char*)&len1, 8);
for(unsigned int j = 0; j < len1 && binary_file.good(); j++) {
binary_file.read((char*)&str_len, 8);
str2.resize(str_len);
binary_file.read(&str2[0], str_len);
binary_file.read((char*)&col_type, 4);
binary_file.read((char*)&col_length, 4);
data_dict[str1][str2].col_type = col_type;
data_dict[str1][str2].col_length = col_length;
};
};
binary_file.close();
}
else {
cout << "Couldn't open data dictionary" << endl;
};
}
// True when `name` is one of the column names registered on CudaSet `a`.
bool var_exists(CudaSet* a, string name) {
const auto& cols = a->columnNames;
return std::find(cols.begin(), cols.end(), name) != cols.end();
}
// Returns nonzero when `filename` can be opened for reading (a readability
// probe, not a strict existence check -- permissions also matter).
int file_exist (const char *filename)
{
return std::ifstream(filename).good() ? 1 : 0;
}
// Checks whether a join-bitmap index file exists on disk for every filter
// column of `right` that is an actual column of `right`. Bitmap files are
// named "<left file>.<right file>.<column>.0". Returns false when `right`
// has no filter values at all, or when any expected file is missing.
bool check_bitmap_file_exist(CudaSet* left, CudaSet* right)
{
queue<string> cols(right->fil_value);
bool bitmaps_exist = !cols.empty();
while(!cols.empty()) {
const string col = cols.front();
cols.pop();
// entries that are not column names (operators/literals) are skipped
if (std::find(right->columnNames.begin(), right->columnNames.end(), col) != right->columnNames.end()) {
string fname = left->load_file_name + "." + right->load_file_name + "." + col + ".0";
if( !file_exist(fname.c_str()))
bitmaps_exist = 0;
};
};
return bitmaps_exist;
}
// Checks whether join-bitmap index files exist for every filter column of
// `right`, and if so, MOVES right's entire filter expression onto `left`
// (draining right's fil_* queues), rewriting column references to the
// corresponding bitmap file names. Returns true when left's filter was
// augmented (including the trivial case of no filter on right), false when
// at least one bitmap file is missing -- in which case nothing is moved.
bool check_bitmaps_exist(CudaSet* left, CudaSet* right)
{
//check if there are join bitmap indexes
queue<string> cols(right->fil_value);
bool bitmaps_exist = 1;
if(cols.size() == 0) {
bitmaps_exist = 1;
return 1;
};
// phase 1: verify a "<left>.<right>.<col>.0" file exists for each filter
// token that is a real column of right (other tokens are ignored)
while(cols.size() ) {
if (std::find(right->columnNames.begin(), right->columnNames.end(), cols.front()) != right->columnNames.end()) {
string fname = left->load_file_name + "." + right->load_file_name + "." + cols.front() + ".0";
if( !file_exist(fname.c_str())) {
bitmaps_exist = 0;
};
};
cols.pop();
};
if(bitmaps_exist) {
// phase 2: transfer right's filter operands onto left
while(!right->fil_nums.empty() ) {
left->fil_nums.push(right->fil_nums.front());
right->fil_nums.pop();
};
while(!right->fil_nums_f.empty() ) {
left->fil_nums_f.push(right->fil_nums_f.front());
right->fil_nums_f.pop();
};
// column tokens are rewritten to their bitmap file prefix; non-column
// tokens pass through unchanged
while(!right->fil_value.empty() ) {
if (std::find(right->columnNames.begin(), right->columnNames.end(), right->fil_value.front()) != right->columnNames.end()) {
string fname = left->load_file_name + "." + right->load_file_name + "." + right->fil_value.front();
left->fil_value.push(fname);
}
else
left->fil_value.push(right->fil_value.front());
right->fil_value.pop();
};
// if left already had a filter, AND it with the transferred one
bool add_and = 1;
if(left->fil_type.empty())
add_and = 0;
while(!right->fil_type.empty() ) {
left->fil_type.push(right->fil_type.front());
right->fil_type.pop();
};
if(add_and) {
left->fil_type.push("AND");
};
return 1;
}
else {
return 0;
};
}
// Appends the 1-byte sort_check flag of table `rtable` (looked up in the
// global varNames registry) to the binary file `str`.
// NOTE(review): the varNames.find() result is dereferenced unchecked --
// assumes rtable is always registered; confirm against callers.
// The `rid` parameter is unused here.
void check_sort(const string str, const char* rtable, const char* rid)
{
CudaSet* right = varNames.find(rtable)->second;
fstream binary_file(str.c_str(),ios::out|ios::binary|ios::app);
binary_file.write((char *)&right->sort_check, 1);
binary_file.close();
}
// Materializes the dictionary-encoded string column `colname` of `a` into a
// host byte buffer (fixed-width `len` bytes per value, looked up from the
// data dictionary), then sorts the permutation `raw_ptr` by string value --
// on the GPU via str_sort when !host, otherwise on the CPU via str_sort_host.
// `temp` is caller-provided device scratch of at least 8*mRecCount bytes;
// `ord` == "DESC" selects descending order.
//
// Fixes vs. original: the string file handle is now checked before use
// (a missing file previously caused fseek/fread on a NULL FILE*), and short
// reads abort the fill loop instead of silently leaving garbage rows.
void update_char_permutation(CudaSet* a, string colname, unsigned int* raw_ptr, string ord, void* temp, bool host)
{
auto s = a->string_map[colname];
auto pos = s.find_first_of(".");
// per-value byte width comes from the dictionary entry "<file>.<column>"
auto len = data_dict[s.substr(0, pos)][s.substr(pos+1)].col_length;
// NOTE(review): any buffer previously held in h_columns_char[colname] is
// leaked here -- ownership semantics need confirming against callers.
a->h_columns_char[colname] = new char[a->mRecCount*len];
memset(a->h_columns_char[colname], 0, a->mRecCount*len);
// gather the dictionary indices in permutation order into device scratch,
// then copy them to the host so the string file can be read row by row
thrust::device_ptr<unsigned int> perm(raw_ptr);
thrust::device_ptr<int_type> temp_int((int_type*)temp);
thrust::gather(perm, perm+a->mRecCount, a->d_columns_int[colname].begin(), temp_int);
cudaMemcpy(thrust::raw_pointer_cast(a->h_columns_int[colname].data()), temp, 8*a->mRecCount, cudaMemcpyDeviceToHost);
FILE *f = fopen(a->string_map[colname].c_str(), "rb");
if(!f) {
cout << "Couldn't open string file " << a->string_map[colname] << endl;
return;
};
for(int z = 0 ; z < a->mRecCount; z++) {
fseek(f, a->h_columns_int[colname][z] * len, SEEK_SET);
if(fread(a->h_columns_char[colname] + z*len, 1, len, f) != len) {
cout << "Short read in string file " << a->string_map[colname] << endl;
break;
};
};
fclose(f);
if(!host) {
// stage the strings on the device and sort the permutation there
void *d;
cudaMalloc((void **) &d, a->mRecCount*len);
a->d_columns_char[colname] = (char*)d;
cudaMemcpy(a->d_columns_char[colname], a->h_columns_char[colname], len*a->mRecCount, cudaMemcpyHostToDevice);
if (ord.compare("DESC") == 0 )
str_sort(a->d_columns_char[colname], a->mRecCount, raw_ptr, 1, len);
else
str_sort(a->d_columns_char[colname], a->mRecCount, raw_ptr, 0, len);
cudaFree(d);
}
else {
if (ord.compare("DESC") == 0 )
str_sort_host(a->h_columns_char[colname], a->mRecCount, raw_ptr, 1, len);
else
str_sort_host(a->h_columns_char[colname], a->mRecCount, raw_ptr, 0, len);
};
}
// Returns `t` shifted by the single non-zero component among
// year/month/day/hour/minute/second (checked in that order; the first
// non-zero one wins and the rest are ignored -- matching the original
// contract). Year/month arithmetic works on the broken-down UTC time so
// calendar boundaries are respected; day and smaller units are plain
// second offsets.
//
// Fix vs. original: the month branch only handled a single forward year
// wrap, so offsets > 12 months (or negative ones) produced an out-of-range
// tm_mon. The offset is now folded with floor semantics.
time_t add_interval(time_t t, int year, int month, int day, int hour, int minute, int second)
{
if(year) {
struct tm tt = *gmtime (&t);
tt.tm_year = tt.tm_year + year;
return tm_to_time_t_utc(&tt);
}
else if(month) {
struct tm tt = *gmtime (&t);
// fold month offset into [0,11] with the year carrying the overflow
int m = tt.tm_mon + month;
tt.tm_year += (m >= 0) ? (m / 12) : -((11 - m) / 12);
tt.tm_mon = ((m % 12) + 12) % 12;
return tm_to_time_t_utc(&tt);
}
else if(day) {
return t + day*24*60*60;
}
else if(hour) {
return t + hour*60*60;
}
else if(minute) {
return t + minute*60;
}
else {
return t + second;
}
}
#ifdef _WIN64
// Total physical RAM in bytes (Windows: GlobalMemoryStatusEx).
size_t getTotalSystemMemory()
{
MEMORYSTATUSEX status;
status.dwLength = sizeof(status);
GlobalMemoryStatusEx(&status);
return status.ullTotalPhys;
}
#else
// Total physical RAM in bytes (POSIX: physical page count times page size).
size_t getTotalSystemMemory()
{
const long pageCount = sysconf(_SC_PHYS_PAGES);
const long bytesPerPage = sysconf(_SC_PAGE_SIZE);
return pageCount * bytesPerPage;
}
#endif
|
01cb35d44246d35f5e6cab543be14c837231b0cc.hip
|
// !!! This is a file automatically generated by hipify!!!
#include <iostream>
#include <fstream>
#include <ctime>
#define STB_IMAGE_WRITE_IMPLEMENTATION
#include <stb/stb_image_write.h>
#include <hip/hip_runtime.h>
#include <device_launch_parameters.h>
#include <Color.cuh>
#include <Ray.cuh>
#include <helperUtils.cuh>
using namespace TinyRT;
// Background shading: blends linearly from white to sky blue based on the
// vertical component of the normalized ray direction.
__device__ Color rayColor(const Ray& r) {
const float blend = 0.5f * (unitVec3(r.direction()).y() + 1.0f);
const Color white(1.0f, 1.0f, 1.0f);
const Color skyBlue(0.5f, 0.7f, 1.0f);
return (1.0f - blend) * white + blend * skyBlue;
}
// Computes one pixel per thread and writes it to pixelBuffer in row-major
// order (row * imageWidth + col).
// Expected launch: 2D grid of 2D blocks covering at least
// imageWidth x imageHeight threads; excess threads exit via the bounds check.
__global__ void render(
Color* const pixelBuffer,
const int imageWidth,
const int imageHeight,
const Vec3 lowerLeftCorner,
const Vec3 horizontal,
const Vec3 vertical,
const Vec3 origin) {
const int col = threadIdx.x + blockIdx.x * blockDim.x;
const int row = threadIdx.y + blockIdx.y * blockDim.y;
// guard: the grid is rounded up, so it may exceed the image
if (col >= imageWidth || row >= imageHeight)
return;
const int idx = row * imageWidth + col;
// u,v in [0,1]; v is flipped so row 0 maps to the top of the viewport
const auto u = static_cast<float>(col) / static_cast<float>(imageWidth - 1);
const auto v = 1.0f - static_cast<float>(row) / static_cast<float>(imageHeight - 1);
const Ray r(origin, lowerLeftCorner + u * horizontal + v * vertical - origin);
pixelBuffer[idx] = rayColor(r);
}
int main() {
/* image config */
constexpr float aspectRatio = 16.0f / 9.0f;
constexpr int imageWidth = 400;
constexpr int imageHeight = static_cast<int>(imageWidth / aspectRatio);
/* image output file */
const std::string fileName("output.png");
/* thread block config */
constexpr int threadBlockWidth = 8;
constexpr int threadBlockHeight = 8;
// preparation
constexpr int channelNum = 3; // rgb
constexpr int pixelNum = imageWidth * imageHeight;
constexpr size_t pixelBufferBytes = pixelNum * sizeof(Color);
// allocate memory for pixel buffer
const auto pixelBufferPtr = cudaManagedUniquePtr<Color>(pixelBufferBytes);
// camera setting
constexpr float viewPortHeight = 2.0f;
constexpr float viewPortWidth = aspectRatio * viewPortHeight;
constexpr float focalLength = 1.0f;
const Point3 origin(0.0f, 0.0f, 0.0f);
const Vec3 horizontal(viewPortWidth, 0.0f, 0.0f);
const Vec3 vertical(0.0f, viewPortHeight, 0.0f);
// left-handed Y up
const Point3 lowerLeftCorner = origin - horizontal / 2 - vertical / 2 + Vec3(0.0f, 0.0f, focalLength);
// start timer
const clock_t start = clock();
const dim3 blockDim(imageWidth / threadBlockWidth + 1, imageHeight / threadBlockHeight + 1);
const dim3 threadDim(threadBlockWidth, threadBlockHeight);
// render the image into buffer
hipLaunchKernelGGL(( render), dim3(blockDim), dim3(threadDim), 0, 0, pixelBufferPtr.get(), imageWidth, imageHeight, lowerLeftCorner, horizontal, vertical, origin);
checkCudaErrors(hipGetLastError());
checkCudaErrors(hipDeviceSynchronize());
// stop timer
const clock_t stop = clock();
// measure rendering time
const auto renderingMillisecond = stop - start;
// other image writer arguments
constexpr int imageSize = pixelNum * channelNum;
constexpr size_t strideBytes = imageWidth * channelNum * sizeof(unsigned char);
const std::unique_ptr<unsigned char[]> pixelDataPtr(new unsigned char[imageSize]);
// store the pixel data into writing buffer as 8bit color
for (int pixelIdx = 0, dataIdx = 0; pixelIdx < pixelNum; ++pixelIdx) {
const Color color = pixelBufferPtr.get()[pixelIdx];
pixelDataPtr[dataIdx++] = static_cast<unsigned char>(color.r8bit());
pixelDataPtr[dataIdx++] = static_cast<unsigned char>(color.g8bit());
pixelDataPtr[dataIdx++] = static_cast<unsigned char>(color.b8bit());
}
// print rendering time
std::cout << "Complete!\n" << "The rendering took " << renderingMillisecond << "ms" << std::endl;
// write pixel data to output file
stbi_write_png(fileName.c_str(), imageWidth, imageHeight, channelNum, pixelDataPtr.get(), strideBytes);
return 0;
}
|
01cb35d44246d35f5e6cab543be14c837231b0cc.cu
|
#include <iostream>
#include <fstream>
#include <ctime>
#define STB_IMAGE_WRITE_IMPLEMENTATION
#include <stb/stb_image_write.h>
#include <cuda_runtime.h>
#include <device_launch_parameters.h>
#include <Color.cuh>
#include <Ray.cuh>
#include <helperUtils.cuh>
using namespace TinyRT;
__device__ Color rayColor(const Ray& r) {
const Vec3 unitDirection = unitVec3(r.direction());
const float t = 0.5f * (unitDirection.y() + 1.0f);
return (1.0f - t) * Color(1.0f, 1.0f, 1.0f) + t * Color(0.5f, 0.7f, 1.0f);
}
__global__ void render(
Color* const pixelBuffer,
const int imageWidth,
const int imageHeight,
const Vec3 lowerLeftCorner,
const Vec3 horizontal,
const Vec3 vertical,
const Vec3 origin) {
const int col = threadIdx.x + blockIdx.x * blockDim.x;
const int row = threadIdx.y + blockIdx.y * blockDim.y;
if (col >= imageWidth || row >= imageHeight)
return;
const int idx = row * imageWidth + col;
const auto u = static_cast<float>(col) / static_cast<float>(imageWidth - 1);
const auto v = 1.0f - static_cast<float>(row) / static_cast<float>(imageHeight - 1);
const Ray r(origin, lowerLeftCorner + u * horizontal + v * vertical - origin);
pixelBuffer[idx] = rayColor(r);
}
int main() {
/* image config */
constexpr float aspectRatio = 16.0f / 9.0f;
constexpr int imageWidth = 400;
constexpr int imageHeight = static_cast<int>(imageWidth / aspectRatio);
/* image output file */
const std::string fileName("output.png");
/* thread block config */
constexpr int threadBlockWidth = 8;
constexpr int threadBlockHeight = 8;
// preparation
constexpr int channelNum = 3; // rgb
constexpr int pixelNum = imageWidth * imageHeight;
constexpr size_t pixelBufferBytes = pixelNum * sizeof(Color);
// allocate memory for pixel buffer
const auto pixelBufferPtr = cudaManagedUniquePtr<Color>(pixelBufferBytes);
// camera setting
constexpr float viewPortHeight = 2.0f;
constexpr float viewPortWidth = aspectRatio * viewPortHeight;
constexpr float focalLength = 1.0f;
const Point3 origin(0.0f, 0.0f, 0.0f);
const Vec3 horizontal(viewPortWidth, 0.0f, 0.0f);
const Vec3 vertical(0.0f, viewPortHeight, 0.0f);
// left-handed Y up
const Point3 lowerLeftCorner = origin - horizontal / 2 - vertical / 2 + Vec3(0.0f, 0.0f, focalLength);
// start timer
const clock_t start = clock();
const dim3 blockDim(imageWidth / threadBlockWidth + 1, imageHeight / threadBlockHeight + 1);
const dim3 threadDim(threadBlockWidth, threadBlockHeight);
// render the image into buffer
render<<<blockDim, threadDim>>>(pixelBufferPtr.get(), imageWidth, imageHeight, lowerLeftCorner, horizontal, vertical, origin);
checkCudaErrors(cudaGetLastError());
checkCudaErrors(cudaDeviceSynchronize());
// stop timer
const clock_t stop = clock();
// measure rendering time
const auto renderingMillisecond = stop - start;
// other image writer arguments
constexpr int imageSize = pixelNum * channelNum;
constexpr size_t strideBytes = imageWidth * channelNum * sizeof(unsigned char);
const std::unique_ptr<unsigned char[]> pixelDataPtr(new unsigned char[imageSize]);
// store the pixel data into writing buffer as 8bit color
for (int pixelIdx = 0, dataIdx = 0; pixelIdx < pixelNum; ++pixelIdx) {
const Color color = pixelBufferPtr.get()[pixelIdx];
pixelDataPtr[dataIdx++] = static_cast<unsigned char>(color.r8bit());
pixelDataPtr[dataIdx++] = static_cast<unsigned char>(color.g8bit());
pixelDataPtr[dataIdx++] = static_cast<unsigned char>(color.b8bit());
}
// print rendering time
std::cout << "Complete!\n" << "The rendering took " << renderingMillisecond << "ms" << std::endl;
// write pixel data to output file
stbi_write_png(fileName.c_str(), imageWidth, imageHeight, channelNum, pixelDataPtr.get(), strideBytes);
return 0;
}
|
1189c83928e894a8bbed3861cbf9bb59af962dfe.hip
|
// !!! This is a file automatically generated by hipify!!!
#include <stdbool.h>
#include <stdio.h>
#include <string.h>
#include <getopt.h>
#include <hiprand/hiprand_kernel.h>
#include <stdlib.h>
#include <hip/hip_runtime.h>
#include <sys/time.h>
#include "gpu_imageSqrt.cu"
#include<chrono>
#include<iostream>
using namespace std;
using namespace std::chrono;
int blocks_[20][2] = {{8,8},{16,16},{24,24},{32,32},{1,64},{1,128},{1,192},{1,256},{1,320},{1,384},{1,448},{1,512},{1,576},{1,640},{1,704},{1,768},{1,832},{1,896},{1,960},{1,1024}};
int matrices_[7][2] = {{240,240},{496,496},{784,784},{1016,1016},{1232,1232},{1680,1680},{2024,2024}};
int main(int argc, char **argv) {
hipSetDevice(0);
char* p;int matrix_len=strtol(argv[1], &p, 10);
for(int matrix_looper=0;matrix_looper<matrix_len;matrix_looper++){
for(int block_looper=0;block_looper<20;block_looper++){
int XSIZE=matrices_[matrix_looper][0],YSIZE=matrices_[matrix_looper][1],BLOCKX=blocks_[block_looper][0],BLOCKY=blocks_[block_looper][1];
float *out = NULL;
hipMalloc(&out, XSIZE*YSIZE);
const float *in = NULL;
hipMalloc(&in, XSIZE*YSIZE);
const int width = 1;
const int height = 1;
int iXSIZE= XSIZE;
int iYSIZE= YSIZE;
while(iXSIZE%BLOCKX!=0)
{
iXSIZE++;
}
while(iYSIZE%BLOCKY!=0)
{
iYSIZE++;
}
dim3 gridBlock(iXSIZE/BLOCKX, iYSIZE/BLOCKY);
dim3 threadBlock(BLOCKX, BLOCKY);
hipFree(0);hipLaunchKernelGGL((
gpu_imageSqrt), dim3(gridBlock),dim3(threadBlock), 0, 0, out,in,width,height);
hipDeviceSynchronize();
for (int loop_counter = 0; loop_counter < 10; ++loop_counter) {hipLaunchKernelGGL((
gpu_imageSqrt), dim3(gridBlock),dim3(threadBlock), 0, 0, out,in,width,height);
}
auto start = steady_clock::now();
for (int loop_counter = 0; loop_counter < 1000; loop_counter++) {hipLaunchKernelGGL((
gpu_imageSqrt), dim3(gridBlock),dim3(threadBlock), 0, 0, out,in,width,height);
}
auto end = steady_clock::now();
auto usecs = duration_cast<duration<float, microseconds::period> >(end - start);
cout <<'['<<usecs.count()<<','<<'('<<BLOCKX<<','<<BLOCKY<<')' << ','<<'('<<XSIZE<<','<<YSIZE<<')'<<']' << endl;
}
}}
|
1189c83928e894a8bbed3861cbf9bb59af962dfe.cu
|
#include <stdbool.h>
#include <stdio.h>
#include <string.h>
#include <getopt.h>
#include <curand_kernel.h>
#include <stdlib.h>
#include <cuda.h>
#include <sys/time.h>
#include "gpu_imageSqrt.cu"
#include<chrono>
#include<iostream>
using namespace std;
using namespace std::chrono;
int blocks_[20][2] = {{8,8},{16,16},{24,24},{32,32},{1,64},{1,128},{1,192},{1,256},{1,320},{1,384},{1,448},{1,512},{1,576},{1,640},{1,704},{1,768},{1,832},{1,896},{1,960},{1,1024}};
int matrices_[7][2] = {{240,240},{496,496},{784,784},{1016,1016},{1232,1232},{1680,1680},{2024,2024}};
int main(int argc, char **argv) {
cudaSetDevice(0);
char* p;int matrix_len=strtol(argv[1], &p, 10);
for(int matrix_looper=0;matrix_looper<matrix_len;matrix_looper++){
for(int block_looper=0;block_looper<20;block_looper++){
int XSIZE=matrices_[matrix_looper][0],YSIZE=matrices_[matrix_looper][1],BLOCKX=blocks_[block_looper][0],BLOCKY=blocks_[block_looper][1];
float *out = NULL;
cudaMalloc(&out, XSIZE*YSIZE);
const float *in = NULL;
cudaMalloc(&in, XSIZE*YSIZE);
const int width = 1;
const int height = 1;
int iXSIZE= XSIZE;
int iYSIZE= YSIZE;
while(iXSIZE%BLOCKX!=0)
{
iXSIZE++;
}
while(iYSIZE%BLOCKY!=0)
{
iYSIZE++;
}
dim3 gridBlock(iXSIZE/BLOCKX, iYSIZE/BLOCKY);
dim3 threadBlock(BLOCKX, BLOCKY);
cudaFree(0);
gpu_imageSqrt<<<gridBlock,threadBlock>>>(out,in,width,height);
cudaDeviceSynchronize();
for (int loop_counter = 0; loop_counter < 10; ++loop_counter) {
gpu_imageSqrt<<<gridBlock,threadBlock>>>(out,in,width,height);
}
auto start = steady_clock::now();
for (int loop_counter = 0; loop_counter < 1000; loop_counter++) {
gpu_imageSqrt<<<gridBlock,threadBlock>>>(out,in,width,height);
}
auto end = steady_clock::now();
auto usecs = duration_cast<duration<float, microseconds::period> >(end - start);
cout <<'['<<usecs.count()<<','<<'('<<BLOCKX<<','<<BLOCKY<<')' << ','<<'('<<XSIZE<<','<<YSIZE<<')'<<']' << endl;
}
}}
|
cca071ddc011647e6d29577fe0432dc862f53551.hip
|
// !!! This is a file automatically generated by hipify!!!
#include "gf_gpu.h"
void usage(const char *msg)
{
if(msg)
fprintf(stderr, "ERROR: %s\n", msg);
fprintf(stderr, "usage: gf_gpu w c seed bytes iterations technique\n");
exit(EXIT_FAILURE);
}
int main(int argc, char **argv)
{
if(argc < 7)
usage(NULL);
unsigned w, bytes, iterations;
int seed;
uint64_t c;
if(!sscanf(argv[1], "%u", &w))
usage("Couldn't read w");
if(!sscanf(argv[2], "%lu", &c))
usage("Couldn't read c");
if(!sscanf(argv[3], "%d", &seed))
usage("Couldn't read seed");
if(!sscanf(argv[4], "%u", &bytes))
usage("Couldn't read bytes");
if(!sscanf(argv[5], "%u", &iterations))
usage("Couldn't read iterations");
if(c >= (1ULL << w))
usage("c is not an element of the field");
hipDeviceReset(); //Reset for nvprof
Timer t;
printf("Generating data... ");
startTime(&t);
void *data = random_data(seed, bytes);
void *cuda_data;
hipError_t cuda_ret = hipMalloc((void**)&cuda_data, bytes);
if(cuda_ret != hipSuccess)
usage(hipGetErrorString(cuda_ret));
cuda_ret = hipMemcpy(cuda_data, data, bytes, hipMemcpyHostToDevice);
if(cuda_ret != hipSuccess)
usage(hipGetErrorString(cuda_ret));
hipDeviceSynchronize();
stopTime(&t);
printf("(%f s)\n", elapsedTime(&t));
//Let the kernel launchers time themselves because they have different setups
if(!strcmp(argv[6], "SHIFT"))
for(int i = 0; i < iterations; i++)
shift_launch(w, c, bytes, cuda_data);
else if(!strcmp(argv[6], "TABLE"))
table_launch(w, c, bytes, cuda_data);
else if(!strcmp(argv[6], "LOG"))
log_launch(w, c, bytes, cuda_data);
else if(!strcmp(argv[6], "BYTWO_b"))
bytwob_launch(w, c, bytes, cuda_data);
else if(!strcmp(argv[6], "BYTWO_p"))
bytwop_launch(w, c, bytes, cuda_data);
else if(!strcmp(argv[6], "SPLIT"))
split_launch(w, c, bytes, cuda_data);
else if(!strcmp(argv[6], "GROUP"))
group_launch(w, c, bytes, cuda_data);
else
usage("Invalid technique");
printf("Copying answer to host... ");
startTime(&t);
void *answer = malloc(bytes);
cuda_ret = hipMemcpy(answer, cuda_data, bytes, hipMemcpyDeviceToHost);
if(cuda_ret != hipSuccess)
usage(hipGetErrorString(cuda_ret));
hipDeviceSynchronize();
stopTime(&t);
printf("(%f s)\n", elapsedTime(&t));
if(w == 8)
check_answer_w08(c, bytes, (uint8_t *)data, (uint8_t *)answer);
else if(w == 16)
check_answer_w16(c, bytes/2, (uint16_t *)data, (uint16_t *)answer);
else if(w == 32)
check_answer_w32(c, bytes/4, (uint32_t *)data, (uint32_t *)answer);
free(answer);
free(data);
hipFree(cuda_data);
return 0;
}
|
cca071ddc011647e6d29577fe0432dc862f53551.cu
|
#include "gf_gpu.h"
void usage(const char *msg)
{
if(msg)
fprintf(stderr, "ERROR: %s\n", msg);
fprintf(stderr, "usage: gf_gpu w c seed bytes iterations technique\n");
exit(EXIT_FAILURE);
}
int main(int argc, char **argv)
{
if(argc < 7)
usage(NULL);
unsigned w, bytes, iterations;
int seed;
uint64_t c;
if(!sscanf(argv[1], "%u", &w))
usage("Couldn't read w");
if(!sscanf(argv[2], "%lu", &c))
usage("Couldn't read c");
if(!sscanf(argv[3], "%d", &seed))
usage("Couldn't read seed");
if(!sscanf(argv[4], "%u", &bytes))
usage("Couldn't read bytes");
if(!sscanf(argv[5], "%u", &iterations))
usage("Couldn't read iterations");
if(c >= (1ULL << w))
usage("c is not an element of the field");
cudaDeviceReset(); //Reset for nvprof
Timer t;
printf("Generating data... ");
startTime(&t);
void *data = random_data(seed, bytes);
void *cuda_data;
cudaError_t cuda_ret = cudaMalloc((void**)&cuda_data, bytes);
if(cuda_ret != cudaSuccess)
usage(cudaGetErrorString(cuda_ret));
cuda_ret = cudaMemcpy(cuda_data, data, bytes, cudaMemcpyHostToDevice);
if(cuda_ret != cudaSuccess)
usage(cudaGetErrorString(cuda_ret));
cudaDeviceSynchronize();
stopTime(&t);
printf("(%f s)\n", elapsedTime(&t));
//Let the kernel launchers time themselves because they have different setups
if(!strcmp(argv[6], "SHIFT"))
for(int i = 0; i < iterations; i++)
shift_launch(w, c, bytes, cuda_data);
else if(!strcmp(argv[6], "TABLE"))
table_launch(w, c, bytes, cuda_data);
else if(!strcmp(argv[6], "LOG"))
log_launch(w, c, bytes, cuda_data);
else if(!strcmp(argv[6], "BYTWO_b"))
bytwob_launch(w, c, bytes, cuda_data);
else if(!strcmp(argv[6], "BYTWO_p"))
bytwop_launch(w, c, bytes, cuda_data);
else if(!strcmp(argv[6], "SPLIT"))
split_launch(w, c, bytes, cuda_data);
else if(!strcmp(argv[6], "GROUP"))
group_launch(w, c, bytes, cuda_data);
else
usage("Invalid technique");
printf("Copying answer to host... ");
startTime(&t);
void *answer = malloc(bytes);
cuda_ret = cudaMemcpy(answer, cuda_data, bytes, cudaMemcpyDeviceToHost);
if(cuda_ret != cudaSuccess)
usage(cudaGetErrorString(cuda_ret));
cudaDeviceSynchronize();
stopTime(&t);
printf("(%f s)\n", elapsedTime(&t));
if(w == 8)
check_answer_w08(c, bytes, (uint8_t *)data, (uint8_t *)answer);
else if(w == 16)
check_answer_w16(c, bytes/2, (uint16_t *)data, (uint16_t *)answer);
else if(w == 32)
check_answer_w32(c, bytes/4, (uint32_t *)data, (uint32_t *)answer);
free(answer);
free(data);
cudaFree(cuda_data);
return 0;
}
|
54abbd2ff53630864f4f799745106da4958bf2fb.hip
|
// !!! This is a file automatically generated by hipify!!!
#include <hip/hip_runtime.h>
#include <stdio.h>
__device__ float op_add(float a, float b) {return a+b;}
__device__ float op_sub(float a, float b) {return a-b;}
__device__ float op_mul(float a, float b) {return a*b;}
__device__ float op_div(float a, float b) {return a/b;}
__device__ float op_gt(float a, float b) {return (a > b) ? 1.0f : 0;}
__device__ float op_lt(float a, float b) {return (a < b) ? 1.0f : 0;}
__device__ float op_eq(float a, float b) {return (a == b) ? 1.0f : 0;}
__device__ float op_ge(float a, float b) {return (a >= b) ? 1.0f : 0;}
__device__ float op_le(float a, float b) {return (a <= b) ? 1.0f : 0;}
__device__ float op_ne(float a, float b) {return (a != b) ? 1.0f : 0;}
__device__ float op_max(float a, float b) {return max(a,b);}
__device__ float op_min(float a, float b) {return min(a,b);}
__device__ int iop_add(int a, int b) {return a+b;}
__device__ int iop_sub(int a, int b) {return a-b;}
__device__ int iop_mul(int a, int b) {return a*b;}
__device__ int iop_div(int a, int b) {return a/b;}
__device__ int iop_gt(int a, int b) {return (a > b) ? 1 : 0;}
__device__ int iop_lt(int a, int b) {return (a < b) ? 1 : 0;}
__device__ int iop_eq(int a, int b) {return (a == b) ? 1 : 0;}
__device__ int iop_ge(int a, int b) {return (a >= b) ? 1 : 0;}
__device__ int iop_le(int a, int b) {return (a <= b) ? 1 : 0;}
__device__ int iop_ne(int a, int b) {return (a != b) ? 1 : 0;}
typedef float (*optype)(float,float);
typedef int (*ioptype)(int,int);
__device__ const optype operators[] = {
op_add,
op_sub,
op_mul,
op_div,
op_gt,
op_lt,
op_eq,
op_ge,
op_le,
op_ne,
op_max,
op_min};
__device__ const ioptype ioperators[] = {
iop_add,
iop_sub,
iop_mul,
iop_div,
iop_gt,
iop_lt,
iop_eq,
iop_ge,
iop_le,
iop_ne};
__device__ float fn_abs(float a) {return abs(a);}
__device__ float fn_exp(float a) {return expf(a);}
__device__ float fn_log(float a) {return logf(a);}
__device__ float fn_expm1(float a) {return expm1f(a);}
__device__ float fn_sqrt(float a) {return sqrtf(a);}
__device__ float fn_ln(float a) {return logf(a);}
__device__ float fn_log10(float a) {return log10f(a);}
__device__ float fn_log1p(float a) {return log1pf(a);}
__device__ float fn_cos(float a) {return cosf(a);}
__device__ float fn_sin(float a) {return sinf(a);}
__device__ float fn_tan(float a) {return tanf(a);}
__device__ float fn_cosh(float a) {return coshf(a);}
__device__ float fn_sinh(float a) {return sinhf(a);}
__device__ float fn_tanh(float a) {return tanhf(a);}
__device__ float fn_acos(float a) {return acosf(a);}
__device__ float fn_asin(float a) {return asinf(a);}
__device__ float fn_atan(float a) {return atanf(a);}
__device__ float fn_acosh(float a) {return acoshf(a);}
__device__ float fn_asinh(float a) {return asinhf(a);}
__device__ float fn_atanh(float a) {return atanhf(a);}
__device__ float fn_erf(float a) {return erff(a);}
__device__ float fn_erfinv(float a) {return erfinvf(a);}
__device__ float fn_erfc(float a) {return erfcf(a);}
__device__ float fn_erfcinv(float a) {return erfcinvf(a);}
__device__ float fn_gammaln(float a) {return lgammaf(a);}
__device__ float fn_gamma(float a) {return tgammaf(a);}
__device__ float fn_ceil(float a) {return ceilf(a);}
__device__ float fn_floor(float a) {return floorf(a);}
__device__ float fn_round(float a) {return roundf(a);}
__device__ float fn_trunc(float a) {return truncf(a);}
__device__ float fn_sign(float a) {return (a>0) ? 1.0f : ((a<0) ? -1.0f : 0);}
__device__ float fn_j0(float a) {return j0f(a);}
__device__ float fn_j1(float a) {return j1f(a);}
//__device__ float fn_jn(float a) {return jnf(a);}
__device__ float fn_y0(float a) {return y0f(a);}
__device__ float fn_y1(float a) {return y1f(a);}
//__device__ float fn_yn(float a) {return ynf(a);}
__device__ float fn_exppsi(float a) {return (a<1.0f) ? 0.5f*a*a : a-0.5f;}
__device__ float fn_atan2(float a, float b) {return atan2f(a, b);}
__device__ float fn_pow(float a, float b) {return powf(a, b);}
typedef float (*fntype)(float);
__device__ const fntype fctns[35] = {
fn_abs,
fn_exp,
fn_expm1,
fn_sqrt,
fn_ln,
fn_log10,
fn_log1p,
fn_cos,
fn_sin,
fn_tan,
fn_cosh,
fn_sinh,
fn_tanh,
fn_acos,
fn_asin,
fn_atan,
fn_acosh,
fn_asinh,
fn_atanh,
fn_erf,
fn_erfinv,
fn_erfc,
fn_erfcinv,
fn_gammaln,
fn_gamma,
fn_ceil,
fn_floor,
fn_round,
fn_trunc,
fn_sign,
fn_j0,
fn_j1,
fn_y0,
fn_y1,
fn_exppsi};
__device__ const optype fctns2[2] = {
fn_atan2,
fn_pow};
__global__ void __apply_gfun(float *A, float *B, int N, int opn) {
fntype fn = fctns[opn];
int ip = threadIdx.x + blockDim.x * blockIdx.x;
for (int i = ip; i < N; i += blockDim.x * gridDim.x) {
B[i] = fn(A[i]);
}
}
int apply_gfun(float *A, float *B, int N, int opn) {
int nthreads = 32;
int nblocks = 1;
while (nblocks * nthreads < N) {
if (nblocks < 16) {
nblocks = 2*nblocks;
} else if (nthreads < 1024) {
nthreads = 2*nthreads;
} else {
nblocks = 2*nblocks;
}
}
hipLaunchKernelGGL(( __apply_gfun), dim3(nblocks),dim3(nthreads), 0, 0, A, B, N, opn);
hipError_t err = hipGetLastError();
return err;
}
__global__ void __apply_gfun2(float *A, float *B, float *C, int N, int opn) {
optype fn = fctns2[opn];
int ip = threadIdx.x + blockDim.x * blockIdx.x;
for (int i = ip; i < N; i += blockDim.x * gridDim.x) {
C[i] = fn(A[i], B[i]);
}
}
int apply_gfun2(float *A, float *B, float *C, int N, int opn) {
int nthreads = 32;
int nblocks = 1;
while (nblocks * nthreads < N) {
if (nblocks < 16) {
nblocks = 2*nblocks;
} else if (nthreads < 1024) {
nthreads = 2*nthreads;
} else {
nblocks = 2*nblocks;
}
}
hipLaunchKernelGGL(( __apply_gfun2), dim3(nblocks),dim3(nthreads), 0, 0, A, B, C, N, opn);
hipError_t err = hipGetLastError();
return err;
}
// --- Broadcasting binary-operator kernels (float) ---------------------------
// Each kernel applies op = operators[opn] element-wise with a different
// broadcast pattern; apply_binop picks the right one from the operand shapes.
// All use grid-stride loops over the (column-major, nrows x ncols) output.
// Same shape on both sides: C[i] = op(A[i], B[i]).
__global__ void __apply_full(float *A, float *B, float *C, int N, int opn) {
optype op = operators[opn];
int ip = threadIdx.x + blockDim.x * blockIdx.x;
for (int i = ip; i < N; i += blockDim.x * gridDim.x) {
C[i] = op(A[i],B[i]);
}
}
// B is a column vector (nrows x 1) broadcast across A's columns.
__global__ void __apply_right_col(float *A, float *B, float *C, int nrows, int ncols, int opn) {
optype op = operators[opn];
int ip = threadIdx.x + blockDim.x * blockIdx.x;
for (int i = ip; i < nrows*ncols; i += blockDim.x * gridDim.x) {
C[i] = op(A[i],B[i % nrows]);
}
}
// B is a row vector (1 x ncols) broadcast down A's rows.
__global__ void __apply_right_row(float *A, float *B, float *C, int nrows, int ncols, int opn) {
optype op = operators[opn];
int ip = threadIdx.x + blockDim.x * blockIdx.x;
for (int i = ip; i < nrows*ncols; i += blockDim.x * gridDim.x) {
C[i] = op(A[i],B[i / nrows]);
}
}
// A is a column vector broadcast across B's columns.
__global__ void __apply_left_col(float *A, float *B, float *C, int nrows, int ncols, int opn) {
optype op = operators[opn];
int ip = threadIdx.x + blockDim.x * blockIdx.x;
for (int i = ip; i < nrows*ncols; i += blockDim.x * gridDim.x) {
C[i] = op(A[i % nrows],B[i]);
}
}
// A is a row vector broadcast down B's rows.
__global__ void __apply_left_row(float *A, float *B, float *C, int nrows, int ncols, int opn) {
optype op = operators[opn];
int ip = threadIdx.x + blockDim.x * blockIdx.x;
for (int i = ip; i < nrows*ncols; i += blockDim.x * gridDim.x) {
C[i] = op(A[i / nrows],B[i]);
}
}
// B is a 1x1 scalar; read once per thread, then broadcast.
__global__ void __apply_right_val(float *A, float *B, float *C, int nrows, int ncols, int opn) {
optype op = operators[opn];
int ip = threadIdx.x + blockDim.x * blockIdx.x;
float val = B[0];
for (int i = ip; i < nrows*ncols; i += blockDim.x * gridDim.x) {
C[i] = op(A[i],val);
}
}
// A is a 1x1 scalar; read once per thread, then broadcast.
__global__ void __apply_left_val(float *A, float *B, float *C, int nrows, int ncols, int opn) {
optype op = operators[opn];
int ip = threadIdx.x + blockDim.x * blockIdx.x;
float val = A[0];
for (int i = ip; i < nrows*ncols; i += blockDim.x * gridDim.x) {
C[i] = op(val,B[i]);
}
}
// Dispatch the float binary operator opn over A and B into C, choosing the
// kernel variant that matches the operand shapes (equal shapes, matching
// column/row vector, or 1x1 scalar on either side).  Returns the launch
// status as an int.
int apply_binop(float *A, int Anrows, int Ancols,
float *B, int Bnrows, int Bncols, float *C, int opn) {
// Output element count is the broadcast of the two shapes.
int N = max(Anrows, Bnrows)*max(Ancols, Bncols);
int nthreads = 32;
int nblocks = 1;
// Grow the launch geometry until it covers N elements.
while (nblocks * nthreads < N) {
if (nblocks < 16) {
nblocks = 2*nblocks;
} else if (nthreads < 1024) {
nthreads = 2*nthreads;
} else {
nblocks = 2*nblocks;
}
}
if (Anrows == Bnrows && Ancols == Bncols) {
hipLaunchKernelGGL(( __apply_full), dim3(nblocks),dim3(nthreads), 0, 0, A, B, C, N, opn);
} else if (Anrows == Bnrows && Bncols == 1) {
hipLaunchKernelGGL(( __apply_right_col), dim3(nblocks),dim3(nthreads), 0, 0, A, B, C, Anrows, Ancols, opn);
} else if (Ancols == Bncols && Bnrows == 1) {
hipLaunchKernelGGL(( __apply_right_row), dim3(nblocks),dim3(nthreads), 0, 0, A, B, C, Anrows, Ancols, opn);
} else if (Anrows == Bnrows && Ancols == 1) {
hipLaunchKernelGGL(( __apply_left_col), dim3(nblocks),dim3(nthreads), 0, 0, A, B, C, Bnrows, Bncols, opn);
} else if (Ancols == Bncols && Anrows == 1) {
hipLaunchKernelGGL(( __apply_left_row), dim3(nblocks),dim3(nthreads), 0, 0, A, B, C, Bnrows, Bncols, opn);
} else if (Bnrows == 1 && Bncols == 1) {
hipLaunchKernelGGL(( __apply_right_val), dim3(nblocks),dim3(nthreads), 0, 0, A, B, C, Anrows, Ancols, opn);
} else if (Anrows == 1 && Ancols == 1) {
hipLaunchKernelGGL(( __apply_left_val), dim3(nblocks),dim3(nthreads), 0, 0, A, B, C, Bnrows, Bncols, opn);
}
// NOTE(review): if no shape case matches, nothing is launched, C is left
// unwritten, and the function still reports success -- confirm callers
// validate shapes beforehand.
hipError_t err = hipGetLastError();
return err;
}
// --- Broadcasting binary-operator kernels (int) -----------------------------
// Integer counterparts of the float kernels above, using ioperators[opn].
// Same shape on both sides.
__global__ void __apply_full_int(int *A, int *B, int *C, int N, int opn) {
ioptype op = ioperators[opn];
int ip = threadIdx.x + blockDim.x * blockIdx.x;
for (int i = ip; i < N; i += blockDim.x * gridDim.x) {
C[i] = op(A[i],B[i]);
}
}
// B is a column vector broadcast across A's columns.
__global__ void __apply_right_col_int(int *A, int *B, int *C, int nrows, int ncols, int opn) {
ioptype op = ioperators[opn];
int ip = threadIdx.x + blockDim.x * blockIdx.x;
for (int i = ip; i < nrows*ncols; i += blockDim.x * gridDim.x) {
C[i] = op(A[i],B[i % nrows]);
}
}
// B is a row vector broadcast down A's rows.
__global__ void __apply_right_row_int(int *A, int *B, int *C, int nrows, int ncols, int opn) {
ioptype op = ioperators[opn];
int ip = threadIdx.x + blockDim.x * blockIdx.x;
for (int i = ip; i < nrows*ncols; i += blockDim.x * gridDim.x) {
C[i] = op(A[i],B[i / nrows]);
}
}
// A is a column vector broadcast across B's columns.
__global__ void __apply_left_col_int(int *A, int *B, int *C, int nrows, int ncols, int opn) {
ioptype op = ioperators[opn];
int ip = threadIdx.x + blockDim.x * blockIdx.x;
for (int i = ip; i < nrows*ncols; i += blockDim.x * gridDim.x) {
C[i] = op(A[i % nrows],B[i]);
}
}
// A is a row vector broadcast down B's rows.
__global__ void __apply_left_row_int(int *A, int *B, int *C, int nrows, int ncols, int opn) {
ioptype op = ioperators[opn];
int ip = threadIdx.x + blockDim.x * blockIdx.x;
for (int i = ip; i < nrows*ncols; i += blockDim.x * gridDim.x) {
C[i] = op(A[i / nrows],B[i]);
}
}
// B is a 1x1 scalar.
__global__ void __apply_right_val_int(int *A, int *B, int *C, int nrows, int ncols, int opn) {
ioptype op = ioperators[opn];
int ip = threadIdx.x + blockDim.x * blockIdx.x;
int val = B[0];
for (int i = ip; i < nrows*ncols; i += blockDim.x * gridDim.x) {
C[i] = op(A[i],val);
}
}
// A is a 1x1 scalar.
__global__ void __apply_left_val_int(int *A, int *B, int *C, int nrows, int ncols, int opn) {
ioptype op = ioperators[opn];
int ip = threadIdx.x + blockDim.x * blockIdx.x;
int val = A[0];
for (int i = ip; i < nrows*ncols; i += blockDim.x * gridDim.x) {
C[i] = op(val,B[i]);
}
}
// Integer twin of apply_binop: dispatch ioperators[opn] over A and B into C
// according to the operand shapes.  Returns the launch status as an int.
int apply_biniop(int *A, int Anrows, int Ancols,
int *B, int Bnrows, int Bncols,
int *C, int opn) {
// Output element count is the broadcast of the two shapes.
int N = max(Anrows, Bnrows)*max(Ancols, Bncols);
int nthreads = 32;
int nblocks = 1;
// Grow the launch geometry until it covers N elements.
while (nblocks * nthreads < N) {
if (nblocks < 16) {
nblocks = 2*nblocks;
} else if (nthreads < 1024) {
nthreads = 2*nthreads;
} else {
nblocks = 2*nblocks;
}
}
if (Anrows == Bnrows && Ancols == Bncols) {
hipLaunchKernelGGL(( __apply_full_int), dim3(nblocks),dim3(nthreads), 0, 0, A, B, C, N, opn);
} else if (Anrows == Bnrows && Bncols == 1) {
hipLaunchKernelGGL(( __apply_right_col_int), dim3(nblocks),dim3(nthreads), 0, 0, A, B, C, Anrows, Ancols, opn);
} else if (Ancols == Bncols && Bnrows == 1) {
hipLaunchKernelGGL(( __apply_right_row_int), dim3(nblocks),dim3(nthreads), 0, 0, A, B, C, Anrows, Ancols, opn);
} else if (Anrows == Bnrows && Ancols == 1) {
hipLaunchKernelGGL(( __apply_left_col_int), dim3(nblocks),dim3(nthreads), 0, 0, A, B, C, Bnrows, Bncols, opn);
} else if (Ancols == Bncols && Anrows == 1) {
hipLaunchKernelGGL(( __apply_left_row_int), dim3(nblocks),dim3(nthreads), 0, 0, A, B, C, Bnrows, Bncols, opn);
} else if (Bnrows == 1 && Bncols == 1) {
hipLaunchKernelGGL(( __apply_right_val_int), dim3(nblocks),dim3(nthreads), 0, 0, A, B, C, Anrows, Ancols, opn);
} else if (Anrows == 1 && Ancols == 1) {
hipLaunchKernelGGL(( __apply_left_val_int), dim3(nblocks),dim3(nthreads), 0, 0, A, B, C, Bnrows, Bncols, opn);
}
// NOTE(review): as in apply_binop, unmatched shapes launch nothing and
// return success with C unwritten.
hipError_t err = hipGetLastError();
return err;
}
// Dense x sparse product: accumulates C(:,Bic[j]) += A(:,Bir[j]) * Bdata[j]
// for each of the nnz sparse entries.  Each block handles a contiguous slice
// of the nnz entries; each thread owns a strided subset of the rows.
// NOTE(review): the flush-on-column-change logic assumes the sparse entries
// are sorted by column (Bic nondecreasing) -- confirm at the call sites.
__global__ void __dsmult(int nrows, int nnz, float *A, float *Bdata, int *Bir, int *Bic, float *C) {
int jstart = ((long long)blockIdx.x) * nnz / gridDim.x;
int jend = ((long long)(blockIdx.x + 1)) * nnz / gridDim.x;
for (int i = threadIdx.x; i < nrows; i += blockDim.x) {
float sum = 0;
for (int j = jstart; j < jend ; j++) {
sum += A[i + nrows * Bir[j]] * Bdata[j];
// Flush the running sum when the column changes or the slice ends;
// atomicAdd because other blocks may contribute to the same column.
if (j == jend-1 || Bic[j] != Bic[j+1]) {
atomicAdd(&C[i + nrows * Bic[j]], sum);
sum = 0;
}
}
}
}
// Host launcher for __dsmult: one thread per row (capped at 1024) and one
// block per output column (capped at 1M).  Returns the launch status.
int dsmult(int nrows, int ncols, int nnz, float *A, float *Bdata, int *Bir, int *Bic, float *C) {
  int nthr = (nrows < 1024) ? nrows : 1024;
  int nblk = (ncols < 1024*1024) ? ncols : 1024*1024;
  hipLaunchKernelGGL(( __dsmult), dim3(nblk),dim3(nthr), 0, 0, nrows, nnz, A, Bdata, Bir, Bic, C);
  return hipGetLastError();
}
// Transposed variant of __dsmult: accumulates
// C(:,Bir[j]) += A(:,Bic[j]) * Bdata[j], i.e. C += A * B^T for the same
// sparse storage.  The A column is re-read only when the sparse column
// changes, which again assumes entries sorted by Bic (NOTE(review): confirm).
__global__ void __dsmultT(int nrows, int nnz, float *A, float *Bdata, int *Bir, int *Bic, float *C) {
int jstart = ((long long)blockIdx.x) * nnz / gridDim.x;
int jend = ((long long)(blockIdx.x + 1)) * nnz / gridDim.x;
for (int i = threadIdx.x; i < nrows; i += blockDim.x) {
float aval = 0;
for (int j = jstart; j < jend ; j++) {
// Reload the A value only at a column boundary.
if (j == jstart || Bic[j-1] != Bic[j]) {
aval = A[i + nrows * Bic[j]];
}
atomicAdd(&C[i + nrows * Bir[j]], aval * Bdata[j]);
}
}
}
// Host launcher for __dsmultT; same geometry policy as dsmult.
int dsmultT(int nrows, int ncols, int nnz, float *A, float *Bdata, int *Bir, int *Bic, float *C) {
  int nthr = (nrows < 1024) ? nrows : 1024;
  int nblk = (ncols < 1024*1024) ? ncols : 1024*1024;
  hipLaunchKernelGGL(( __dsmultT), dim3(nblk),dim3(nthr), 0, 0, nrows, nnz, A, Bdata, Bir, Bic, C);
  return hipGetLastError();
}
// Forward declarations so the host wrappers below can reference the kernels.
__global__ void __dds(int nrows, int nnz, float *A, float *B, int *Cir, int *Cic, float *P);
__global__ void __reduce1op(int nrows, int ncols, float *A, float *B, int opn);
// NOTE(review): guarding the definitions with #ifdef __CUDA_ARCH__ means the
// host compilation pass sees only the declarations above; this relies on
// split device/host compilation behavior -- confirm it links on this
// toolchain.
#ifdef __CUDA_ARCH__
#if __CUDA_ARCH__ > 200
// P[j] = dot(A(:,Cir[j]), B(:,Cic[j])) for each of nnz index pairs.
// Warp-shuffle reduction: assumes blockDim.x <= 32 (one warp); the host
// wrapper launches min(32, nrows) threads.  Uses the legacy mask-less
// __shfl_down, which is pre-Volta only.
__global__ void __dds(int nrows, int nnz, float *A, float *B, int *Cir, int *Cic, float *P) {
int jstart = ((long long)blockIdx.x) * nnz / gridDim.x;
int jend = ((long long)(blockIdx.x + 1)) * nnz / gridDim.x;
for (int j = jstart; j < jend ; j++) {
float sum = 0;
int aoff = nrows * Cir[j];
int boff = nrows * Cic[j];
for (int i = threadIdx.x; i < nrows; i += blockDim.x) {
sum += A[i + aoff] * B[i + boff];
}
// NOTE(review): this shuffle tree is only exact when blockDim.x is a
// power of two -- confirm for nrows not a power of two below 32.
for (int i = 1; i < blockDim.x; i *= 2) {
sum = sum + __shfl_down(sum, i);
}
if (threadIdx.x == 0) {
P[j] = sum;
}
}
}
// Column-wise reduction: B[icol] = op-fold of A(:,icol).  One warp row
// (threadIdx.x) per column strip; columns are strided across threadIdx.y
// and blockIdx.x.  NOTE(review): the host wrapper passes gridDim.x == 1
// for this variant, so only a single block participates.
__global__ void __reduce1op(int nrows, int ncols, float *A, float *B, int opn) {
optype op = operators[opn];
int basecol = threadIdx.y + blockDim.y * blockIdx.x;
for (int icol = basecol; icol < ncols; icol += blockDim.y * gridDim.x) {
float v = A[threadIdx.x + icol * nrows];
for (int i = threadIdx.x + blockDim.x; i < nrows; i += blockDim.x) {
v = op(v, A[i + icol * nrows]);
}
for (int i = 1; i < blockDim.x; i *= 2) {
v = op(v, __shfl_down(v, i));
}
if (threadIdx.x == 0) {
B[icol] = v;
}
}
}
#else
// Fallback for arch <= 2.0: shared-memory reduction without barriers,
// which is warp-synchronous -- valid only because blockDim.x <= 32.
__global__ void __dds(int nrows, int nnz, float *A, float *B, int *Cir, int *Cic, float *P) {
__shared__ float parts[1][33];
int jstart = ((long long)blockIdx.x) * nnz / gridDim.x;
int jend = ((long long)(blockIdx.x + 1)) * nnz / gridDim.x;
for (int j = jstart; j < jend ; j++) {
float sum = 0;
int aoff = nrows * Cir[j];
int boff = nrows * Cic[j];
for (int i = threadIdx.x; i < nrows; i += blockDim.x) {
sum += A[i + aoff] * B[i + boff];
}
parts[0][threadIdx.x] = sum;
for (int i = 1; i < blockDim.x; i *= 2) {
if (i + threadIdx.x < blockDim.x) {
parts[0][threadIdx.x] = parts[0][threadIdx.x] + parts[0][i + threadIdx.x];
}
}
if (threadIdx.x == 0) {
P[j] = parts[0][0];
}
}
}
// Fallback column reduction via a padded shared tile (33 to dodge bank
// conflicts); columns are strided across blockIdx.y here, matching the
// dim3(1,nblks,1) grid used by the host wrapper.
__global__ void __reduce1op(int nrows, int ncols, float *A, float *B, int opn) {
__shared__ float parts[32][33];
optype op = operators[opn];
for (int icol = threadIdx.y + blockIdx.y * blockDim.y; icol < ncols; icol += blockDim.y * gridDim.y) {
float v = A[threadIdx.x + icol * nrows];
for (int irow = threadIdx.x + blockDim.x; irow < nrows; irow += blockDim.x) {
v = op(v, A[irow + icol * nrows]);
}
parts[threadIdx.x][threadIdx.y] = v;
for (int i = 1; i < blockDim.x; i *= 2) {
if (i + threadIdx.x < blockDim.x) {
parts[threadIdx.x][threadIdx.y] = op(parts[threadIdx.x][threadIdx.y], parts[i + threadIdx.x][threadIdx.y]);
}
}
if (threadIdx.x == 0) {
B[icol] = parts[0][threadIdx.y];
}
__syncthreads();
}
}
#endif
#endif
#define BLOCKDIM 32
// Tiled matrix transpose: out = in^T for a column-major nrows x ncols input
// with leading dimensions instride/outstride.  Each block walks a grid of
// BLOCKDIM x BLOCKDIM tiles; the shared tile is padded (+1) to avoid shared
// memory bank conflicts on the transposed read.
__global__ void __transpose(float *in, int instride, float *out, int outstride, int nrows, int ncols) {
int nx = BLOCKDIM * gridDim.x;
int ny = BLOCKDIM * gridDim.y;
int ix = BLOCKDIM * blockIdx.x;
int iy = BLOCKDIM * blockIdx.y;
__shared__ float tile[BLOCKDIM][BLOCKDIM+1];
for (int yb = iy; yb < ncols; yb += ny) {
for (int xb = ix; xb < nrows; xb += nx) {
// Stage a tile of the input into shared memory (coalesced along rows).
if (xb + threadIdx.x < nrows) {
int ylim = min(ncols, yb + BLOCKDIM);
for (int y = threadIdx.y + yb; y < ylim; y += blockDim.y) {
tile[threadIdx.x][y-yb] = in[threadIdx.x+xb + y*instride];
}
}
__syncthreads();
// Write the tile back transposed (coalesced along output rows).
if (yb + threadIdx.x < ncols) {
int xlim = min(nrows, xb + BLOCKDIM);
for (int x = threadIdx.y + xb; x < xlim; x += blockDim.y) {
out[threadIdx.x + yb + x*outstride] = tile[x-xb][threadIdx.x];
}
}
// Barrier before the tile is overwritten by the next iteration.
__syncthreads();
}
}
}
// Host wrapper for the tiled __transpose kernel.  Launches a fixed 32x32
// grid of (BLOCKDIM x 16) blocks, synchronizes, and returns 0 on success or
// the HIP error code on failure.
int transpose(float *in, int instride, float *out, int outstride, int nrows, int ncols) {
  const dim3 griddims(32,32);
  const dim3 blockdims(BLOCKDIM,16,1);
  hipError_t err;
  hipLaunchKernelGGL(( __transpose), dim3(griddims),dim3(blockdims), 0, 0, in, instride, out, outstride, nrows, ncols);
  hipDeviceSynchronize();
  err = hipGetLastError();
  // Bug fix: the diagnostic previously had no trailing newline, so it could
  // run into the next line written to stderr.
  if (err != hipSuccess) {fprintf(stderr, "cuda error in transpose\n"); return err;}
  return 0;
}
// Host launcher for __dds: one warp (up to 32 threads) per block, with
// roughly one block per 8 sparse entries, capped at 32M blocks.
int dds(int nrows, int nnz, float *A, float *B, int *Cir, int *Cic, float *P) {
  int nthr = (nrows < 32) ? nrows : 32;
  int work = nnz / 8;
  if (work < 1) work = 1;
  int nblk = (work < 32*1024*1024) ? work : 32*1024*1024;
  hipLaunchKernelGGL(( __dds), dim3(nblk),dim3(nthr), 0, 0, nrows, nnz, A, B, Cir, Cic, P);
  return hipGetLastError();
}
// Host launcher for the column-reduction kernel __reduce1op.  Block shape is
// (min(32,nrows), min(32,ncols)); the block count is sized so each block
// covers roughly 16 elements per thread.  Synchronizes before returning the
// error status.
int reduce1op(int nrows, int ncols, float *A, float *B, int opn) {
  int bx = min(32, nrows);
  int by = min(32, ncols);
  int nb = (int)(((long long)nrows) * ncols / bx / by / 16);
  if (nb < 1) nb = 1;
  const dim3 blkdims(bx,by,1);
  const dim3 griddims(1,nb,1);
  hipLaunchKernelGGL(( __reduce1op), dim3(griddims),dim3(blkdims), 0, 0, nrows, ncols, A, B, opn);
  hipDeviceSynchronize();
  return hipGetLastError();
}
// Row-wise reduction: B[irow] = op-fold of A(irow,:) across all columns.
// Each thread first folds its strided share of the columns, then the block
// tree-reduces across threadIdx.y in a padded shared tile.  The initial read
// assumes blockDim.y <= ncols (guaranteed by the host wrapper's min(32,ncols)).
__global__ void __reduce2op(int nrows, int ncols, float *A, float *B, int opn) {
__shared__ float parts[32][33];
optype op = operators[opn];
int baserow = threadIdx.x + blockDim.x * blockIdx.x;
for (int irow = baserow; irow < nrows; irow += blockDim.x * gridDim.x) {
float v = A[irow + threadIdx.y * nrows];
for (int icol = threadIdx.y + blockDim.y; icol < ncols; icol += blockDim.y) {
v = op(v, A[irow + icol * nrows]);
}
parts[threadIdx.x][threadIdx.y] = v;
__syncthreads();
// Tree reduction over threadIdx.y; newv staging plus barriers keeps the
// read and the read-modify-write of the tile in separate phases.
float newv = 0;
for (int i = 1; i < blockDim.y; i *= 2) {
if (i + threadIdx.y < blockDim.y) newv = parts[threadIdx.x][i+threadIdx.y];
__syncthreads();
if (i + threadIdx.y < blockDim.y) parts[threadIdx.x][threadIdx.y] = op(parts[threadIdx.x][threadIdx.y], newv);
__syncthreads();
}
if (threadIdx.y == 0) {
B[irow] = parts[threadIdx.x][0];
}
__syncthreads();
}
}
// Host launcher for the row-reduction kernel __reduce2op; same sizing policy
// as reduce1op but the blocks are laid out along the grid x axis.
int reduce2op(int nrows, int ncols, float *A, float *B, int opn) {
  int bx = min(32, nrows);
  int by = min(32, ncols);
  int nb = (int)(((long long)nrows) * ncols / bx / by / 16);
  if (nb < 1) nb = 1;
  const dim3 blkdims(bx,by,1);
  const dim3 griddims(nb,1,1);
  hipLaunchKernelGGL(( __reduce2op), dim3(griddims),dim3(blkdims), 0, 0, nrows, ncols, A, B, opn);
  hipDeviceSynchronize();
  return hipGetLastError();
}
// Pack each float of a into a sortable 64-bit key in b: the low 32 bits are
// the float's bits remapped so that unsigned integer order matches float
// order (flip the sign bit for non-negatives, flip all bits for negatives),
// and the high 32 bits carry the element's column index.  Sorting b then
// sorts each column's values while keeping columns separate.
__global__ void __embedmat(float *a, long long *b, int nrows, int ncols) {
int tid = threadIdx.x + blockIdx.x * blockDim.x;
for (int i = tid; i < nrows*ncols; i += blockDim.x*gridDim.x) {
float v = a[i];
int vi = *((int *)&v);
// vi>>31 is 0 for non-negatives and all-ones for negatives.
int mask = (vi >> 31) | 0x80000000;
vi = vi ^ mask;
b[i] = (long long)vi + (((long long)(i/nrows))<<32);
}
}
// Inverse of __embedmat: recover the float values from the low 32 bits of
// the packed keys in b (the column index in the high bits is discarded).
__global__ void __extractmat(float *a, long long *b, int nrows, int ncols) {
int tid = threadIdx.x + blockIdx.x * blockDim.x;
for (int i = tid; i < nrows*ncols; i += blockDim.x*gridDim.x) {
long long v = b[i];
int vi = *((int *)&v);
// Undo the order-preserving remap: high bit set means the original was
// non-negative (flip sign bit back); otherwise flip all bits back.
int mask = (~(vi >> 31)) | 0x80000000;
vi = vi ^ mask;
a[i] = *((float *)&vi);
}
}
// Compute a launch shape whose total thread count covers N elements.
// Starts from 32 blocks of 1 thread and doubles threads (up to 1024) before
// doubling blocks further.  Note the nblocks < 16 branch is dead with the
// current initial value of 32; it is kept to mirror the sizing loops above.
void setsizes(int N, int *nblocksp, int *nthreadsp) {
  int blocks = 32;
  int threads = 1;
  while (blocks * threads < N) {
    if (blocks < 16) {
      blocks <<= 1;
    } else if (threads < 1024) {
      threads <<= 1;
    } else {
      blocks <<= 1;
    }
  }
  *nblocksp = blocks;
  *nthreadsp = threads;
}
// Host wrapper: encode the nrows x ncols floats in a into sortable 64-bit
// keys in b, then synchronize and report the status.
int embedmat(float *a, long long *b, int nrows, int ncols) {
  int nblocks, nthreads;
  setsizes(nrows*ncols, &nblocks, &nthreads);
  hipLaunchKernelGGL(( __embedmat), dim3(nblocks),dim3(nthreads), 0, 0, a, b, nrows, ncols);
  hipDeviceSynchronize();
  return hipGetLastError();
}
// Host wrapper: decode the packed 64-bit keys in b back into floats in a,
// then synchronize and report the status.
int extractmat(float *a, long long *b, int nrows, int ncols) {
  int nblocks, nthreads;
  setsizes(nrows*ncols, &nblocks, &nthreads);
  hipLaunchKernelGGL(( __extractmat), dim3(nblocks),dim3(nthreads), 0, 0, a, b, nrows, ncols);
  hipDeviceSynchronize();
  return hipGetLastError();
}
//#include <thrust/detail/backend/cuda/detail/b40c/radixsort_api.h>
//#include "myradix_sort.inl"
#include <thrust/sort.h>
#include <thrust/device_ptr.h>
#include <thrust/reverse.h>
// Number of spine elements the b40c radix sorter needs for N float keys.
// NOTE(review): this (and the rsort* functions below) use Thrust's internal
// b40c backend namespace, which was removed in later Thrust releases --
// confirm the pinned Thrust version, or port to cub::DeviceRadixSort.
int rsortsizex(int N) {
thrust::detail::backend::cuda::detail::b40c_thrust::RadixSortingEnactor<float,unsigned int> sorter(N);
return sorter.SpineElements();
}
// Same as rsortsizex, but for 64-bit integer keys.
int rsortsizey(int N) {
thrust::detail::backend::cuda::detail::b40c_thrust::RadixSortingEnactor<long long,unsigned int> sorter(N);
return sorter.SpineElements();
}
// Radix-sort each column of an nrows x ncols float key matrix (with parallel
// unsigned int values) in place, one column at a time, reusing the supplied
// scratch buffers (tkeys/tvals), spine, and flag storage.
// Returns the status of the last column's sort.
int rsortx(float *pkeys, unsigned int *pvals, float *tkeys, unsigned int *tvals,
int *ispine, bool * bflags, int nrows, int ncols) {
  int i;
  // Bug fix: err was previously uninitialized, so a call with ncols == 0
  // returned garbage.
  hipError_t err = hipSuccess;
  thrust::detail::backend::cuda::detail::b40c_thrust::RadixSortingEnactor<float,unsigned int> sorter(nrows);
  thrust::detail::backend::cuda::detail::b40c_thrust::RadixSortStorage<float,unsigned int> storage;
  // Scratch/alternate buffers are shared across all column sorts.
  storage.d_alt_keys = tkeys;
  storage.d_alt_values = tvals;
  storage.d_spine = ispine;
  storage.d_from_alt_storage = bflags;
  for (i = 0; i < ncols; i++) {
    storage.d_keys = pkeys+i*nrows;
    storage.d_values = pvals+i*nrows;
    sorter.EnactSort(storage);
    hipDeviceSynchronize();
    err = hipGetLastError();
  }
  return err;
}
// Radix-sort N (long long key, unsigned int value) pairs in place using the
// b40c enactor, with caller-supplied scratch buffers (tkeys/tvals), spine,
// and flag storage.  See the Thrust-version caveat on rsortsizex.
int rsorty(long long *pkeys, unsigned int *pvals, long long *tkeys, unsigned int *tvals, int *ispine, bool * bflags, int N) {
thrust::detail::backend::cuda::detail::b40c_thrust::RadixSortingEnactor<long long,unsigned int> sorter(N);
thrust::detail::backend::cuda::detail::b40c_thrust::RadixSortStorage<long long,unsigned int> storage;
storage.d_keys = pkeys;
storage.d_values = pvals;
storage.d_alt_keys = tkeys;
storage.d_alt_values = tvals;
storage.d_spine = ispine;
storage.d_from_alt_storage = bflags;
sorter.EnactSort(storage);
hipDeviceSynchronize();
hipError_t err = hipGetLastError();
return err;
}
// Sort N (long long key, unsigned int value) pairs in place on device `dev`
// via thrust::sort_by_key, then report the status.
int rsort(long long *pkeys, unsigned int *pvals, int N, int dev) {
  hipSetDevice(dev);
  thrust::device_ptr<long long> kptr(pkeys);
  thrust::device_ptr<unsigned int> vptr(pvals);
  thrust::sort_by_key(kptr, kptr + N, vptr);
  hipDeviceSynchronize();
  return hipGetLastError();
}
// Sort each column of an nrows x ncols float key matrix (with parallel
// unsigned int values) independently using thrust::sort_by_key.
int rsort2(float *pkeys, unsigned int *pvals, int nrows, int ncols) {
  for (int col = 0; col < ncols; col++) {
    thrust::device_ptr<float> kptr(pkeys + col*nrows);
    thrust::device_ptr<unsigned int> vptr(pvals + col*nrows);
    thrust::sort_by_key(kptr, kptr + nrows, vptr);
  }
  hipDeviceSynchronize();
  return hipGetLastError();
}
// Scatter the n values of a into 32 strata buckets of b (each bucket a
// stride-sized region), using a 5-level binary search over the 32 pivot
// values in strata.  bi[j] tracks how many values bucket j already holds.
// NOTE(review): this kernel has no __syncthreads() -- it is warp-synchronous
// code that relies on blockDim.x == 32 (a single warp, as launched by the
// host wrapper) and on pre-Volta lockstep execution; it is not safe under
// independent thread scheduling without __syncwarp()s.  Confirm target arch.
__global__ void __stratify(float *strata, int n, float *a, float *b, unsigned int *bi, int stride) {
__shared__ float ss[32];
__shared__ unsigned int ibin[32];
__shared__ unsigned int ebin[32];
__shared__ unsigned int todo[32];
__shared__ float bins[64][33];
__shared__ unsigned int topush;
int tid = threadIdx.x;
ss[tid] = strata[tid];
ibin[tid] = 0;
for (int i = 0; i < n; i += blockDim.x * gridDim.x) {
int ii = i + tid + blockDim.x * blockIdx.x;
if (tid == 0) topush = 0;
if (ii < n) {
// 5-level tree descent over the pivots picks the target bucket j.
float v = a[ii];
int j = 1;
j = (v > ss[j-1]) ? 2*j+1 : 2*j;
j = (v > ss[j-1]) ? 2*j+1 : 2*j;
j = (v > ss[j-1]) ? 2*j+1 : 2*j;
j = (v > ss[j-1]) ? 2*j+1 : 2*j;
j = (v > ss[j-1]) ? 2*j+1 : 2*j;
j = j - 32;
// Stage the value in the shared per-bucket buffer; the 32nd entry
// marks the bucket full and queues it for flushing.
int k = atomicInc(&ibin[j], 256);
bins[k][j] = v;
if (k == 31) {
k = atomicInc(&topush, 1024);
todo[k] = j;
}
}
// Reserve output space for buckets that filled up on this pass.
if (ibin[tid] >= 32) {
ebin[tid] = atomicAdd(&bi[tid], 32);
ibin[tid] = ibin[tid] - 32;
}
// Flush each full bucket: all 32 lanes copy one element.
for (int k = 0; k < topush; k++) {
int j = todo[k];
b[j*stride + ebin[j] + tid] = bins[ibin[j] + tid][j];
}
}
// Drain the partially filled buckets at the end.
ebin[tid] = atomicAdd(&bi[tid], ibin[tid]);
for (int j = 0; j < 32; j++) {
if (tid < ibin[j]) {
b[j*stride + ebin[j] + tid] = bins[tid][j];
}
}
}
// Host launcher for __stratify.  The geometry is fixed at 40 blocks of one
// 32-thread warp each: the kernel's warp-synchronous logic requires exactly
// 32 threads per block.
int stratify(float *strata, int n, float *a, float *b, unsigned int *bi, int stride) {
  hipLaunchKernelGGL(( __stratify), dim3(40),dim3(32), 0, 0, strata, n, a, b, bi, stride);
  hipDeviceSynchronize();
  return hipGetLastError();
}
#define SNDVALS 256
#define SNDGRPS 4
#define SNTHREADS 1024
#define SBIGBLK (4*1024)
// Histogram the values of a into SNDVALS strata per SBIGBLK-sized chunk.
// strata holds the pivot tree (read by an 8-level binary descent below);
// bi receives one SNDVALS-wide count row per (block, chunk) pair.  Counts
// are accumulated in SNDGRPS shared sub-histograms to reduce atomic
// contention, then summed on write-out.
__global__ void __stratifycounts(float *strata, int n, float *a, unsigned int *bi) {
__shared__ unsigned int ic[SNDVALS][SNDGRPS];
__shared__ float ss[SNDVALS];
int istart = (int)(((long long)blockIdx.x) * n / gridDim.x);
int iend = (int)(((long long)(blockIdx.x+1)) * n / gridDim.x);
int bibase = SNDVALS * (blockIdx.x + istart / SBIGBLK);
int tid = threadIdx.x + threadIdx.y * blockDim.x;
if (threadIdx.y == 0) {
ss[threadIdx.x] = strata[threadIdx.x];
}
for (int i = istart; i < iend; i += SBIGBLK) {
__syncthreads();
// Reset this chunk's sub-histograms.
if (threadIdx.y < SNDGRPS) {
ic[threadIdx.x][threadIdx.y] = 0;
}
__syncthreads();
for (int k = i + tid; k < min(iend, i + tid + SBIGBLK); k += SNTHREADS) {
float v = a[k];
// 8-level descent over the pivot tree selects one of 256 strata.
int j = 0;
j = (v > ss[j]) ? 2*j+2 : 2*j+1;
j = (v > ss[j]) ? 2*j+2 : 2*j+1;
j = (v > ss[j]) ? 2*j+2 : 2*j+1;
j = (v > ss[j]) ? 2*j+2 : 2*j+1;
j = (v > ss[j]) ? 2*j+2 : 2*j+1;
j = (v > ss[j]) ? 2*j+2 : 2*j+1;
j = (v > ss[j]) ? 2*j+2 : 2*j+1;
j = (v > ss[j]) ? 2*j+2 : 2*j+1;
j = j - SNDVALS + 1;
atomicInc(&ic[j][threadIdx.y], 65536*32767);
}
__syncthreads();
// One thread row folds the sub-histograms into the output row.
if (threadIdx.y == 0) {
bi[bibase + threadIdx.x] = ic[threadIdx.x][0] + ic[threadIdx.x][1] + ic[threadIdx.x][2] + ic[threadIdx.x][3];
}
bibase += SNDVALS;
}
}
// Host launcher for __stratifycounts: 8 blocks of SNTHREADS threads shaped
// (SNDVALS x SNTHREADS/SNDVALS), as the kernel's shared layout requires.
int stratifycounts(float *strata, int n, float *a, unsigned int *bi) {
  const dim3 blkdims(SNDVALS, SNTHREADS/SNDVALS, 1);
  const dim3 grddims(8,1,1);
  hipLaunchKernelGGL(( __stratifycounts), dim3(grddims),dim3(blkdims), 0, 0, strata, n, a, bi);
  hipDeviceSynchronize();
  return hipGetLastError();
}
#define RNDVALS 256
#define RNTHREADS 256
#define RNDBITS 8
#define RBIGBLK (4*1024)
// Per-chunk byte histogram for a software radix sort pass: for each
// RBIGBLK-sized chunk of a, count how many values have each of the 256
// possible byte values at byte position `digit` (little-endian byte index
// into the float's bit pattern).  One RNDVALS-wide count row per
// (block, chunk) pair is written to bi.
__global__ void __radixcounts(float *a, int n, int digit, unsigned int *bi) {
__shared__ unsigned int ic[RNDVALS];
int istart = (int)(((long long)blockIdx.x) * n / gridDim.x);
int iend = (int)(((long long)(blockIdx.x+1)) * n / gridDim.x);
int tid = threadIdx.x;
int bibase = RNDVALS * (blockIdx.x + istart / RBIGBLK);
for (int i = istart; i < iend; i += RBIGBLK) {
__syncthreads();
ic[threadIdx.x] = 0;
__syncthreads();
for (int j = i + tid; j < min(iend, i+tid+RBIGBLK); j += RNTHREADS) {
float v = a[j];
// Reinterpret the float as raw bytes and bucket on the chosen byte.
unsigned char *cv = (unsigned char *)&v;
atomicInc(&ic[cv[digit]], 65536*32767);
}
__syncthreads();
bi[bibase + threadIdx.x] = ic[threadIdx.x];
bibase += RNDVALS;
}
}
// Host launcher for __radixcounts: 32 blocks of RNTHREADS threads, one
// thread per possible byte value.
int radixcounts(float *a, int n, int digit, unsigned int *bi) {
  const dim3 blkdims(RNTHREADS,1,1);
  const dim3 grddims(32,1,1);
  hipLaunchKernelGGL(( __radixcounts), dim3(grddims),dim3(blkdims), 0, 0, a, n, digit, bi);
  hipDeviceSynchronize();
  return hipGetLastError();
}
#ifdef TEST
// Smoke test (built only with -DTEST): fills an m x n A with 1s and B with
// 2s, runs MatKernel (declared elsewhere) with opcode opn, and prints the
// first few elements of each matrix.  Args: [opn [m [n]]].
int main(int argc, char **argv) {
  int m=8, n=8, opn = 0;
  float *dA, *dB, *dC, *A, *B, *C;
  if (argc > 1) {
    sscanf(argv[1], "%d", &opn);
    if (argc > 2) {
      sscanf(argv[2], "%d", &m);
      if (argc > 3) {
        sscanf(argv[3], "%d", &n);
      }
    }
  }
  A = (float *)malloc(m*n*sizeof(float));
  B = (float *)malloc(m*n*sizeof(float));
  C = (float *)malloc(m*n*sizeof(float));
  hipMalloc((void**)&dA, m*n*sizeof(float));
  hipMalloc((void**)&dB, m*n*sizeof(float));
  hipMalloc((void**)&dC, m*n*sizeof(float));
  for (int i = 0; i < m*n; i++) {
    A[i] = 1.0f;
    B[i] = 2.0f;
  }
  hipMemcpy(dA, A, m*n*sizeof(float), hipMemcpyHostToDevice);
  hipMemcpy(dB, B, m*n*sizeof(float), hipMemcpyHostToDevice);
  printf("A %f %f %f %f\n", A[0], A[1], A[2], A[3]);
  printf("B %f %f %f %f\n", B[0], B[1], B[2], B[3]);
  MatKernel(dA, m, n, dB, m, n, dC, opn);
  hipError_t err = hipGetLastError();
  if( hipSuccess != err) {
    fprintf(stderr, "CUDA error %d", err);
    exit(1);
  }
  hipMemcpy(C, dC, m*n*sizeof(float), hipMemcpyDeviceToHost);
  printf("C %f %f %f %f\n", C[0], C[1], C[2], C[3]);
  printf("A %f %f %f %f\n", A[0], A[1], A[2], A[3]);
  printf("B %f %f %f %f\n", B[0], B[1], B[2], B[3]);
  if (dA != NULL) hipFree(dA);
  if (dB != NULL) hipFree(dB);
  if (dC != NULL) hipFree(dC);
  // Bug fix: A and B were previously leaked (only C was freed).
  if (A != NULL) free(A);
  if (B != NULL) free(B);
  if (C != NULL) free(C);
}
#endif
|
54abbd2ff53630864f4f799745106da4958bf2fb.cu
|
#include <cuda_runtime.h>
#include <stdio.h>
// --- Scalar operator primitives --------------------------------------------
// Float binary operators; comparisons return 1.0f/0.0f so they compose with
// arithmetic.
__device__ float op_add(float a, float b) {return a+b;}
__device__ float op_sub(float a, float b) {return a-b;}
__device__ float op_mul(float a, float b) {return a*b;}
__device__ float op_div(float a, float b) {return a/b;}
__device__ float op_gt(float a, float b) {return (a > b) ? 1.0f : 0;}
__device__ float op_lt(float a, float b) {return (a < b) ? 1.0f : 0;}
__device__ float op_eq(float a, float b) {return (a == b) ? 1.0f : 0;}
__device__ float op_ge(float a, float b) {return (a >= b) ? 1.0f : 0;}
__device__ float op_le(float a, float b) {return (a <= b) ? 1.0f : 0;}
__device__ float op_ne(float a, float b) {return (a != b) ? 1.0f : 0;}
__device__ float op_max(float a, float b) {return max(a,b);}
__device__ float op_min(float a, float b) {return min(a,b);}
// Integer counterparts (note: no max/min variants are defined).
__device__ int iop_add(int a, int b) {return a+b;}
__device__ int iop_sub(int a, int b) {return a-b;}
__device__ int iop_mul(int a, int b) {return a*b;}
__device__ int iop_div(int a, int b) {return a/b;}
__device__ int iop_gt(int a, int b) {return (a > b) ? 1 : 0;}
__device__ int iop_lt(int a, int b) {return (a < b) ? 1 : 0;}
__device__ int iop_eq(int a, int b) {return (a == b) ? 1 : 0;}
__device__ int iop_ge(int a, int b) {return (a >= b) ? 1 : 0;}
__device__ int iop_le(int a, int b) {return (a <= b) ? 1 : 0;}
__device__ int iop_ne(int a, int b) {return (a != b) ? 1 : 0;}
__device__ const optype operators[] = {
op_add,
op_sub,
op_mul,
op_div,
op_gt,
op_lt,
op_eq,
op_ge,
op_le,
op_ne,
op_max,
op_min};
__device__ const ioptype ioperators[] = {
iop_add,
iop_sub,
iop_mul,
iop_div,
iop_gt,
iop_lt,
iop_eq,
iop_ge,
iop_le,
iop_ne};
// --- Unary math primitives --------------------------------------------------
// Thin wrappers over the single-precision math intrinsics so they can be
// stored in the fctns jump table below.
__device__ float fn_abs(float a) {return abs(a);}
__device__ float fn_exp(float a) {return expf(a);}
// fn_log duplicates fn_ln (both call logf); only fn_ln appears in the table.
__device__ float fn_log(float a) {return logf(a);}
__device__ float fn_expm1(float a) {return expm1f(a);}
__device__ float fn_sqrt(float a) {return sqrtf(a);}
__device__ float fn_ln(float a) {return logf(a);}
__device__ float fn_log10(float a) {return log10f(a);}
__device__ float fn_log1p(float a) {return log1pf(a);}
__device__ float fn_cos(float a) {return cosf(a);}
__device__ float fn_sin(float a) {return sinf(a);}
__device__ float fn_tan(float a) {return tanf(a);}
__device__ float fn_cosh(float a) {return coshf(a);}
__device__ float fn_sinh(float a) {return sinhf(a);}
__device__ float fn_tanh(float a) {return tanhf(a);}
__device__ float fn_acos(float a) {return acosf(a);}
__device__ float fn_asin(float a) {return asinf(a);}
__device__ float fn_atan(float a) {return atanf(a);}
__device__ float fn_acosh(float a) {return acoshf(a);}
__device__ float fn_asinh(float a) {return asinhf(a);}
__device__ float fn_atanh(float a) {return atanhf(a);}
__device__ float fn_erf(float a) {return erff(a);}
__device__ float fn_erfinv(float a) {return erfinvf(a);}
__device__ float fn_erfc(float a) {return erfcf(a);}
__device__ float fn_erfcinv(float a) {return erfcinvf(a);}
__device__ float fn_gammaln(float a) {return lgammaf(a);}
__device__ float fn_gamma(float a) {return tgammaf(a);}
__device__ float fn_ceil(float a) {return ceilf(a);}
__device__ float fn_floor(float a) {return floorf(a);}
__device__ float fn_round(float a) {return roundf(a);}
__device__ float fn_trunc(float a) {return truncf(a);}
__device__ float fn_sign(float a) {return (a>0) ? 1.0f : ((a<0) ? -1.0f : 0);}
__device__ float fn_j0(float a) {return j0f(a);}
__device__ float fn_j1(float a) {return j1f(a);}
// jn/yn take an extra integer order argument, so they cannot share the
// unary signature and are left disabled.
//__device__ float fn_jn(float a) {return jnf(a);}
__device__ float fn_y0(float a) {return y0f(a);}
__device__ float fn_y1(float a) {return y1f(a);}
//__device__ float fn_yn(float a) {return ynf(a);}
// Piecewise approximation used in place of exp(psi(a)).
__device__ float fn_exppsi(float a) {return (a<1.0f) ? 0.5f*a*a : a-0.5f;}
__device__ float fn_atan2(float a, float b) {return atan2f(a, b);}
__device__ float fn_pow(float a, float b) {return powf(a, b);}
// Signature of a unary float device function.
typedef float (*fntype)(float);
// Device-side jump table of unary math functions, indexed by the opcode the
// host passes to apply_gfun.  NOTE(review): the order and count (35) must
// stay in sync with the host-side opcode enumeration -- only append, never
// reorder.
__device__ const fntype fctns[35] = {
fn_abs,
fn_exp,
fn_expm1,
fn_sqrt,
fn_ln,
fn_log10,
fn_log1p,
fn_cos,
fn_sin,
fn_tan,
fn_cosh,
fn_sinh,
fn_tanh,
fn_acos,
fn_asin,
fn_atan,
fn_acosh,
fn_asinh,
fn_atanh,
fn_erf,
fn_erfinv,
fn_erfc,
fn_erfcinv,
fn_gammaln,
fn_gamma,
fn_ceil,
fn_floor,
fn_round,
fn_trunc,
fn_sign,
fn_j0,
fn_j1,
fn_y0,
fn_y1,
fn_exppsi};
// Binary math functions for apply_gfun2.
__device__ const optype fctns2[2] = {
fn_atan2,
fn_pow};
// Element-wise unary map: B[i] = f(A[i]) for i in [0, N), where f is the
// device function selected from the fctns jump table by opcode opn.
// Grid-stride traversal, so any launch geometry covers all N elements.
__global__ void __apply_gfun(float *A, float *B, int N, int opn) {
  fntype fn = fctns[opn];
  int step = blockDim.x * gridDim.x;
  int i = threadIdx.x + blockDim.x * blockIdx.x;
  while (i < N) {
    B[i] = fn(A[i]);
    i += step;
  }
}
// Host launcher for __apply_gfun.  Grows the launch shape until
// nblocks*nthreads >= N (blocks up to 16 first, then threads up to 1024,
// then blocks again) and returns the launch status as an int.
int apply_gfun(float *A, float *B, int N, int opn) {
  int thrds = 32;
  int blks = 1;
  while (blks * thrds < N) {
    if (blks < 16) blks <<= 1;
    else if (thrds < 1024) thrds <<= 1;
    else blks <<= 1;
  }
  __apply_gfun<<<blks,thrds>>>(A, B, N, opn);
  return cudaGetLastError();
}
// Element-wise binary map: C[i] = f(A[i], B[i]) for i in [0, N), where f is
// chosen from the fctns2 table (atan2 or pow) by opcode opn.
__global__ void __apply_gfun2(float *A, float *B, float *C, int N, int opn) {
  optype fn = fctns2[opn];
  int step = blockDim.x * gridDim.x;
  int i = threadIdx.x + blockDim.x * blockIdx.x;
  while (i < N) {
    C[i] = fn(A[i], B[i]);
    i += step;
  }
}
// Host launcher for __apply_gfun2; same geometry-growing policy as
// apply_gfun.
int apply_gfun2(float *A, float *B, float *C, int N, int opn) {
  int thrds = 32;
  int blks = 1;
  while (blks * thrds < N) {
    if (blks < 16) blks <<= 1;
    else if (thrds < 1024) thrds <<= 1;
    else blks <<= 1;
  }
  __apply_gfun2<<<blks,thrds>>>(A, B, C, N, opn);
  return cudaGetLastError();
}
// --- Broadcasting binary-operator kernels (float) ---------------------------
// Each kernel applies op = operators[opn] element-wise with a different
// broadcast pattern; apply_binop picks the right one from the operand shapes.
// Same shape on both sides: C[i] = op(A[i], B[i]).
__global__ void __apply_full(float *A, float *B, float *C, int N, int opn) {
optype op = operators[opn];
int ip = threadIdx.x + blockDim.x * blockIdx.x;
for (int i = ip; i < N; i += blockDim.x * gridDim.x) {
C[i] = op(A[i],B[i]);
}
}
// B is a column vector (nrows x 1) broadcast across A's columns.
__global__ void __apply_right_col(float *A, float *B, float *C, int nrows, int ncols, int opn) {
optype op = operators[opn];
int ip = threadIdx.x + blockDim.x * blockIdx.x;
for (int i = ip; i < nrows*ncols; i += blockDim.x * gridDim.x) {
C[i] = op(A[i],B[i % nrows]);
}
}
// B is a row vector (1 x ncols) broadcast down A's rows.
__global__ void __apply_right_row(float *A, float *B, float *C, int nrows, int ncols, int opn) {
optype op = operators[opn];
int ip = threadIdx.x + blockDim.x * blockIdx.x;
for (int i = ip; i < nrows*ncols; i += blockDim.x * gridDim.x) {
C[i] = op(A[i],B[i / nrows]);
}
}
// A is a column vector broadcast across B's columns.
__global__ void __apply_left_col(float *A, float *B, float *C, int nrows, int ncols, int opn) {
optype op = operators[opn];
int ip = threadIdx.x + blockDim.x * blockIdx.x;
for (int i = ip; i < nrows*ncols; i += blockDim.x * gridDim.x) {
C[i] = op(A[i % nrows],B[i]);
}
}
// A is a row vector broadcast down B's rows.
__global__ void __apply_left_row(float *A, float *B, float *C, int nrows, int ncols, int opn) {
optype op = operators[opn];
int ip = threadIdx.x + blockDim.x * blockIdx.x;
for (int i = ip; i < nrows*ncols; i += blockDim.x * gridDim.x) {
C[i] = op(A[i / nrows],B[i]);
}
}
// B is a 1x1 scalar; read once per thread, then broadcast.
__global__ void __apply_right_val(float *A, float *B, float *C, int nrows, int ncols, int opn) {
optype op = operators[opn];
int ip = threadIdx.x + blockDim.x * blockIdx.x;
float val = B[0];
for (int i = ip; i < nrows*ncols; i += blockDim.x * gridDim.x) {
C[i] = op(A[i],val);
}
}
// A is a 1x1 scalar; read once per thread, then broadcast.
__global__ void __apply_left_val(float *A, float *B, float *C, int nrows, int ncols, int opn) {
optype op = operators[opn];
int ip = threadIdx.x + blockDim.x * blockIdx.x;
float val = A[0];
for (int i = ip; i < nrows*ncols; i += blockDim.x * gridDim.x) {
C[i] = op(val,B[i]);
}
}
// Dispatch the float binary operator opn over A and B into C, choosing the
// kernel variant that matches the operand shapes (equal shapes, matching
// column/row vector, or 1x1 scalar on either side).  Returns the launch
// status as an int.
int apply_binop(float *A, int Anrows, int Ancols,
float *B, int Bnrows, int Bncols, float *C, int opn) {
// Output element count is the broadcast of the two shapes.
int N = max(Anrows, Bnrows)*max(Ancols, Bncols);
int nthreads = 32;
int nblocks = 1;
// Grow the launch geometry until it covers N elements.
while (nblocks * nthreads < N) {
if (nblocks < 16) {
nblocks = 2*nblocks;
} else if (nthreads < 1024) {
nthreads = 2*nthreads;
} else {
nblocks = 2*nblocks;
}
}
if (Anrows == Bnrows && Ancols == Bncols) {
__apply_full<<<nblocks,nthreads>>>(A, B, C, N, opn);
} else if (Anrows == Bnrows && Bncols == 1) {
__apply_right_col<<<nblocks,nthreads>>>(A, B, C, Anrows, Ancols, opn);
} else if (Ancols == Bncols && Bnrows == 1) {
__apply_right_row<<<nblocks,nthreads>>>(A, B, C, Anrows, Ancols, opn);
} else if (Anrows == Bnrows && Ancols == 1) {
__apply_left_col<<<nblocks,nthreads>>>(A, B, C, Bnrows, Bncols, opn);
} else if (Ancols == Bncols && Anrows == 1) {
__apply_left_row<<<nblocks,nthreads>>>(A, B, C, Bnrows, Bncols, opn);
} else if (Bnrows == 1 && Bncols == 1) {
__apply_right_val<<<nblocks,nthreads>>>(A, B, C, Anrows, Ancols, opn);
} else if (Anrows == 1 && Ancols == 1) {
__apply_left_val<<<nblocks,nthreads>>>(A, B, C, Bnrows, Bncols, opn);
}
// NOTE(review): if no shape case matches, nothing is launched, C is left
// unwritten, and the function still reports success -- confirm callers
// validate shapes beforehand.
cudaError_t err = cudaGetLastError();
return err;
}
// Integer elementwise op: C[i] = op(A[i], B[i]) over all N entries.
__global__ void __apply_full_int(int *A, int *B, int *C, int N, int opn) {
  ioptype op = ioperators[opn];
  const int step = blockDim.x * gridDim.x;
  for (int i = threadIdx.x + blockDim.x * blockIdx.x; i < N; i += step) {
    C[i] = op(A[i], B[i]);
  }
}
// Integer op broadcasting the nrows-long vector B on the right: C[i] = op(A[i], B[i % nrows]).
__global__ void __apply_right_col_int(int *A, int *B, int *C, int nrows, int ncols, int opn) {
  ioptype op = ioperators[opn];
  const int n = nrows * ncols;
  const int step = blockDim.x * gridDim.x;
  for (int i = threadIdx.x + blockDim.x * blockIdx.x; i < n; i += step) {
    C[i] = op(A[i], B[i % nrows]);
  }
}
// Integer op broadcasting the ncols-long vector B on the right: C[i] = op(A[i], B[i / nrows]).
__global__ void __apply_right_row_int(int *A, int *B, int *C, int nrows, int ncols, int opn) {
  ioptype op = ioperators[opn];
  const int n = nrows * ncols;
  const int step = blockDim.x * gridDim.x;
  for (int i = threadIdx.x + blockDim.x * blockIdx.x; i < n; i += step) {
    C[i] = op(A[i], B[i / nrows]);
  }
}
// Integer op broadcasting the nrows-long vector A on the left: C[i] = op(A[i % nrows], B[i]).
__global__ void __apply_left_col_int(int *A, int *B, int *C, int nrows, int ncols, int opn) {
  ioptype op = ioperators[opn];
  const int n = nrows * ncols;
  const int step = blockDim.x * gridDim.x;
  for (int i = threadIdx.x + blockDim.x * blockIdx.x; i < n; i += step) {
    C[i] = op(A[i % nrows], B[i]);
  }
}
// Integer op broadcasting the ncols-long vector A on the left: C[i] = op(A[i / nrows], B[i]).
__global__ void __apply_left_row_int(int *A, int *B, int *C, int nrows, int ncols, int opn) {
  ioptype op = ioperators[opn];
  const int n = nrows * ncols;
  const int step = blockDim.x * gridDim.x;
  for (int i = threadIdx.x + blockDim.x * blockIdx.x; i < n; i += step) {
    C[i] = op(A[i / nrows], B[i]);
  }
}
// Integer op against the device scalar B[0]: C[i] = op(A[i], B[0]).
__global__ void __apply_right_val_int(int *A, int *B, int *C, int nrows, int ncols, int opn) {
  ioptype op = ioperators[opn];
  const int rhs = B[0];
  const int n = nrows * ncols;
  const int step = blockDim.x * gridDim.x;
  for (int i = threadIdx.x + blockDim.x * blockIdx.x; i < n; i += step) {
    C[i] = op(A[i], rhs);
  }
}
// Integer op against the device scalar A[0]: C[i] = op(A[0], B[i]).
__global__ void __apply_left_val_int(int *A, int *B, int *C, int nrows, int ncols, int opn) {
  ioptype op = ioperators[opn];
  const int lhs = A[0];
  const int n = nrows * ncols;
  const int step = blockDim.x * gridDim.x;
  for (int i = threadIdx.x + blockDim.x * blockIdx.x; i < n; i += step) {
    C[i] = op(lhs, B[i]);
  }
}
// Integer counterpart of apply_binop: C = op(A, B) with matrix / row / column /
// scalar broadcasting, dispatched to the *_int kernels.  Returns the CUDA
// error code from the launch (0 on success).
// NOTE(review): as with apply_binop, an unmatched shape combination is
// silently ignored.
int apply_biniop(int *A, int Anrows, int Ancols,
     int *B, int Bnrows, int Bncols,
     int *C, int opn) {
  int N = max(Anrows, Bnrows)*max(Ancols, Bncols);
  // Grow the launch geometry until it covers N elements: blocks first (up to
  // 16), then threads (up to 1024), then blocks again.
  int nthreads = 32;
  int nblocks = 1;
  while (nblocks * nthreads < N) {
    if (nblocks < 16) {
      nblocks = 2*nblocks;
    } else if (nthreads < 1024) {
      nthreads = 2*nthreads;
    } else {
      nblocks = 2*nblocks;
    }
  }
  if (Anrows == Bnrows && Ancols == Bncols) {
    __apply_full_int<<<nblocks,nthreads>>>(A, B, C, N, opn);
  } else if (Anrows == Bnrows && Bncols == 1) {
    __apply_right_col_int<<<nblocks,nthreads>>>(A, B, C, Anrows, Ancols, opn);
  } else if (Ancols == Bncols && Bnrows == 1) {
    __apply_right_row_int<<<nblocks,nthreads>>>(A, B, C, Anrows, Ancols, opn);
  } else if (Anrows == Bnrows && Ancols == 1) {
    __apply_left_col_int<<<nblocks,nthreads>>>(A, B, C, Bnrows, Bncols, opn);
  } else if (Ancols == Bncols && Anrows == 1) {
    __apply_left_row_int<<<nblocks,nthreads>>>(A, B, C, Bnrows, Bncols, opn);
  } else if (Bnrows == 1 && Bncols == 1) {
    __apply_right_val_int<<<nblocks,nthreads>>>(A, B, C, Anrows, Ancols, opn);
  } else if (Anrows == 1 && Ancols == 1) {
    __apply_left_val_int<<<nblocks,nthreads>>>(A, B, C, Bnrows, Bncols, opn);
  }
  cudaError_t err = cudaGetLastError();
  return err;
}
// Dense-times-sparse product: C += A * B, with B given elementwise by triples
// (Bir[j] = row, Bic[j] = column, Bdata[j] = value) for j in [0, nnz).  Each
// block takes a contiguous slice of the nonzeros; threads stride over rows.
// NOTE(review): the running sum is flushed whenever the column index changes
// between consecutive nonzeros, so the triples appear to be assumed sorted by
// column — confirm with callers.
__global__ void __dsmult(int nrows, int nnz, float *A, float *Bdata, int *Bir, int *Bic, float *C) {
  int jstart = ((long long)blockIdx.x) * nnz / gridDim.x;      // first nonzero owned by this block
  int jend = ((long long)(blockIdx.x + 1)) * nnz / gridDim.x;  // one past the last
  for (int i = threadIdx.x; i < nrows; i += blockDim.x) {
    float sum = 0;
    for (int j = jstart; j < jend ; j++) {
      sum += A[i + nrows * Bir[j]] * Bdata[j];
      if (j == jend-1 || Bic[j] != Bic[j+1]) {
        // end of a same-column run: flush.  Atomic because several blocks may
        // contribute to the same output column.
        atomicAdd(&C[i + nrows * Bic[j]], sum);
        sum = 0;
      }
    }
  }
}
// Host launcher for __dsmult; returns the CUDA error code from the launch.
int dsmult(int nrows, int ncols, int nnz, float *A, float *Bdata, int *Bir, int *Bic, float *C) {
  const int nthreads = min(1024, nrows);
  const int nblocks = min(1024*1024, ncols);
  __dsmult<<<nblocks,nthreads>>>(nrows, nnz, A, Bdata, Bir, Bic, C);
  return cudaGetLastError();
}
// Transposed dense-times-sparse: for each nonzero triple (Bir[j], Bic[j],
// Bdata[j]) this scatters C[:, Bir[j]] += A[:, Bic[j]] * Bdata[j].
// NOTE(review): the cached column of A (aval) is reloaded only when the
// column index changes between consecutive nonzeros, so the triples appear to
// be assumed sorted by column — confirm with callers.
__global__ void __dsmultT(int nrows, int nnz, float *A, float *Bdata, int *Bir, int *Bic, float *C) {
  int jstart = ((long long)blockIdx.x) * nnz / gridDim.x;      // this block's slice of nonzeros
  int jend = ((long long)(blockIdx.x + 1)) * nnz / gridDim.x;
  for (int i = threadIdx.x; i < nrows; i += blockDim.x) {
    float aval = 0;
    for (int j = jstart; j < jend ; j++) {
      if (j == jstart || Bic[j-1] != Bic[j]) {
        aval = A[i + nrows * Bic[j]];   // new column run: refresh the cached A value
      }
      atomicAdd(&C[i + nrows * Bir[j]], aval * Bdata[j]);
    }
  }
}
// Host launcher for __dsmultT; returns the CUDA error code from the launch.
int dsmultT(int nrows, int ncols, int nnz, float *A, float *Bdata, int *Bir, int *Bic, float *C) {
  const int nthreads = min(1024, nrows);
  const int nblocks = min(1024*1024, ncols);
  __dsmultT<<<nblocks,nthreads>>>(nrows, nnz, A, Bdata, Bir, Bic, C);
  return cudaGetLastError();
}
__global__ void __dds(int nrows, int nnz, float *A, float *B, int *Cir, int *Cic, float *P);
__global__ void __reduce1op(int nrows, int ncols, float *A, float *B, int opn);
#ifdef __CUDA_ARCH__
#if __CUDA_ARCH__ > 200
// Sampled dense-dense product (shuffle path): for each index pair
// (Cir[j], Cic[j]) compute the dot product of column Cir[j] of A with column
// Cic[j] of B (both nrows tall, column-major) and store it in P[j].
// Nonzero indices are split evenly across blocks; lane partials are combined
// with warp shuffles.
// NOTE(review): maskless __shfl_down is the pre-Volta API, and the shuffle
// tree is only exact for power-of-two blockDim.x (the launcher uses
// min(32, nrows)) — verify before reusing with other geometries.
__global__ void __dds(int nrows, int nnz, float *A, float *B, int *Cir, int *Cic, float *P) {
  int jstart = ((long long)blockIdx.x) * nnz / gridDim.x;
  int jend = ((long long)(blockIdx.x + 1)) * nnz / gridDim.x;
  for (int j = jstart; j < jend ; j++) {
    float sum = 0;
    int aoff = nrows * Cir[j];   // base of column Cir[j] of A
    int boff = nrows * Cic[j];   // base of column Cic[j] of B
    for (int i = threadIdx.x; i < nrows; i += blockDim.x) {
      sum += A[i + aoff] * B[i + boff];
    }
    for (int i = 1; i < blockDim.x; i *= 2) {
      sum = sum + __shfl_down(sum, i);   // warp tree reduction
    }
    if (threadIdx.x == 0) {
      P[j] = sum;
    }
  }
}
// Column-wise reduction (shuffle path): B[icol] = reduction with op over
// column icol of the nrows x ncols column-major matrix A.  threadIdx.x
// strides down the rows; lane partials are combined with warp shuffles and
// lane 0 writes the result.
// NOTE(review): columns advance by blockDim.y * gridDim.x, but the launcher
// reduce1op() uses a (1, nblks) grid, so blocks with blockIdx.y > 0 redo the
// same columns (duplicate identical writes) — confirm the intended geometry.
__global__ void __reduce1op(int nrows, int ncols, float *A, float *B, int opn) {
  optype op = operators[opn];
  int basecol = threadIdx.y + blockDim.y * blockIdx.x;
  for (int icol = basecol; icol < ncols; icol += blockDim.y * gridDim.x) {
    float v = A[threadIdx.x + icol * nrows];
    for (int i = threadIdx.x + blockDim.x; i < nrows; i += blockDim.x) {
      v = op(v, A[i + icol * nrows]);
    }
    for (int i = 1; i < blockDim.x; i *= 2) {
      v = op(v, __shfl_down(v, i));   // pre-Volta maskless shuffle API
    }
    if (threadIdx.x == 0) {
      B[icol] = v;
    }
  }
}
#else
// Shared-memory fallback of __dds (same contract: P[j] = dot of A column
// Cir[j] with B column Cic[j]) for devices without shuffles.
// NOTE(review): the reduction loop has no __syncthreads() and the buffer is
// not volatile — correctness relies on a single warp (launcher uses
// blockDim.x = min(32, nrows)) executing in lockstep.  Do not launch with
// larger blocks.
__global__ void __dds(int nrows, int nnz, float *A, float *B, int *Cir, int *Cic, float *P) {
  __shared__ float parts[1][33];   // one padded row of per-lane partials
  int jstart = ((long long)blockIdx.x) * nnz / gridDim.x;
  int jend = ((long long)(blockIdx.x + 1)) * nnz / gridDim.x;
  for (int j = jstart; j < jend ; j++) {
    float sum = 0;
    int aoff = nrows * Cir[j];
    int boff = nrows * Cic[j];
    for (int i = threadIdx.x; i < nrows; i += blockDim.x) {
      sum += A[i + aoff] * B[i + boff];
    }
    parts[0][threadIdx.x] = sum;
    for (int i = 1; i < blockDim.x; i *= 2) {
      if (i + threadIdx.x < blockDim.x) {
        parts[0][threadIdx.x] = parts[0][threadIdx.x] + parts[0][i + threadIdx.x];
      }
    }
    if (threadIdx.x == 0) {
      P[j] = parts[0][0];   // the sliding-window tree leaves the total in slot 0
    }
  }
}
// Shared-memory fallback of the column-wise reduction: B[icol] = reduction
// with op over column icol of A.  Partials land in a padded 32x33 tile and
// are combined by a tree over threadIdx.x.
// NOTE(review): the tree loop has no barrier between steps — it relies on
// blockDim.x <= 32 (one warp per tile column) running in lockstep; the
// trailing __syncthreads() only separates loop iterations.
__global__ void __reduce1op(int nrows, int ncols, float *A, float *B, int opn) {
  __shared__ float parts[32][33];   // +1 padding against bank conflicts
  optype op = operators[opn];
  for (int icol = threadIdx.y + blockIdx.y * blockDim.y; icol < ncols; icol += blockDim.y * gridDim.y) {
    float v = A[threadIdx.x + icol * nrows];
    for (int irow = threadIdx.x + blockDim.x; irow < nrows; irow += blockDim.x) {
      v = op(v, A[irow + icol * nrows]);
    }
    parts[threadIdx.x][threadIdx.y] = v;
    for (int i = 1; i < blockDim.x; i *= 2) {
      if (i + threadIdx.x < blockDim.x) {
        parts[threadIdx.x][threadIdx.y] = op(parts[threadIdx.x][threadIdx.y], parts[i + threadIdx.x][threadIdx.y]);
      }
    }
    if (threadIdx.x == 0) {
      B[icol] = parts[0][threadIdx.y];
    }
    __syncthreads();
  }
}
#endif
#endif
#define BLOCKDIM 32
// Tiled out-of-place transpose of a column-major nrows x ncols matrix.
// BLOCKDIM x BLOCKDIM tiles are staged through shared memory, padded to
// BLOCKDIM+1 columns to avoid bank conflicts.  Both grid dimensions stride
// over the matrix, so any grid size produces a complete transpose.
__global__ void __transpose(float *in, int instride, float *out, int outstride, int nrows, int ncols) {
  int nx = BLOCKDIM * gridDim.x;    // stride of the block-tiling in rows
  int ny = BLOCKDIM * gridDim.y;    // stride of the block-tiling in columns
  int ix = BLOCKDIM * blockIdx.x;   // this block's first row
  int iy = BLOCKDIM * blockIdx.y;   // this block's first column
  __shared__ float tile[BLOCKDIM][BLOCKDIM+1];
  for (int yb = iy; yb < ncols; yb += ny) {
    for (int xb = ix; xb < nrows; xb += nx) {
      // Load a tile: threadIdx.x walks rows, threadIdx.y strides the columns.
      if (xb + threadIdx.x < nrows) {
        int ylim = min(ncols, yb + BLOCKDIM);
        for (int y = threadIdx.y + yb; y < ylim; y += blockDim.y) {
          tile[threadIdx.x][y-yb] = in[threadIdx.x+xb + y*instride];
        }
      }
      __syncthreads();   // tile fully written before it is read transposed
      if (yb + threadIdx.x < ncols) {
        int xlim = min(nrows, xb + BLOCKDIM);
        for (int x = threadIdx.y + xb; x < xlim; x += blockDim.y) {
          out[threadIdx.x + yb + x*outstride] = tile[x-xb][threadIdx.x];
        }
      }
      __syncthreads();   // drain reads before the next iteration overwrites the tile
    }
  }
}
// Host launcher for __transpose: out (ncols x nrows) = transpose of
// in (nrows x ncols), both column-major with the given leading strides.
// Synchronizes and returns 0 on success, otherwise the CUDA error code.
int transpose(float *in, int instride, float *out, int outstride, int nrows, int ncols) {
  const dim3 griddims(32,32);
  const dim3 blockdims(BLOCKDIM,16,1);
  __transpose<<<griddims,blockdims>>>(in, instride, out, outstride, nrows, ncols);
  cudaDeviceSynchronize();
  cudaError_t err = cudaGetLastError();
  if (err != cudaSuccess) {
    // fix: the previous message had no newline and omitted the error string,
    // which made failures hard to diagnose in interleaved logs.
    fprintf(stderr, "cuda error in transpose: %s\n", cudaGetErrorString(err));
    return err;
  }
  return 0;
}
// Host launcher for __dds: one output in P per (Cir, Cic) pair, roughly
// eight pairs per block.  Returns the CUDA error code from the launch.
int dds(int nrows, int nnz, float *A, float *B, int *Cir, int *Cic, float *P) {
  const int nthreads = min(32, nrows);
  const int nblocks = min(32*1024*1024, max(1, nnz/8));
  __dds<<<nblocks,nthreads>>>(nrows, nnz, A, B, Cir, Cic, P);
  return cudaGetLastError();
}
// Host launcher for __reduce1op: column-wise reduce of the nrows x ncols
// matrix A into B (one value per column).  Synchronizes before returning
// the CUDA error code.
int reduce1op(int nrows, int ncols, float *A, float *B, int opn) {
  const int blkx = min(32, nrows);
  const int blky = min(32, ncols);
  const int nblks = max(1, ((int)(((long long)nrows) * ncols / blkx / blky / 16)));
  const dim3 blkdims(blkx, blky, 1);
  const dim3 griddims(1, nblks, 1);
  __reduce1op<<<griddims,blkdims>>>(nrows, ncols, A, B, opn);
  cudaDeviceSynchronize();
  return cudaGetLastError();
}
// Row-wise reduction: B[irow] = reduction with op over the columns of row
// irow of the nrows x ncols column-major matrix A.  Each thread accumulates
// a partial over its strided set of columns; the partials are then combined
// by an explicitly barriered sliding-window tree over threadIdx.y in the
// padded shared tile.
__global__ void __reduce2op(int nrows, int ncols, float *A, float *B, int opn) {
  __shared__ float parts[32][33];   // +1 padding against bank conflicts
  optype op = operators[opn];
  int baserow = threadIdx.x + blockDim.x * blockIdx.x;
  for (int irow = baserow; irow < nrows; irow += blockDim.x * gridDim.x) {
    float v = A[irow + threadIdx.y * nrows];
    for (int icol = threadIdx.y + blockDim.y; icol < ncols; icol += blockDim.y) {
      v = op(v, A[irow + icol * nrows]);
    }
    parts[threadIdx.x][threadIdx.y] = v;
    __syncthreads();
    float newv = 0;
    for (int i = 1; i < blockDim.y; i *= 2) {
      // Read and write phases are separated by barriers so the whole block
      // can participate in the tree safely.
      if (i + threadIdx.y < blockDim.y) newv = parts[threadIdx.x][i+threadIdx.y];
      __syncthreads();
      if (i + threadIdx.y < blockDim.y) parts[threadIdx.x][threadIdx.y] = op(parts[threadIdx.x][threadIdx.y], newv);
      __syncthreads();
    }
    if (threadIdx.y == 0) {
      B[irow] = parts[threadIdx.x][0];
    }
    __syncthreads();
  }
}
// Host launcher for __reduce2op: row-wise reduce of the nrows x ncols matrix
// A into B (one value per row).  Synchronizes before returning the CUDA
// error code.
int reduce2op(int nrows, int ncols, float *A, float *B, int opn) {
  const int blkx = min(32, nrows);
  const int blky = min(32, ncols);
  const int nblks = max(1, ((int)(((long long)nrows) * ncols / blkx / blky / 16)));
  const dim3 blkdims(blkx, blky, 1);
  const dim3 griddims(nblks, 1, 1);
  __reduce2op<<<griddims,blkdims>>>(nrows, ncols, A, B, opn);
  cudaDeviceSynchronize();
  return cudaGetLastError();
}
// Packs each float a[i] plus its column index into a 64-bit key b[i]: the
// float's bits are remapped with the radix-sort sign trick (flip all bits
// when negative, flip only the sign bit otherwise) so that integer ordering
// matches float ordering, and the column index i/nrows goes in the upper 32
// bits.  __extractmat inverts this transform.
__global__ void __embedmat(float *a, long long *b, int nrows, int ncols) {
  int tid = threadIdx.x + blockIdx.x * blockDim.x;
  for (int i = tid; i < nrows*ncols; i += blockDim.x*gridDim.x) {
    float v = a[i];
    int vi = *((int *)&v);
    int mask = (vi >> 31) | 0x80000000;   // all-ones for negatives, sign bit only for positives
    vi = vi ^ mask;
    b[i] = (long long)vi + (((long long)(i/nrows))<<32);
  }
}
// Inverse of __embedmat: recovers the float from the low 32 bits of b[i]
// (undoing the sign-trick bit remap) and stores it in a[i].  The column
// index held in the upper bits is discarded.
__global__ void __extractmat(float *a, long long *b, int nrows, int ncols) {
  int tid = threadIdx.x + blockIdx.x * blockDim.x;
  for (int i = tid; i < nrows*ncols; i += blockDim.x*gridDim.x) {
    long long v = b[i];
    int vi = *((int *)&v);
    int mask = (~(vi >> 31)) | 0x80000000;   // inverse of the embed-time mask
    vi = vi ^ mask;
    a[i] = *((float *)&vi);
  }
}
// Chooses a launch geometry covering at least N elements: starts from
// 32 blocks x 1 thread, doubling the thread count (up to 1024) before
// doubling the block count any further.
void setsizes(int N, int *nblocksp, int *nthreadsp) {
  int blocks = 32, threads = 1;
  while (blocks * threads < N) {
    if (blocks < 16)         blocks  <<= 1;
    else if (threads < 1024) threads <<= 1;
    else                     blocks  <<= 1;
  }
  *nblocksp = blocks;
  *nthreadsp = threads;
}
// Host launcher for __embedmat over the full nrows x ncols matrix.
// Synchronizes before returning the CUDA error code.
int embedmat(float *a, long long *b, int nrows, int ncols) {
  int nblocks, nthreads;
  setsizes(nrows*ncols, &nblocks, &nthreads);
  __embedmat<<<nblocks,nthreads>>>(a, b, nrows, ncols);
  cudaDeviceSynchronize();
  return cudaGetLastError();
}
// Host launcher for __extractmat over the full nrows x ncols matrix.
// Synchronizes before returning the CUDA error code.
int extractmat(float *a, long long *b, int nrows, int ncols) {
  int nblocks, nthreads;
  setsizes(nrows*ncols, &nblocks, &nthreads);
  __extractmat<<<nblocks,nthreads>>>(a, b, nrows, ncols);
  cudaDeviceSynchronize();
  return cudaGetLastError();
}
//#include <thrust/detail/backend/cuda/detail/b40c/radixsort_api.h>
//#include "myradix_sort.inl"
#include <thrust/sort.h>
#include <thrust/device_ptr.h>
#include <thrust/reverse.h>
// Spine-buffer size (in elements) that the b40c float radix sorter needs for
// N keys; callers size the ispine temp buffer for rsortx from this.
// NOTE(review): depends on Thrust's internal detail:: namespace — this pins
// the build to an old Thrust version.
int rsortsizex(int N) {
  thrust::detail::backend::cuda::detail::b40c_thrust::RadixSortingEnactor<float,unsigned int> sorter(N);
  return sorter.SpineElements();
}
// Spine-buffer size for the b40c (long long, uint) radix sorter on N keys;
// used to size the temp buffer for rsorty.
// NOTE(review): depends on Thrust's internal detail:: namespace — this pins
// the build to an old Thrust version.
int rsortsizey(int N) {
  thrust::detail::backend::cuda::detail::b40c_thrust::RadixSortingEnactor<long long,unsigned int> sorter(N);
  return sorter.SpineElements();
}
// Radix-sorts each column of pkeys (carrying pvals along) in place, one
// column at a time, using the b40c sorter with caller-provided temp storage
// (tkeys/tvals/ispine/bflags, sized via rsortsizex).
// Returns cudaSuccess, or the first CUDA error encountered.
int rsortx(float *pkeys, unsigned int *pvals, float *tkeys, unsigned int *tvals,
           int *ispine, bool * bflags, int nrows, int ncols) {
  int i;
  cudaError_t err = cudaSuccess;   // fix: was uninitialized, so ncols == 0 returned garbage
  thrust::detail::backend::cuda::detail::b40c_thrust::RadixSortingEnactor<float,unsigned int> sorter(nrows);
  thrust::detail::backend::cuda::detail::b40c_thrust::RadixSortStorage<float,unsigned int>  storage;
  storage.d_alt_keys = tkeys;
  storage.d_alt_values = tvals;
  storage.d_spine = ispine;
  storage.d_from_alt_storage = bflags;
  for (i = 0; i < ncols; i++) {
    storage.d_keys = pkeys+i*nrows;
    storage.d_values = pvals+i*nrows;
    sorter.EnactSort(storage);
    cudaDeviceSynchronize();
    err = cudaGetLastError();
    if (err != cudaSuccess) break;  // fix: stop on first failure instead of letting later columns mask it
  }
  return err;
}
// Single radix sort of N (long long key, uint value) pairs in place using
// the b40c sorter with caller-provided alternate buffers and spine (sized
// via rsortsizey).  Returns the CUDA error code after synchronizing.
// NOTE(review): depends on Thrust's internal detail:: namespace — this pins
// the build to an old Thrust version.
int rsorty(long long *pkeys, unsigned int *pvals, long long *tkeys, unsigned int *tvals, int *ispine, bool * bflags, int N) {
  thrust::detail::backend::cuda::detail::b40c_thrust::RadixSortingEnactor<long long,unsigned int> sorter(N);
  thrust::detail::backend::cuda::detail::b40c_thrust::RadixSortStorage<long long,unsigned int>  storage;
  storage.d_keys = pkeys;
  storage.d_values = pvals;
  storage.d_alt_keys = tkeys;
  storage.d_alt_values = tvals;
  storage.d_spine = ispine;
  storage.d_from_alt_storage = bflags;
  sorter.EnactSort(storage);
  cudaDeviceSynchronize();
  cudaError_t err = cudaGetLastError();
  return err;
}
// Sorts N (key, value) pairs ascending by key in place on device dev using
// thrust::sort_by_key.  Synchronizes before returning the CUDA error code.
int rsort(long long *pkeys, unsigned int *pvals, int N, int dev) {
  cudaSetDevice(dev);
  thrust::device_ptr<long long> keyp(pkeys);
  thrust::device_ptr<unsigned int> valp(pvals);
  thrust::sort_by_key(keyp, keyp + N, valp);
  cudaDeviceSynchronize();
  return cudaGetLastError();
}
// Sorts each column of the nrows x ncols key matrix independently (values
// carried along), via one thrust::sort_by_key per column.
int rsort2(float *pkeys, unsigned int *pvals, int nrows, int ncols) {
  for (int col = 0; col < ncols; col++) {
    thrust::device_ptr<float> keyp(pkeys + col*nrows);
    thrust::device_ptr<unsigned int> valp(pvals + col*nrows);
    thrust::sort_by_key(keyp, keyp + nrows, valp);
  }
  cudaDeviceSynchronize();
  return cudaGetLastError();
}
// Scatters the values of a into 32 strata of output b (strata are `stride`
// floats apart), classifying each value with 5 comparisons against the
// 32-entry threshold tree in strata (cached in ss).  Values are buffered
// 32-at-a-time per stratum in shared memory and flushed to b when a buffer
// fills; bi[j] is the running global output count for stratum j, advanced
// with atomicAdd.
// NOTE(review): there are no __syncthreads() calls — correctness relies on
// the launcher's single-warp blocks (stratify uses <<<40,32>>>) running in
// lockstep.  Do not launch with larger blocks.
__global__ void __stratify(float *strata, int n, float *a, float *b, unsigned int *bi, int stride) {
  __shared__ float ss[32];              // stratum threshold tree
  __shared__ unsigned int ibin[32];     // fill level of each stratum buffer
  __shared__ unsigned int ebin[32];     // reserved global offsets per stratum
  __shared__ unsigned int todo[32];     // strata whose buffers filled this round
  __shared__ float bins[64][33];        // 32-deep (double-buffered) padded value buffers
  __shared__ unsigned int topush;
  int tid = threadIdx.x;
  ss[tid] = strata[tid];
  ibin[tid] = 0;
  for (int i = 0; i < n; i += blockDim.x * gridDim.x) {
    int ii = i + tid + blockDim.x * blockIdx.x;
    if (tid == 0) topush = 0;
    if (ii < n) {
      float v = a[ii];
      // 5-level binary search through the implicit threshold tree.
      int j = 1;
      j = (v > ss[j-1]) ? 2*j+1 : 2*j;
      j = (v > ss[j-1]) ? 2*j+1 : 2*j;
      j = (v > ss[j-1]) ? 2*j+1 : 2*j;
      j = (v > ss[j-1]) ? 2*j+1 : 2*j;
      j = (v > ss[j-1]) ? 2*j+1 : 2*j;
      j = j - 32;   // leaf index -> stratum 0..31
      int k = atomicInc(&ibin[j], 256);
      bins[k][j] = v;
      if (k == 31) {   // buffer j just filled: queue it for flushing
        k = atomicInc(&topush, 1024);
        todo[k] = j;
      }
    }
    if (ibin[tid] >= 32) {
      // Thread tid manages stratum tid: reserve 32 output slots and wrap the fill level.
      ebin[tid] = atomicAdd(&bi[tid], 32);
      ibin[tid] = ibin[tid] - 32;
    }
    for (int k = 0; k < topush; k++) {
      int j = todo[k];
      b[j*stride + ebin[j] + tid] = bins[ibin[j] + tid][j];
    }
  }
  // Drain the partially-filled buffers at the end.
  ebin[tid] = atomicAdd(&bi[tid], ibin[tid]);
  for (int j = 0; j < 32; j++) {
    if (tid < ibin[j]) {
      b[j*stride + ebin[j] + tid] = bins[tid][j];
    }
  }
}
// Host launcher for __stratify: fixed 40 blocks of 32 threads (one warp per
// block).  Synchronizes before returning the CUDA error code.
int stratify(float *strata, int n, float *a, float *b, unsigned int *bi, int stride) {
  __stratify<<<40,32>>>(strata, n, a, b, bi, stride);
  cudaDeviceSynchronize();
  return cudaGetLastError();
}
#define SNDVALS 256
#define SNDGRPS 4
#define SNTHREADS 1024
#define SBIGBLK (4*1024)
// Counts, for each SBIGBLK-sized chunk of a, how many values fall into each
// of the SNDVALS strata defined by the threshold tree in strata (cached in
// ss).  Counts accumulate in shared memory, split across SNDGRPS groups to
// reduce atomic contention, and one SNDVALS-long count row is written to bi
// per chunk (rows laid out from bibase).
__global__ void __stratifycounts(float *strata, int n, float *a, unsigned int *bi) {
  __shared__ unsigned int ic[SNDVALS][SNDGRPS];   // per-group partial counts
  __shared__ float ss[SNDVALS];                   // stratum threshold tree
  int istart = (int)(((long long)blockIdx.x) * n / gridDim.x);
  int iend = (int)(((long long)(blockIdx.x+1)) * n / gridDim.x);
  int bibase = SNDVALS * (blockIdx.x + istart / SBIGBLK);   // first output row for this block
  int tid = threadIdx.x + threadIdx.y * blockDim.x;
  if (threadIdx.y == 0) {
    ss[threadIdx.x] = strata[threadIdx.x];
  }
  for (int i = istart; i < iend; i += SBIGBLK) {
    __syncthreads();
    if (threadIdx.y < SNDGRPS) {
      ic[threadIdx.x][threadIdx.y] = 0;
    }
    __syncthreads();
    for (int k = i + tid; k < min(iend, i + tid + SBIGBLK); k += SNTHREADS) {
      float v = a[k];
      // 8-level binary search through the implicit threshold tree.
      int j = 0;
      j = (v > ss[j]) ? 2*j+2 : 2*j+1;
      j = (v > ss[j]) ? 2*j+2 : 2*j+1;
      j = (v > ss[j]) ? 2*j+2 : 2*j+1;
      j = (v > ss[j]) ? 2*j+2 : 2*j+1;
      j = (v > ss[j]) ? 2*j+2 : 2*j+1;
      j = (v > ss[j]) ? 2*j+2 : 2*j+1;
      j = (v > ss[j]) ? 2*j+2 : 2*j+1;
      j = (v > ss[j]) ? 2*j+2 : 2*j+1;
      j = j - SNDVALS + 1;   // leaf index -> stratum 0..SNDVALS-1
      atomicInc(&ic[j][threadIdx.y], 65536*32767);
    }
    __syncthreads();
    if (threadIdx.y == 0) {
      // Fold the SNDGRPS partials into the output row for this chunk.
      bi[bibase + threadIdx.x] = ic[threadIdx.x][0] + ic[threadIdx.x][1] + ic[threadIdx.x][2] + ic[threadIdx.x][3];
    }
    bibase += SNDVALS;
  }
}
// Host launcher for __stratifycounts: 8 blocks of SNDVALS x (SNTHREADS/SNDVALS)
// threads.  Synchronizes before returning the CUDA error code.
int stratifycounts(float *strata, int n, float *a, unsigned int *bi) {
  const dim3 blockdims(SNDVALS, SNTHREADS/SNDVALS, 1);
  const dim3 griddims(8, 1, 1);
  __stratifycounts<<<griddims,blockdims>>>(strata, n, a, bi);
  cudaDeviceSynchronize();
  return cudaGetLastError();
}
#define RNDVALS 256
#define RNTHREADS 256
#define RNDBITS 8
#define RBIGBLK (4*1024)
// Per-chunk byte histogram for radix sorting: counts occurrences of byte
// `digit` of each float in a, RBIGBLK elements per chunk, and writes an
// RNDVALS-long count row to bi for every (block, chunk) pair.
__global__ void __radixcounts(float *a, int n, int digit, unsigned int *bi) {
  __shared__ unsigned int ic[RNDVALS];
  int istart = (int)(((long long)blockIdx.x) * n / gridDim.x);
  int iend = (int)(((long long)(blockIdx.x+1)) * n / gridDim.x);
  int tid = threadIdx.x;
  int bibase = RNDVALS * (blockIdx.x + istart / RBIGBLK);   // first output row for this block
  for (int i = istart; i < iend; i += RBIGBLK) {
    __syncthreads();
    ic[threadIdx.x] = 0;
    __syncthreads();
    for (int j = i + tid; j < min(iend, i+tid+RBIGBLK); j += RNTHREADS) {
      float v = a[j];
      unsigned char *cv = (unsigned char *)&v;   // view the float as 4 radix digits
      atomicInc(&ic[cv[digit]], 65536*32767);
    }
    __syncthreads();
    bi[bibase + threadIdx.x] = ic[threadIdx.x];
    bibase += RNDVALS;
  }
}
// Host launcher for __radixcounts: 32 blocks of RNTHREADS threads.
// Synchronizes before returning the CUDA error code.
int radixcounts(float *a, int n, int digit, unsigned int *bi) {
  const dim3 blockdims(RNTHREADS, 1, 1);
  const dim3 griddims(32, 1, 1);
  __radixcounts<<<griddims,blockdims>>>(a, n, digit, bi);
  cudaDeviceSynchronize();
  return cudaGetLastError();
}
#ifdef TEST
// Smoke test (built with -DTEST): runs MatKernel on two m x n matrices of
// constants and prints the corners.  Usage: prog [opn [m [n]]].
int main(int argc, char **argv) {
  int m=8, n=8, opn = 0;
  float *dA, *dB, *dC, *A, *B, *C;
  if (argc > 1) {
    sscanf(argv[1], "%d", &opn);
    if (argc > 2) {
      sscanf(argv[2], "%d", &m);
      if (argc > 3) {
        sscanf(argv[3], "%d", &n);
      }
    }
  }
  A = (float *)malloc(m*n*sizeof(float));
  B = (float *)malloc(m*n*sizeof(float));
  C = (float *)malloc(m*n*sizeof(float));
  cudaMalloc((void**)&dA, m*n*sizeof(float));
  cudaMalloc((void**)&dB, m*n*sizeof(float));
  cudaMalloc((void**)&dC, m*n*sizeof(float));
  for (int i = 0; i < m*n; i++) {
    A[i] = 1.0f;
    B[i] = 2.0f;
  }
  cudaMemcpy(dA, A, m*n*sizeof(float), cudaMemcpyHostToDevice);
  cudaMemcpy(dB, B, m*n*sizeof(float), cudaMemcpyHostToDevice);
  printf("A %f %f %f %f\n", A[0], A[1], A[2], A[3]);
  printf("B %f %f %f %f\n", B[0], B[1], B[2], B[3]);
  MatKernel(dA, m, n, dB, m, n, dC, opn);
  cudaError_t err = cudaGetLastError();
  if( cudaSuccess != err) {
    fprintf(stderr, "CUDA error %d", err);
    exit(1);
  }
  cudaMemcpy(C, dC, m*n*sizeof(float), cudaMemcpyDeviceToHost);
  printf("C %f %f %f %f\n", C[0], C[1], C[2], C[3]);
  printf("A %f %f %f %f\n", A[0], A[1], A[2], A[3]);
  printf("B %f %f %f %f\n", B[0], B[1], B[2], B[3]);
  if (dA != NULL) cudaFree(dA);
  if (dB != NULL) cudaFree(dB);
  if (dC != NULL) cudaFree(dC);
  if (C != NULL) free(C);
  if (B != NULL) free(B);   // fix: B was previously leaked
  if (A != NULL) free(A);   // fix: A was previously leaked
  return 0;                 // fix: main fell off the end without returning
}
#endif
|
a2a2aecf20dc39871d55bc26110481ef295f7eeb.hip
|
// !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
#include "utils.h"
#include <string>
#include "loadSaveImage.h"
#include "device_launch_parameters.h"
#include <thrust/extrema.h>
//chroma-LogLuminance Space
static float *d_x__;
static float *d_y__;
static float *d_logY__;
//memory for the cdf
static unsigned int *d_cdf__;
static const int numBins = 1024;
size_t numRows__;
size_t numCols__;
/* Copied from Mike's IPython notebook with some minor modifications
* Mainly double precision constants to floats and log10 -> log10f
* Also removed Luminance (Y) channel since it is never used eke*/
/* Converts per-channel R,G,B images into CIE chromaticity (x, y) and
 * log10-luminance log10f(delta + Y) via the RGB -> XYZ matrix below.
 * One thread per pixel on a 2D grid; threads outside the image do nothing.
 * NOTE(review): x and y divide by L = X + Y + Z, which is 0 for an all-zero
 * pixel and yields NaN — confirm the loader never produces pure black. */
__global__ void rgb_to_xyY(
  float* d_r,      // input red channel, nx*ny floats
  float* d_g,      // input green channel
  float* d_b,      // input blue channel
  float* d_x,      // output chromaticity x
  float* d_y,      // output chromaticity y
  float* d_log_Y,  // output log10 luminance
  float delta,     // small offset so log10f never sees zero
  int num_pixels_y,
  int num_pixels_x )
{
  int ny = num_pixels_y;
  int nx = num_pixels_x;
  int2 image_index_2d = make_int2( ( blockIdx.x * blockDim.x ) + threadIdx.x, ( blockIdx.y * blockDim.y ) + threadIdx.y );
  int image_index_1d = ( nx * image_index_2d.y ) + image_index_2d.x;
  if ( image_index_2d.x < nx && image_index_2d.y < ny )
  {
    float r = d_r[ image_index_1d ];
    float g = d_g[ image_index_1d ];
    float b = d_b[ image_index_1d ];
    // sRGB -> CIE XYZ
    float X = ( r * 0.4124f ) + ( g * 0.3576f ) + ( b * 0.1805f );
    float Y = ( r * 0.2126f ) + ( g * 0.7152f ) + ( b * 0.0722f );
    float Z = ( r * 0.0193f ) + ( g * 0.1192f ) + ( b * 0.9505f );
    float L = X + Y + Z;
    float x = X / L;
    float y = Y / L;
    float log_Y = log10f( delta + Y );
    d_x[ image_index_1d ] = x;
    d_y[ image_index_1d ] = y;
    d_log_Y[ image_index_1d ] = log_Y;
  }
}
/* Copied from Mike's IPython notebook *
Modified just by having threads read the
normalization constant directly from device memory
instead of copying it back */
/* Scales an unsigned-int CDF into floats in [0, 1] by dividing by its last
 * entry d_input_cdf[n-1].  Every thread reads the normalization constant
 * directly from device memory (no host round-trip); one thread per bin. */
__global__ void normalize_cdf(
  unsigned int* d_input_cdf,   // unnormalized CDF, n entries
  float* d_output_cdf,         // output CDF in [0, 1]
  int n
  )
{
  const float normalization_constant = 1.f / d_input_cdf[n - 1];
  int global_index_1d = ( blockIdx.x * blockDim.x ) + threadIdx.x;
  if ( global_index_1d < n )
  {
    unsigned int input_value = d_input_cdf[ global_index_1d ];
    float output_value = input_value * normalization_constant;
    d_output_cdf[ global_index_1d ] = output_value;
  }
}
/* Copied from Mike's IPython notebook *
Modified double constants -> float *
Perform tone mapping based upon new *
luminance scaling */
/* Tone-maps one pixel: looks up the pixel's log-luminance bin in the
 * normalized CDF to get the remapped luminance Y_new (histogram equalization
 * in log-luminance), then converts (x, y, Y_new) back through XYZ to RGB.
 * One thread per pixel on a 2D grid. */
__global__ void tonemap(
  float* d_x,         // chromaticity x per pixel
  float* d_y,         // chromaticity y per pixel
  float* d_log_Y,     // log10 luminance per pixel
  float* d_cdf_norm,  // normalized CDF over num_bins luminance bins
  float* d_r_new,     // output red channel
  float* d_g_new,     // output green channel
  float* d_b_new,     // output blue channel
  float min_log_Y,
  float max_log_Y,
  float log_Y_range,  // max_log_Y - min_log_Y
  int num_bins,
  int num_pixels_y,
  int num_pixels_x )
{
  int ny = num_pixels_y;
  int nx = num_pixels_x;
  int2 image_index_2d = make_int2( ( blockIdx.x * blockDim.x ) + threadIdx.x, ( blockIdx.y * blockDim.y ) + threadIdx.y );
  int image_index_1d = ( nx * image_index_2d.y ) + image_index_2d.x;
  if ( image_index_2d.x < nx && image_index_2d.y < ny )
  {
    float x = d_x[ image_index_1d ];
    float y = d_y[ image_index_1d ];
    float log_Y = d_log_Y[ image_index_1d ];
    // clamp the bin index to [0, num_bins-1]
    int bin_index = min( num_bins - 1, int( (num_bins * ( log_Y - min_log_Y ) ) / log_Y_range ) );
    float Y_new = d_cdf_norm[ bin_index ];
    // xyY -> XYZ with the remapped luminance
    float X_new = x * ( Y_new / y );
    float Z_new = ( 1 - x - y ) * ( Y_new / y );
    // XYZ -> sRGB
    float r_new = ( X_new * 3.2406f ) + ( Y_new * -1.5372f ) + ( Z_new * -0.4986f );
    float g_new = ( X_new * -0.9689f ) + ( Y_new * 1.8758f ) + ( Z_new * 0.0415f );
    float b_new = ( X_new * 0.0557f ) + ( Y_new * -0.2040f ) + ( Z_new * 1.0570f );
    d_r_new[ image_index_1d ] = r_new;
    d_g_new[ image_index_1d ] = g_new;
    d_b_new[ image_index_1d ] = b_new;
  }
}
//return types are void since any internal error will be handled by quitting
//no point in returning error codes...
// Loads an HDR image, splits the interleaved BGR floats into channels,
// uploads them, and converts to chrominance/luminance (xyY) space on the GPU.
// Outputs: *d_luminance -> device log-luminance buffer (module-static
// d_logY__), *d_cdf -> zeroed device histogram/CDF buffer (d_cdf__), plus the
// image dimensions and bin count.  The module-static device buffers persist
// until cleanupGlobalMemory() is called.
void preProcess(float** d_luminance, unsigned int** d_cdf,
                size_t *numRows, size_t *numCols,
                unsigned int *numberOfBins,
                const std::string &filename) {
  //make sure the context initializes ok
  checkCudaErrors(hipFree(0));
  float *imgPtr; //we will become responsible for this pointer
  loadImageHDR(filename, &imgPtr, &numRows__, &numCols__);
  *numRows = numRows__;
  *numCols = numCols__;
  //first thing to do is split incoming BGR float data into separate channels
  size_t numPixels = numRows__ * numCols__;
  float *red   = new float[numPixels];
  float *green = new float[numPixels];
  float *blue  = new float[numPixels];
  //Remember: the image is loaded BGR-interleaved
  for (size_t i = 0; i < numPixels; ++i) {
    blue[i]  = imgPtr[3 * i + 0];
    green[i] = imgPtr[3 * i + 1];
    red[i]   = imgPtr[3 * i + 2];
  }
  delete[] imgPtr; //being good citizens are releasing resources
                   //allocated in loadImageHDR
  float *d_red, *d_green, *d_blue;  //RGB space
  size_t channelSize = sizeof(float) * numPixels;
  checkCudaErrors(hipMalloc(&d_red,    channelSize));
  checkCudaErrors(hipMalloc(&d_green,  channelSize));
  checkCudaErrors(hipMalloc(&d_blue,   channelSize));
  checkCudaErrors(hipMalloc(&d_x__,    channelSize));
  checkCudaErrors(hipMalloc(&d_y__,    channelSize));
  checkCudaErrors(hipMalloc(&d_logY__, channelSize));
  checkCudaErrors(hipMemcpy(d_red,   red,   channelSize, hipMemcpyHostToDevice));
  checkCudaErrors(hipMemcpy(d_green, green, channelSize, hipMemcpyHostToDevice));
  checkCudaErrors(hipMemcpy(d_blue,  blue,  channelSize, hipMemcpyHostToDevice));
  //convert from RGB space to chrominance/luminance space xyY
  const dim3 blockSize(32, 16, 1);
  const dim3 gridSize( (numCols__ + blockSize.x - 1) / blockSize.x,
                       (numRows__ + blockSize.y - 1) / blockSize.y, 1);
  hipLaunchKernelGGL(( rgb_to_xyY), dim3(gridSize), dim3(blockSize), 0, 0, d_red, d_green, d_blue,
                                                    d_x__, d_y__, d_logY__,
                                                    .0001f, numRows__, numCols__);
  hipDeviceSynchronize(); checkCudaErrors(hipGetLastError());
  *d_luminance = d_logY__;
  //allocate memory for the cdf of the histogram
  *numberOfBins = numBins;
  checkCudaErrors(hipMalloc(&d_cdf__, sizeof(unsigned int) * numBins));
  checkCudaErrors(hipMemset(d_cdf__, 0, sizeof(unsigned int) * numBins));
  *d_cdf = d_cdf__;
  checkCudaErrors(hipFree(d_red));
  checkCudaErrors(hipFree(d_green));
  checkCudaErrors(hipFree(d_blue));
  delete[] red;
  delete[] green;
  delete[] blue;
}
// Normalizes the luminance CDF, tone-maps the xyY image held in the
// module-static d_x__/d_y__/d_logY__ buffers (filled by preProcess), converts
// back to RGB, and writes the BGR-interleaved result to output_file.
void postProcess(const std::string& output_file,
                 size_t numRows, size_t numCols,
                 float min_log_Y, float max_log_Y) {
  const int numPixels = numRows__ * numCols__;
  const int numThreads = 192;
  float *d_cdf_normalized;
  checkCudaErrors(hipMalloc(&d_cdf_normalized, sizeof(float) * numBins));
  //first normalize the cdf to a maximum value of 1
  //this is how we compress the range of the luminance channel
  hipLaunchKernelGGL(( normalize_cdf), dim3((numBins + numThreads - 1) / numThreads),
                     dim3(numThreads), 0, 0, d_cdf__,
                                            d_cdf_normalized,
                                            numBins);
  hipDeviceSynchronize(); checkCudaErrors(hipGetLastError());
  //allocate memory for the output RGB channels
  float *h_red, *h_green, *h_blue;
  float *d_red, *d_green, *d_blue;
  h_red   = new float[numPixels];
  h_green = new float[numPixels];
  h_blue  = new float[numPixels];
  checkCudaErrors(hipMalloc(&d_red,   sizeof(float) * numPixels));
  checkCudaErrors(hipMalloc(&d_green, sizeof(float) * numPixels));
  checkCudaErrors(hipMalloc(&d_blue,  sizeof(float) * numPixels));
  float log_Y_range = max_log_Y - min_log_Y;
  const dim3 blockSize(32, 16, 1);
  const dim3 gridSize( (numCols + blockSize.x - 1) / blockSize.x,
                       (numRows + blockSize.y - 1) / blockSize.y );
  //next perform the actual tone-mapping
  //we map each luminance value to its new value
  //and then transform back to RGB space
  hipLaunchKernelGGL(( tonemap), dim3(gridSize), dim3(blockSize), 0, 0, d_x__, d_y__, d_logY__,
                                                 d_cdf_normalized,
                                                 d_red, d_green, d_blue,
                                                 min_log_Y, max_log_Y,
                                                 log_Y_range, numBins,
                                                 numRows, numCols);
  hipDeviceSynchronize(); checkCudaErrors(hipGetLastError());
  checkCudaErrors(hipMemcpy(h_red,   d_red,   sizeof(float) * numPixels, hipMemcpyDeviceToHost));
  checkCudaErrors(hipMemcpy(h_green, d_green, sizeof(float) * numPixels, hipMemcpyDeviceToHost));
  checkCudaErrors(hipMemcpy(h_blue,  d_blue,  sizeof(float) * numPixels, hipMemcpyDeviceToHost));
  //recombine the image channels (BGR interleaved, matching the loader)
  float *imageHDR = new float[numPixels * 3];
  for (int i = 0; i < numPixels; ++i) {
    imageHDR[3 * i + 0] = h_blue[i];
    imageHDR[3 * i + 1] = h_green[i];
    imageHDR[3 * i + 2] = h_red[i];
  }
  saveImageHDR(imageHDR, numRows, numCols, output_file);
  delete[] imageHDR;
  delete[] h_red;
  delete[] h_green;
  delete[] h_blue;
  //cleanup
  checkCudaErrors(hipFree(d_cdf_normalized));
  checkCudaErrors(hipFree(d_red));    // fix: these three device buffers were
  checkCudaErrors(hipFree(d_green));  // previously leaked on every call
  checkCudaErrors(hipFree(d_blue));
}
// Releases the module-static device buffers allocated by preProcess
// (chromaticity x/y, log-luminance, and the histogram CDF).
void cleanupGlobalMemory(void)
{
  checkCudaErrors(hipFree(d_x__));
  checkCudaErrors(hipFree(d_y__));
  checkCudaErrors(hipFree(d_logY__));
  checkCudaErrors(hipFree(d_cdf__));
}
|
a2a2aecf20dc39871d55bc26110481ef295f7eeb.cu
|
#include "utils.h"
#include <string>
#include "loadSaveImage.h"
#include "device_launch_parameters.h"
#include <thrust/extrema.h>
//chroma-LogLuminance Space
static float *d_x__;
static float *d_y__;
static float *d_logY__;
//memory for the cdf
static unsigned int *d_cdf__;
static const int numBins = 1024;
size_t numRows__;
size_t numCols__;
/* Copied from Mike's IPython notebook with some minor modifications
* Mainly double precision constants to floats and log10 -> log10f
* Also removed Luminance (Y) channel since it is never used eke*/
/* Converts per-channel R,G,B images into CIE chromaticity (x, y) and
 * log10-luminance log10f(delta + Y) via the RGB -> XYZ matrix below.
 * One thread per pixel on a 2D grid; threads outside the image do nothing.
 * NOTE(review): x and y divide by L = X + Y + Z, which is 0 for an all-zero
 * pixel and yields NaN — confirm the loader never produces pure black. */
__global__ void rgb_to_xyY(
  float* d_r,      // input red channel, nx*ny floats
  float* d_g,      // input green channel
  float* d_b,      // input blue channel
  float* d_x,      // output chromaticity x
  float* d_y,      // output chromaticity y
  float* d_log_Y,  // output log10 luminance
  float delta,     // small offset so log10f never sees zero
  int num_pixels_y,
  int num_pixels_x )
{
  int ny = num_pixels_y;
  int nx = num_pixels_x;
  int2 image_index_2d = make_int2( ( blockIdx.x * blockDim.x ) + threadIdx.x, ( blockIdx.y * blockDim.y ) + threadIdx.y );
  int image_index_1d = ( nx * image_index_2d.y ) + image_index_2d.x;
  if ( image_index_2d.x < nx && image_index_2d.y < ny )
  {
    float r = d_r[ image_index_1d ];
    float g = d_g[ image_index_1d ];
    float b = d_b[ image_index_1d ];
    // sRGB -> CIE XYZ
    float X = ( r * 0.4124f ) + ( g * 0.3576f ) + ( b * 0.1805f );
    float Y = ( r * 0.2126f ) + ( g * 0.7152f ) + ( b * 0.0722f );
    float Z = ( r * 0.0193f ) + ( g * 0.1192f ) + ( b * 0.9505f );
    float L = X + Y + Z;
    float x = X / L;
    float y = Y / L;
    float log_Y = log10f( delta + Y );
    d_x[ image_index_1d ] = x;
    d_y[ image_index_1d ] = y;
    d_log_Y[ image_index_1d ] = log_Y;
  }
}
/* Copied from Mike's IPython notebook *
Modified just by having threads read the
normalization constant directly from device memory
instead of copying it back */
/* Scales an unsigned-int CDF into floats in [0, 1] by dividing by its last
 * entry d_input_cdf[n-1].  Every thread reads the normalization constant
 * directly from device memory (no host round-trip); one thread per bin. */
__global__ void normalize_cdf(
  unsigned int* d_input_cdf,   // unnormalized CDF, n entries
  float* d_output_cdf,         // output CDF in [0, 1]
  int n
  )
{
  const float normalization_constant = 1.f / d_input_cdf[n - 1];
  int global_index_1d = ( blockIdx.x * blockDim.x ) + threadIdx.x;
  if ( global_index_1d < n )
  {
    unsigned int input_value = d_input_cdf[ global_index_1d ];
    float output_value = input_value * normalization_constant;
    d_output_cdf[ global_index_1d ] = output_value;
  }
}
/* Copied from Mike's IPython notebook *
Modified double constants -> float *
Perform tone mapping based upon new *
luminance scaling */
/* Tone-maps one pixel: looks up the pixel's log-luminance bin in the
 * normalized CDF to get the remapped luminance Y_new (histogram equalization
 * in log-luminance), then converts (x, y, Y_new) back through XYZ to RGB.
 * One thread per pixel on a 2D grid. */
__global__ void tonemap(
  float* d_x,         // chromaticity x per pixel
  float* d_y,         // chromaticity y per pixel
  float* d_log_Y,     // log10 luminance per pixel
  float* d_cdf_norm,  // normalized CDF over num_bins luminance bins
  float* d_r_new,     // output red channel
  float* d_g_new,     // output green channel
  float* d_b_new,     // output blue channel
  float min_log_Y,
  float max_log_Y,
  float log_Y_range,  // max_log_Y - min_log_Y
  int num_bins,
  int num_pixels_y,
  int num_pixels_x )
{
  int ny = num_pixels_y;
  int nx = num_pixels_x;
  int2 image_index_2d = make_int2( ( blockIdx.x * blockDim.x ) + threadIdx.x, ( blockIdx.y * blockDim.y ) + threadIdx.y );
  int image_index_1d = ( nx * image_index_2d.y ) + image_index_2d.x;
  if ( image_index_2d.x < nx && image_index_2d.y < ny )
  {
    float x = d_x[ image_index_1d ];
    float y = d_y[ image_index_1d ];
    float log_Y = d_log_Y[ image_index_1d ];
    // clamp the bin index to [0, num_bins-1]
    int bin_index = min( num_bins - 1, int( (num_bins * ( log_Y - min_log_Y ) ) / log_Y_range ) );
    float Y_new = d_cdf_norm[ bin_index ];
    // xyY -> XYZ with the remapped luminance
    float X_new = x * ( Y_new / y );
    float Z_new = ( 1 - x - y ) * ( Y_new / y );
    // XYZ -> sRGB
    float r_new = ( X_new *  3.2406f ) + ( Y_new * -1.5372f ) + ( Z_new * -0.4986f );
    float g_new = ( X_new * -0.9689f ) + ( Y_new *  1.8758f ) + ( Z_new *  0.0415f );
    float b_new = ( X_new *  0.0557f ) + ( Y_new * -0.2040f ) + ( Z_new *  1.0570f );
    d_r_new[ image_index_1d ] = r_new;
    d_g_new[ image_index_1d ] = g_new;
    d_b_new[ image_index_1d ] = b_new;
  }
}
//return types are void since any internal error will be handled by quitting
//no point in returning error codes...
// Loads an HDR image, splits the interleaved BGR floats into channels,
// uploads them, and converts to chrominance/luminance (xyY) space on the GPU.
// Outputs: *d_luminance -> device log-luminance buffer (module-static
// d_logY__), *d_cdf -> zeroed device histogram/CDF buffer (d_cdf__), plus the
// image dimensions and bin count.  The module-static device buffers persist
// until cleanupGlobalMemory() is called.
void preProcess(float** d_luminance, unsigned int** d_cdf,
                size_t *numRows, size_t *numCols,
                unsigned int *numberOfBins,
                const std::string &filename) {
  //make sure the context initializes ok
  checkCudaErrors(cudaFree(0));
  float *imgPtr; //we will become responsible for this pointer
  loadImageHDR(filename, &imgPtr, &numRows__, &numCols__);
  *numRows = numRows__;
  *numCols = numCols__;
  //first thing to do is split incoming BGR float data into separate channels
  size_t numPixels = numRows__ * numCols__;
  float *red   = new float[numPixels];
  float *green = new float[numPixels];
  float *blue  = new float[numPixels];
  //Remember: the image is loaded BGR-interleaved
  for (size_t i = 0; i < numPixels; ++i) {
    blue[i]  = imgPtr[3 * i + 0];
    green[i] = imgPtr[3 * i + 1];
    red[i]   = imgPtr[3 * i + 2];
  }
  delete[] imgPtr; //being good citizens are releasing resources
                   //allocated in loadImageHDR
  float *d_red, *d_green, *d_blue;  //RGB space
  size_t channelSize = sizeof(float) * numPixels;
  checkCudaErrors(cudaMalloc(&d_red,    channelSize));
  checkCudaErrors(cudaMalloc(&d_green,  channelSize));
  checkCudaErrors(cudaMalloc(&d_blue,   channelSize));
  checkCudaErrors(cudaMalloc(&d_x__,    channelSize));
  checkCudaErrors(cudaMalloc(&d_y__,    channelSize));
  checkCudaErrors(cudaMalloc(&d_logY__, channelSize));
  checkCudaErrors(cudaMemcpy(d_red,   red,   channelSize, cudaMemcpyHostToDevice));
  checkCudaErrors(cudaMemcpy(d_green, green, channelSize, cudaMemcpyHostToDevice));
  checkCudaErrors(cudaMemcpy(d_blue,  blue,  channelSize, cudaMemcpyHostToDevice));
  //convert from RGB space to chrominance/luminance space xyY
  const dim3 blockSize(32, 16, 1);
  const dim3 gridSize( (numCols__ + blockSize.x - 1) / blockSize.x,
                       (numRows__ + blockSize.y - 1) / blockSize.y, 1);
  rgb_to_xyY<<<gridSize, blockSize>>>(d_red, d_green, d_blue,
                                      d_x__, d_y__, d_logY__,
                                      .0001f, numRows__, numCols__);
  cudaDeviceSynchronize(); checkCudaErrors(cudaGetLastError());
  *d_luminance = d_logY__;
  //allocate memory for the cdf of the histogram
  *numberOfBins = numBins;
  checkCudaErrors(cudaMalloc(&d_cdf__, sizeof(unsigned int) * numBins));
  checkCudaErrors(cudaMemset(d_cdf__, 0, sizeof(unsigned int) * numBins));
  *d_cdf = d_cdf__;
  checkCudaErrors(cudaFree(d_red));
  checkCudaErrors(cudaFree(d_green));
  checkCudaErrors(cudaFree(d_blue));
  delete[] red;
  delete[] green;
  delete[] blue;
}
// Normalizes the luminance CDF, tone-maps the image back to RGB on the GPU,
// copies the channels to the host, recombines them as interleaved BGR, and
// writes the result to output_file.
// min_log_Y / max_log_Y bound the log-luminance range used for binning.
// Errors are handled internally via checkCudaErrors (which quits).
void postProcess(const std::string& output_file,
                 size_t numRows, size_t numCols,
                 float min_log_Y, float max_log_Y) {
  const int numPixels = numRows__ * numCols__;
  const int numThreads = 192;
  float *d_cdf_normalized;
  checkCudaErrors(cudaMalloc(&d_cdf_normalized, sizeof(float) * numBins));
  // First normalize the cdf to a maximum value of 1 — this is how we
  // compress the range of the luminance channel.
  normalize_cdf<<< (numBins + numThreads - 1) / numThreads,
                   numThreads>>>(d_cdf__,
                                 d_cdf_normalized,
                                 numBins);
  cudaDeviceSynchronize(); checkCudaErrors(cudaGetLastError());
  // Allocate memory for the output RGB channels (host + device).
  float *h_red, *h_green, *h_blue;
  float *d_red, *d_green, *d_blue;
  h_red = new float[numPixels];
  h_green = new float[numPixels];
  h_blue = new float[numPixels];
  checkCudaErrors(cudaMalloc(&d_red, sizeof(float) * numPixels));
  checkCudaErrors(cudaMalloc(&d_green, sizeof(float) * numPixels));
  checkCudaErrors(cudaMalloc(&d_blue, sizeof(float) * numPixels));
  float log_Y_range = max_log_Y - min_log_Y;
  const dim3 blockSize(32, 16, 1);
  const dim3 gridSize( (numCols + blockSize.x - 1) / blockSize.x,
                       (numRows + blockSize.y - 1) / blockSize.y );
  // Next perform the actual tone-mapping: map each luminance value to its
  // new value and transform back to RGB space.
  tonemap<<<gridSize, blockSize>>>(d_x__, d_y__, d_logY__,
                                   d_cdf_normalized,
                                   d_red, d_green, d_blue,
                                   min_log_Y, max_log_Y,
                                   log_Y_range, numBins,
                                   numRows, numCols);
  cudaDeviceSynchronize(); checkCudaErrors(cudaGetLastError());
  checkCudaErrors(cudaMemcpy(h_red, d_red, sizeof(float) * numPixels, cudaMemcpyDeviceToHost));
  checkCudaErrors(cudaMemcpy(h_green, d_green, sizeof(float) * numPixels, cudaMemcpyDeviceToHost));
  checkCudaErrors(cudaMemcpy(h_blue, d_blue, sizeof(float) * numPixels, cudaMemcpyDeviceToHost));
  // Recombine the image channels (saved interleaved as B, G, R).
  float *imageHDR = new float[numPixels * 3];
  for (int i = 0; i < numPixels; ++i) {
    imageHDR[3 * i + 0] = h_blue[i];
    imageHDR[3 * i + 1] = h_green[i];
    imageHDR[3 * i + 2] = h_red[i];
  }
  saveImageHDR(imageHDR, numRows, numCols, output_file);
  delete[] imageHDR;
  delete[] h_red;
  delete[] h_green;
  delete[] h_blue;
  // Cleanup.
  checkCudaErrors(cudaFree(d_cdf_normalized));
  // BUGFIX: d_red/d_green/d_blue were allocated above but never released,
  // leaking 3 * numPixels * sizeof(float) bytes of device memory per call.
  checkCudaErrors(cudaFree(d_red));
  checkCudaErrors(cudaFree(d_green));
  checkCudaErrors(cudaFree(d_blue));
}
// Releases the device-side globals allocated in preProcess: the chrominance
// x/y channels, the log-luminance channel, and the histogram CDF buffer.
// Call once after postProcess, before program exit.
void cleanupGlobalMemory(void)
{
  checkCudaErrors(cudaFree(d_x__));
  checkCudaErrors(cudaFree(d_y__));
  checkCudaErrors(cudaFree(d_logY__));
  checkCudaErrors(cudaFree(d_cdf__));
}
|
4c318c5d4b53c4dc85c9eacea1128cac30a40116.hip
|
// !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
#pragma region License
/*
The MIT License
Copyright (c) 2009 Sky Morey
Permission is hereby granted, free of charge, to any person obtaining a copy
of this software and associated documentation files (the "Software"), to deal
in the Software without restriction, including without limitation the rights
to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
copies of the Software, and to permit persons to whom the Software is
furnished to do so, subject to the following conditions:
The above copyright notice and this permission notice shall be included in
all copies or substantial portions of the Software.
THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
THE SOFTWARE.
*/
#pragma endregion
#ifndef CUFALLOCWTRACE_C
#define CUFALLOCWTRACE_C
#include "cuFalloc.cu"
// Size in bytes of the device-resident trace buffer streamed back per pass.
const static int TRACEHEAP_SIZE = 2048;
// State of one trace session, mirrored between host and device;
// `deviceTrace` points at the device-resident copy of this struct.
typedef struct __align__(8) _cudaFallocTrace {
volatile __int8* trace;              // next write position inside the trace buffer
cuFallocHeapChunk* lastChunk;        // resume point for the heap walk (null on first pass)
int contextIndex;
bool complete;                       // set once the whole heap has been walked
struct _cudaFallocTrace* deviceTrace; // device-side copy of this struct
} fallocTrace;
// One record emitted into the trace buffer per heap chunk.
typedef struct {
unsigned short magic;  // CUFALLOCTRACE_MAGIC marks a valid record
unsigned short count;  // chunk size in CHUNKSIZEALIGN units (copied from the heap chunk)
bool free;             // whether the chunk was free at trace time
bool showDetail;       // detail flag carried over from chunk->reserved
} traceChunk;
// All our headers are prefixed with a magic number so we know they're ready
#define CUFALLOCTRACE_MAGIC (unsigned short)0x0A0A
// Walks the falloc heap chunk-by-chunk and emits one traceChunk record per
// chunk into the device trace buffer. Launched <<<1,1>>>; resumes from
// deviceTrace->lastChunk so heaps larger than TRACEHEAP_SIZE can be streamed
// over several passes.
__global__ void FallocWTrace(fallocDeviceHeap* deviceHeap, fallocTrace* deviceTrace) {
	volatile __int8* trace = deviceTrace->trace;
	if (!trace)
		__THROW;
	// Reserve room for the terminating magic sentinel at the end of the buffer.
	volatile __int8* endTrace = trace + TRACEHEAP_SIZE - sizeof(CUFALLOCTRACE_MAGIC);
	cuFallocHeapChunk* chunk = deviceTrace->lastChunk;
	if (!chunk) {
		// First pass: start at the first chunk and emit the chunk-count header.
		chunk = (cuFallocHeapChunk*)((__int8*)deviceHeap + sizeof(fallocDeviceHeap));
		*((int*)trace) = deviceHeap->chunks; trace += sizeof(int);
	}
	volatile cuFallocHeapChunk* endChunk = (cuFallocHeapChunk*)((__int8*)deviceHeap + sizeof(fallocDeviceHeap) + (CHUNKSIZEALIGN * deviceHeap->chunks));
	for (; (trace < endTrace) && (chunk < endChunk); trace += sizeof(traceChunk), chunk = (cuFallocHeapChunk*)((__int8*)chunk + (CHUNKSIZEALIGN * chunk->count))) {
		if (chunk->magic != CUFALLOC_MAGIC)
			__THROW;
		// Emit one record for this chunk.
		traceChunk* w = (traceChunk*)trace;
		w->magic = CUFALLOCTRACE_MAGIC;
		w->count = chunk->count;
		if (chunk->next)
			w->free = true;
		else {
			// BUGFIX: scan the free list until this chunk is found or the list
			// ends. The original condition ((chunk2 == chunk) || (chunk2 !=
			// nullptr)) kept iterating PAST a match and only stopped at nullptr,
			// so w->free could never be reported true on this path.
			volatile cuFallocHeapChunk* chunk2;
			for (chunk2 = deviceHeap->freeChunks; (chunk2 != chunk) && (chunk2 != nullptr); chunk2 = chunk2->next) ;
			w->free = (chunk2 == chunk);
		}
		w->showDetail = (bool)chunk->reserved;
		if ((!w->free) && (w->showDetail)) {
			/* NEED */
		}
	}
	deviceTrace->lastChunk = chunk;
	// complete == true when the heap walk finished before the buffer filled.
	deviceTrace->complete = (trace < endTrace);
	if (deviceTrace->complete) {
		// Terminate the stream with an all-ones sentinel word.
		*((unsigned short*)trace) = -1; trace += sizeof(CUFALLOCTRACE_MAGIC);
	}
	deviceTrace->trace = trace;
}
///////////////////////////////////////////////////////////////////////////////
// HOST SIDE
//
// cudaFallocWTraceInit
//
// Takes a buffer length to allocate, creates the memory on the device and
// returns a pointer to it for when a kernel is called. It's up to the caller
// to free it.
//
// Allocates the device-side trace buffer and forwards to cudaFallocInit,
// which builds the falloc heap of `length` bytes around it. On allocation
// failure a zeroed heap struct is returned (and *error is filled in when the
// caller supplied an error out-pointer). The caller must eventually release
// the result with cudaFallocWTraceEnd.
extern "C" cudaFallocHeap cudaFallocWTraceInit(size_t length, hipError_t* error) {
	cudaFallocHeap heap;
	memset(&heap, 0, sizeof(cudaFallocHeap));
	// Allocate the trace buffer on the device, reporting the status through
	// *error when one was provided.
	void* deviceTrace;
	hipError_t status = hipMalloc((void**)&deviceTrace, TRACEHEAP_SIZE);
	if (error)
		*error = status;
	if (status != hipSuccess)
		return heap;
	return cudaFallocInit(length, error, deviceTrace);
}
//
// cudaFallocWTraceEnd
//
// Frees up the memory which we allocated
//
// Frees the device trace buffer and then the falloc heap itself.
// No-op when the heap was never successfully initialised.
extern "C" void cudaFallocWTraceEnd(cudaFallocHeap &heap) {
	if (!heap.deviceHeap)
		return;
	// heap.reserved holds the trace buffer allocated in cudaFallocWTraceInit.
	hipFree(heap.reserved); heap.reserved = nullptr;
	// Release the heap proper.
	cudaFallocEnd(heap);
}
//
// cuFallocSetTraceInfo
//
// Sets a trace Info.
//
// Stub: trace-detail metadata is not implemented yet — both parameters are
// currently ignored (the detailed branch in FallocWTrace is also empty).
extern "C" void cuFallocSetTraceInfo(size_t id, bool showDetail) {
}
//
// cudaFallocTraceInit
//
// Creates a trace Stream.
//
// Creates a host-side trace stream and its device-resident mirror.
// Returns the zero-initialised host struct, or nullptr when either
// allocation fails (the original version dereferenced an unchecked malloc
// result and ignored the hipMalloc status).
extern "C" fallocTrace* cudaFallocTraceInit() {
	fallocTrace* trace = (fallocTrace*)malloc(sizeof(fallocTrace));
	if (!trace)
		return nullptr;
	memset(trace, 0, sizeof(fallocTrace));
	// Allocate the device-side mirror that FallocWTrace reads/updates.
	if (hipMalloc(&trace->deviceTrace, sizeof(fallocTrace)) != hipSuccess) {
		free(trace);
		return nullptr;
	}
	return trace;
}
//
// cudaFallocTraceStream
//
// Streams till empty.
//
// Streams the next window of trace records into heap.reserved. Sets `length`
// to the number of valid bytes and returns the buffer; returns nullptr with
// length 0 once the stream is exhausted or when a transfer fails.
extern "C" void* cudaFallocTraceStream(cudaFallocHeap &heap, fallocTrace* trace, size_t &length) {
	if (trace->complete) {
		length = 0;
		return nullptr;
	}
	// Each pass starts writing at the beginning of the device trace buffer.
	trace->trace = (volatile __int8*)heap.reserved;
	// BUGFIX: the copy status used to be stored in an unused size_t (`r`) and
	// silently dropped; check both transfers so a failure yields an empty
	// stream instead of garbage.
	if (hipMemcpy(trace->deviceTrace, trace, sizeof(fallocTrace), hipMemcpyHostToDevice) != hipSuccess) {
		length = 0;
		return nullptr;
	}
	hipLaunchKernelGGL(( FallocWTrace), dim3(1), dim3(1), 0, 0, heap.deviceHeap, trace->deviceTrace);
	if (hipMemcpy(trace, trace->deviceTrace, sizeof(fallocTrace), hipMemcpyDeviceToHost) != hipSuccess) {
		length = 0;
		return nullptr;
	}
	// Bytes written == distance from the buffer start to the updated cursor.
	length = (__int8*)trace->trace - heap.reserved;
	return heap.reserved;
}
//
// cudaFallocTraceEnd
//
// Frees a trace Stream.
//
// Frees a trace stream created by cudaFallocTraceInit:
// device-side mirror first, then the host struct.
extern "C" void cudaFallocTraceEnd(fallocTrace* trace) {
	hipFree(trace->deviceTrace);
	free(trace);
}
#endif // CUFALLOCWTRACE_C
|
4c318c5d4b53c4dc85c9eacea1128cac30a40116.cu
|
#pragma region License
/*
The MIT License
Copyright (c) 2009 Sky Morey
Permission is hereby granted, free of charge, to any person obtaining a copy
of this software and associated documentation files (the "Software"), to deal
in the Software without restriction, including without limitation the rights
to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
copies of the Software, and to permit persons to whom the Software is
furnished to do so, subject to the following conditions:
The above copyright notice and this permission notice shall be included in
all copies or substantial portions of the Software.
THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
THE SOFTWARE.
*/
#pragma endregion
#ifndef CUFALLOCWTRACE_C
#define CUFALLOCWTRACE_C
#include "cuFalloc.cu"
// Size in bytes of the device-resident trace buffer streamed back per pass.
const static int TRACEHEAP_SIZE = 2048;
// State of one trace session, mirrored between host and device;
// `deviceTrace` points at the device-resident copy of this struct.
typedef struct __align__(8) _cudaFallocTrace {
volatile __int8* trace;              // next write position inside the trace buffer
cuFallocHeapChunk* lastChunk;        // resume point for the heap walk (null on first pass)
int contextIndex;
bool complete;                       // set once the whole heap has been walked
struct _cudaFallocTrace* deviceTrace; // device-side copy of this struct
} fallocTrace;
// One record emitted into the trace buffer per heap chunk.
typedef struct {
unsigned short magic;  // CUFALLOCTRACE_MAGIC marks a valid record
unsigned short count;  // chunk size in CHUNKSIZEALIGN units (copied from the heap chunk)
bool free;             // whether the chunk was free at trace time
bool showDetail;       // detail flag carried over from chunk->reserved
} traceChunk;
// All our headers are prefixed with a magic number so we know they're ready
#define CUFALLOCTRACE_MAGIC (unsigned short)0x0A0A
// Walks the falloc heap chunk-by-chunk and emits one traceChunk record per
// chunk into the device trace buffer. Launched <<<1,1>>>; resumes from
// deviceTrace->lastChunk so heaps larger than TRACEHEAP_SIZE can be streamed
// over several passes.
__global__ void FallocWTrace(fallocDeviceHeap* deviceHeap, fallocTrace* deviceTrace) {
	volatile __int8* trace = deviceTrace->trace;
	if (!trace)
		__THROW;
	// Reserve room for the terminating magic sentinel at the end of the buffer.
	volatile __int8* endTrace = trace + TRACEHEAP_SIZE - sizeof(CUFALLOCTRACE_MAGIC);
	cuFallocHeapChunk* chunk = deviceTrace->lastChunk;
	if (!chunk) {
		// First pass: start at the first chunk and emit the chunk-count header.
		chunk = (cuFallocHeapChunk*)((__int8*)deviceHeap + sizeof(fallocDeviceHeap));
		*((int*)trace) = deviceHeap->chunks; trace += sizeof(int);
	}
	volatile cuFallocHeapChunk* endChunk = (cuFallocHeapChunk*)((__int8*)deviceHeap + sizeof(fallocDeviceHeap) + (CHUNKSIZEALIGN * deviceHeap->chunks));
	for (; (trace < endTrace) && (chunk < endChunk); trace += sizeof(traceChunk), chunk = (cuFallocHeapChunk*)((__int8*)chunk + (CHUNKSIZEALIGN * chunk->count))) {
		if (chunk->magic != CUFALLOC_MAGIC)
			__THROW;
		// Emit one record for this chunk.
		traceChunk* w = (traceChunk*)trace;
		w->magic = CUFALLOCTRACE_MAGIC;
		w->count = chunk->count;
		if (chunk->next)
			w->free = true;
		else {
			// BUGFIX: scan the free list until this chunk is found or the list
			// ends. The original condition ((chunk2 == chunk) || (chunk2 !=
			// nullptr)) kept iterating PAST a match and only stopped at nullptr,
			// so w->free could never be reported true on this path.
			volatile cuFallocHeapChunk* chunk2;
			for (chunk2 = deviceHeap->freeChunks; (chunk2 != chunk) && (chunk2 != nullptr); chunk2 = chunk2->next) ;
			w->free = (chunk2 == chunk);
		}
		w->showDetail = (bool)chunk->reserved;
		if ((!w->free) && (w->showDetail)) {
			/* NEED */
		}
	}
	deviceTrace->lastChunk = chunk;
	// complete == true when the heap walk finished before the buffer filled.
	deviceTrace->complete = (trace < endTrace);
	if (deviceTrace->complete) {
		// Terminate the stream with an all-ones sentinel word.
		*((unsigned short*)trace) = -1; trace += sizeof(CUFALLOCTRACE_MAGIC);
	}
	deviceTrace->trace = trace;
}
///////////////////////////////////////////////////////////////////////////////
// HOST SIDE
//
// cudaFallocWTraceInit
//
// Takes a buffer length to allocate, creates the memory on the device and
// returns a pointer to it for when a kernel is called. It's up to the caller
// to free it.
//
// Allocates the device-side trace buffer and forwards to cudaFallocInit,
// which builds the falloc heap of `length` bytes around it. On allocation
// failure a zeroed heap struct is returned (and *error is filled in when the
// caller supplied an error out-pointer). The caller must eventually release
// the result with cudaFallocWTraceEnd.
extern "C" cudaFallocHeap cudaFallocWTraceInit(size_t length, cudaError_t* error) {
	cudaFallocHeap heap;
	memset(&heap, 0, sizeof(cudaFallocHeap));
	// Allocate the trace buffer on the device, reporting the status through
	// *error when one was provided.
	void* deviceTrace;
	cudaError_t status = cudaMalloc((void**)&deviceTrace, TRACEHEAP_SIZE);
	if (error)
		*error = status;
	if (status != cudaSuccess)
		return heap;
	return cudaFallocInit(length, error, deviceTrace);
}
//
// cudaFallocWTraceEnd
//
// Frees up the memory which we allocated
//
// Frees the device trace buffer and then the falloc heap itself.
// No-op when the heap was never successfully initialised.
extern "C" void cudaFallocWTraceEnd(cudaFallocHeap &heap) {
	if (!heap.deviceHeap)
		return;
	// heap.reserved holds the trace buffer allocated in cudaFallocWTraceInit.
	cudaFree(heap.reserved); heap.reserved = nullptr;
	// Release the heap proper.
	cudaFallocEnd(heap);
}
//
// cuFallocSetTraceInfo
//
// Sets a trace Info.
//
// Stub: trace-detail metadata is not implemented yet — both parameters are
// currently ignored (the detailed branch in FallocWTrace is also empty).
extern "C" void cuFallocSetTraceInfo(size_t id, bool showDetail) {
}
//
// cudaFallocTraceInit
//
// Creates a trace Stream.
//
// Creates a host-side trace stream and its device-resident mirror.
// Returns the zero-initialised host struct, or nullptr when either
// allocation fails (the original version dereferenced an unchecked malloc
// result and ignored the cudaMalloc status).
extern "C" fallocTrace* cudaFallocTraceInit() {
	fallocTrace* trace = (fallocTrace*)malloc(sizeof(fallocTrace));
	if (!trace)
		return nullptr;
	memset(trace, 0, sizeof(fallocTrace));
	// Allocate the device-side mirror that FallocWTrace reads/updates.
	if (cudaMalloc(&trace->deviceTrace, sizeof(fallocTrace)) != cudaSuccess) {
		free(trace);
		return nullptr;
	}
	return trace;
}
//
// cudaFallocTraceStream
//
// Streams till empty.
//
// Streams the next window of trace records into heap.reserved. Sets `length`
// to the number of valid bytes and returns the buffer; returns nullptr with
// length 0 once the stream is exhausted or when a transfer fails.
extern "C" void* cudaFallocTraceStream(cudaFallocHeap &heap, fallocTrace* trace, size_t &length) {
	if (trace->complete) {
		length = 0;
		return nullptr;
	}
	// Each pass starts writing at the beginning of the device trace buffer.
	trace->trace = (volatile __int8*)heap.reserved;
	// BUGFIX: the copy status used to be stored in an unused size_t (`r`) and
	// silently dropped; check both transfers so a failure yields an empty
	// stream instead of garbage.
	if (cudaMemcpy(trace->deviceTrace, trace, sizeof(fallocTrace), cudaMemcpyHostToDevice) != cudaSuccess) {
		length = 0;
		return nullptr;
	}
	FallocWTrace<<<1, 1>>>(heap.deviceHeap, trace->deviceTrace);
	if (cudaMemcpy(trace, trace->deviceTrace, sizeof(fallocTrace), cudaMemcpyDeviceToHost) != cudaSuccess) {
		length = 0;
		return nullptr;
	}
	// Bytes written == distance from the buffer start to the updated cursor.
	length = (__int8*)trace->trace - heap.reserved;
	return heap.reserved;
}
//
// cudaFallocTraceEnd
//
// Frees a trace Stream.
//
// Frees a trace stream created by cudaFallocTraceInit:
// device-side mirror first, then the host struct.
extern "C" void cudaFallocTraceEnd(fallocTrace* trace) {
	cudaFree(trace->deviceTrace);
	free(trace);
}
#endif // CUFALLOCWTRACE_C
|
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.