hip_filename stringlengths 5 84 | hip_content stringlengths 79 9.69M | cuda_filename stringlengths 4 83 | cuda_content stringlengths 19 9.69M |
|---|---|---|---|
95b4eda061fa438450b38616044ede247b9efc0e.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
#include "includes.h"
extern "C" {
#ifndef REAL
#define REAL float
#endif
#ifndef CAST
#define CAST(fun) fun ## f
#endif
#ifndef REAL2o3
#define REAL2o3 (REAL)0.6666666666666667
#endif
#ifndef REAL3o2
#define REAL3o2 (REAL)1.5
#endif
}
__global__ void uplo_sqr (const int sd, const int unit, const int bottom, const REAL* a, const int offset_a, const int ld_a, REAL* b, const int offset_b, const int ld_b) {
const int gid_0 = blockIdx.x * blockDim.x + threadIdx.x;
const int gid_1 = blockIdx.y * blockDim.y + threadIdx.y;
const bool valid = (gid_0 < sd) && (gid_1 < sd);
const bool check = valid &&
((unit == 132) ? bottom * gid_0 > bottom * gid_1 : bottom * gid_0 >= bottom * gid_1);
if (check) {
const REAL aval = a[offset_a + gid_0 + gid_1 * ld_a];
b[offset_b + gid_0 + gid_1 * ld_b] = aval * aval;
}
} | 95b4eda061fa438450b38616044ede247b9efc0e.cu | #include "includes.h"
extern "C" {
#ifndef REAL
#define REAL float
#endif
#ifndef CAST
#define CAST(fun) fun ## f
#endif
#ifndef REAL2o3
#define REAL2o3 (REAL)0.6666666666666667
#endif
#ifndef REAL3o2
#define REAL3o2 (REAL)1.5
#endif
}
__global__ void uplo_sqr (const int sd, const int unit, const int bottom, const REAL* a, const int offset_a, const int ld_a, REAL* b, const int offset_b, const int ld_b) {
const int gid_0 = blockIdx.x * blockDim.x + threadIdx.x;
const int gid_1 = blockIdx.y * blockDim.y + threadIdx.y;
const bool valid = (gid_0 < sd) && (gid_1 < sd);
const bool check = valid &&
((unit == 132) ? bottom * gid_0 > bottom * gid_1 : bottom * gid_0 >= bottom * gid_1);
if (check) {
const REAL aval = a[offset_a + gid_0 + gid_1 * ld_a];
b[offset_b + gid_0 + gid_1 * ld_b] = aval * aval;
}
} |
21c001d4014b567ff40eaf072d497ca3068d15e7.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
// TanH neuron activation function layer.
// Adapted from ReLU layer code written by Yangqing Jia
#include <algorithm>
#include <vector>
#include "caffe/layer.hpp"
#include "caffe/vision_layers.hpp"
namespace caffe {
template <typename Dtype>
__global__ void TanHForward(const int n, const Dtype* in, Dtype* out) {
CUDA_KERNEL_LOOP(index, n) {
Dtype exp2x = exp(2 * in[index]);
out[index] = (exp2x - Dtype(1)) / (exp2x + Dtype(1));
}
}
template <typename Dtype>
Dtype TanHLayer<Dtype>::Forward_gpu(const vector<Blob<Dtype>*>& bottom,
vector<Blob<Dtype>*>* top) {
const Dtype* bottom_data = bottom[0]->gpu_data();
Dtype* top_data = (*top)[0]->mutable_gpu_data();
const int count = bottom[0]->count();
// NOLINT_NEXT_LINE(whitespace/operators)
hipLaunchKernelGGL(( TanHForward<Dtype>), dim3(CAFFE_GET_BLOCKS(count)), dim3(CAFFE_CUDA_NUM_THREADS), 0, 0,
count, bottom_data, top_data);
CUDA_POST_KERNEL_CHECK;
return Dtype(0);
}
template <typename Dtype>
__global__ void TanHBackward(const int n, const Dtype* in_diff,
const Dtype* out_data, Dtype* out_diff) {
CUDA_KERNEL_LOOP(index, n) {
Dtype tanhx = out_data[index];
out_diff[index] = in_diff[index] * (1 - tanhx * tanhx);
}
}
template <typename Dtype>
void TanHLayer<Dtype>::Backward_gpu(const vector<Blob<Dtype>*>& top,
const vector<bool>& propagate_down,
vector<Blob<Dtype>*>* bottom) {
if (propagate_down[0]) {
const Dtype* top_data = top[0]->gpu_data();
const Dtype* top_diff = top[0]->gpu_diff();
Dtype* bottom_diff = (*bottom)[0]->mutable_gpu_diff();
const int count = (*bottom)[0]->count();
// NOLINT_NEXT_LINE(whitespace/operators)
hipLaunchKernelGGL(( TanHBackward<Dtype>), dim3(CAFFE_GET_BLOCKS(count)), dim3(CAFFE_CUDA_NUM_THREADS), 0, 0,
count, top_diff, top_data, bottom_diff);
CUDA_POST_KERNEL_CHECK;
}
}
INSTANTIATE_CLASS(TanHLayer);
} // namespace caffe
| 21c001d4014b567ff40eaf072d497ca3068d15e7.cu | // TanH neuron activation function layer.
// Adapted from ReLU layer code written by Yangqing Jia
#include <algorithm>
#include <vector>
#include "caffe/layer.hpp"
#include "caffe/vision_layers.hpp"
namespace caffe {
template <typename Dtype>
__global__ void TanHForward(const int n, const Dtype* in, Dtype* out) {
CUDA_KERNEL_LOOP(index, n) {
Dtype exp2x = exp(2 * in[index]);
out[index] = (exp2x - Dtype(1)) / (exp2x + Dtype(1));
}
}
template <typename Dtype>
Dtype TanHLayer<Dtype>::Forward_gpu(const vector<Blob<Dtype>*>& bottom,
vector<Blob<Dtype>*>* top) {
const Dtype* bottom_data = bottom[0]->gpu_data();
Dtype* top_data = (*top)[0]->mutable_gpu_data();
const int count = bottom[0]->count();
// NOLINT_NEXT_LINE(whitespace/operators)
TanHForward<Dtype><<<CAFFE_GET_BLOCKS(count), CAFFE_CUDA_NUM_THREADS>>>(
count, bottom_data, top_data);
CUDA_POST_KERNEL_CHECK;
return Dtype(0);
}
template <typename Dtype>
__global__ void TanHBackward(const int n, const Dtype* in_diff,
const Dtype* out_data, Dtype* out_diff) {
CUDA_KERNEL_LOOP(index, n) {
Dtype tanhx = out_data[index];
out_diff[index] = in_diff[index] * (1 - tanhx * tanhx);
}
}
template <typename Dtype>
void TanHLayer<Dtype>::Backward_gpu(const vector<Blob<Dtype>*>& top,
const vector<bool>& propagate_down,
vector<Blob<Dtype>*>* bottom) {
if (propagate_down[0]) {
const Dtype* top_data = top[0]->gpu_data();
const Dtype* top_diff = top[0]->gpu_diff();
Dtype* bottom_diff = (*bottom)[0]->mutable_gpu_diff();
const int count = (*bottom)[0]->count();
// NOLINT_NEXT_LINE(whitespace/operators)
TanHBackward<Dtype><<<CAFFE_GET_BLOCKS(count), CAFFE_CUDA_NUM_THREADS>>>(
count, top_diff, top_data, bottom_diff);
CUDA_POST_KERNEL_CHECK;
}
}
INSTANTIATE_CLASS(TanHLayer);
} // namespace caffe
|
e8f0d1a4c782fb68b619a85e75bbe0864005525a.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
/* From CUDA by Example. Modified.
* Adds two ints on the device
*/
#include <stdio.h>
__global__ void add(int a, int b, int *c) {
*c = a + b;
}
int main( void ) {
int c;
int *dev_c;
hipMalloc((void**)&dev_c, sizeof(int));
hipLaunchKernelGGL(( add), dim3(1),dim3(1), 0, 0, 2, 7, dev_c);
hipMemcpy(&c, dev_c, sizeof(int), hipMemcpyDeviceToHost);
printf("2 + 7 = %d\n", c);
hipFree(dev_c);
}
| e8f0d1a4c782fb68b619a85e75bbe0864005525a.cu | /* From CUDA by Example. Modified.
* Adds two ints on the device
*/
#include <stdio.h>
__global__ void add(int a, int b, int *c) {
*c = a + b;
}
int main( void ) {
int c;
int *dev_c;
cudaMalloc((void**)&dev_c, sizeof(int));
add<<<1,1>>>(2, 7, dev_c);
cudaMemcpy(&c, dev_c, sizeof(int), cudaMemcpyDeviceToHost);
printf("2 + 7 = %d\n", c);
cudaFree(dev_c);
}
|
c8678c14d0853a8185ee2e231e4050e5a8a86629.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
#ifndef THC_GENERIC_FILE
#define THC_GENERIC_FILE "generic/THCTensorMathReduce.cu"
#else
THC_API void
THCTensor_(sum)(THCState* state, THCTensor *self, THCTensor *src, int dimension, int keepdim) {
THCAssertSameGPU(THCTensor_(checkGPU)(state, 2, self, src));
if (!THC_reduceDim(state, self, src,
thrust::identity<real>(),
ReduceAdd<real, accreal>(),
ReduceAdd<accreal, accreal>(),
ScalarConvert<int, accreal>::to(0),
dimension,
keepdim)) {
THArgCheck(false, 2, CUTORCH_DIM_WARNING);
}
THCudaCheck(hipGetLastError());
}
THC_API void
THCTensor_(prod)(THCState* state, THCTensor *self, THCTensor *src, int dimension, int keepdim) {
THCAssertSameGPU(THCTensor_(checkGPU)(state, 2, self, src));
if (!THC_reduceDim(state, self, src,
thrust::identity<real>(),
ReduceMultiply<real, accreal>(),
ReduceMultiply<accreal, accreal>(),
ScalarConvert<int, accreal>::to(1),
dimension,
keepdim)) {
THArgCheck(false, 2, CUTORCH_DIM_WARNING);
}
THCudaCheck(hipGetLastError());
}
THC_API void
THCTensor_(mean)(THCState *state, THCTensor *self, THCTensor *src, int dim, int keepdim)
{
THCAssertSameGPU(THCTensor_(checkGPU)(state, 2, self, src));
THCTensor_(sum)(state, self, src, dim, keepdim);
THCTensor_(div)(state, self, self, ScalarConvert<int64_t, real>::to(THCTensor_(size)(state, src, dim)));
}
#if defined(THC_REAL_IS_FLOAT) || defined(THC_REAL_IS_DOUBLE) || defined(THC_REAL_IS_HALF)
THC_API void
THCTensor_(renorm)(THCState *state, THCTensor* self, THCTensor* src, real value, int dimension, real maxnorm)
{
THCAssertSameGPU(THCTensor_(checkGPU)(state, 2, self, src));
THCTensor *self_;
THCTensor *src_ = THCTensor_(newTranspose)(state, src, dimension, 0);
THCTensor *data = THCTensor_(newClone)(state, src_);
ptrdiff_t size = THCTensor_(nElement)(state, data)/data->size[0];
THArgCheck(dimension >= 0 && dimension < THCTensor_(nDimension)(state, src), 3, "invalid dimension");
THArgCheck(THCNumerics<real>::gt(value, ScalarConvert<int, real>::to(0)), 2, "non-positive-norm not supported");
THArgCheck(THCTensor_(nDimension)(state, src) > 1, 1, "need at least 2 dimensions");
dim3 grid(data->size[0]);
dim3 threads(32);
hipLaunchKernelGGL(( THCTensor_kernel_renorm<real>), dim3(grid), dim3(threads), 0, THCState_getCurrentStream(state), THCTensor_(data)(state, data), value, size, maxnorm);
hipError_t errcode = hipGetLastError();
if(errcode != hipSuccess)
THError(hipGetErrorString(errcode));
THCTensor_(free)(state, src_);
self_ = THCTensor_(newTranspose)(state, data, dimension, 0);
THCTensor_(resizeAs)(state, self, self_);
THCTensor_(freeCopyTo)(state, self_, self);
THCTensor_(free)(state, data);
}
THC_API void
THCTensor_(std)(THCState *state, THCTensor *self_, THCTensor *src, int dimension, int biased, int keepdim)
{
THCAssertSameGPU(THCTensor_(checkGPU)(state, 2, self_, src));
THLongStorage *dim = THCTensor_(newSizeOf)(state, src);
THLongStorage_set(dim, dimension, 1);
THCTensor_(resize)(state, self_, dim, NULL);
THLongStorage_free(dim);
THCTensor *self = THCTensor_(newContiguous)(state, self_);
src = THCTensor_(newContiguous)(state, src);
if (dimension == THCTensor_(nDimension)(state, src) - 1) {
THCTensor_varInnermostDim<THCTensor, real, accreal, true>(state, self, src, biased);
} else {
THCTensor_varOuterDim<THCTensor, real, accreal, true>(state, self, src, dimension, biased);
}
THCTensor_(free)(state, src);
THCTensor_(freeCopyTo)(state, self, self_);
if (!keepdim) {
THCTensor_(squeeze1d)(state, self_, self_, dimension);
}
}
THC_API void
THCTensor_(var)(THCState *state, THCTensor *self_, THCTensor *src, int dimension, int biased, int keepdim)
{
THCAssertSameGPU(THCTensor_(checkGPU)(state, 2, self_, src));
THLongStorage *dim = THCTensor_(newSizeOf)(state, src);
THLongStorage_set(dim, dimension, 1);
THCTensor_(resize)(state, self_, dim, NULL);
THLongStorage_free(dim);
THCTensor *self = THCTensor_(newContiguous)(state, self_);
src = THCTensor_(newContiguous)(state, src);
if (dimension == THCTensor_(nDimension)(state, src) - 1) {
THCTensor_varInnermostDim<THCTensor, real, accreal, false>(state, self, src, biased);
} else {
THCTensor_varOuterDim<THCTensor, real, accreal, false>(state, self, src, dimension, biased);
}
THCTensor_(free)(state, src);
THCTensor_(freeCopyTo)(state, self, self_);
if (!keepdim) {
THCTensor_(squeeze1d)(state, self_, self_, dimension);
}
}
THC_API accreal
THCTensor_(stdall)(THCState *state, THCTensor *self, int biased)
{
THCAssertSameGPU(THCTensor_(checkGPU)(state, 1, self));
return THCNumerics<accreal>::sqrt((THCTensor_(varall)(state, self, biased)));
}
THC_API accreal
THCTensor_(varall)(THCState *state, THCTensor *self, int biased)
{
THCAssertSameGPU(THCTensor_(checkGPU)(state, 1, self));
accreal mean = THCTensor_(meanall)(state, self);
accreal val;
if (!THC_reduceAll(state, self,
SquareFunctor<accreal, real>(mean),
ReduceAdd<accreal, accreal>(),
ReduceAdd<accreal, accreal>(),
ScalarConvert<int, accreal>::to(0),
&val, 0)) {
THArgCheck(false, 1, CUTORCH_DIM_WARNING);
}
val = THCNumerics<accreal>::div(
val,
ScalarConvert<ptrdiff_t, accreal>::to(THCTensor_(nElement)(state, self) - (biased ? 0 : 1))
);
THCudaCheck(hipGetLastError());
return val;
}
THC_API void
THCTensor_(norm)(THCState *state, THCTensor* self, THCTensor* src, real value, int dimension, int keepdim)
{
THCAssertSameGPU(THCTensor_(checkGPU)(state, 2, self, src));
if (THCNumerics<real>::eq(value, ScalarConvert<float, real>::to(0.0))) {
THC_reduceDim(state, self, src,
TensorNonZeroOp<real>(), ReduceAdd<real, accreal>(), ReduceAdd<accreal, accreal>(),
ScalarConvert<float, accreal>::to(0.0), dimension, keepdim);
} else if (THCNumerics<real>::eq(value, ScalarConvert<float, real>::to(1.0))) {
THC_reduceDim(state, self, src,
TensorNormOp<real, 1>(value), ReduceAdd<real, accreal>(), ReduceAdd<accreal, accreal>(),
ScalarConvert<float, accreal>::to(0.0), dimension, keepdim);
} else if (THCNumerics<real>::eq(value, ScalarConvert<float, real>::to(2.0))) {
THC_reduceDim(state, self, src,
TensorNormOp<real, 2>(value), ReduceAdd<real, accreal>(), ReduceAdd<accreal, accreal>(),
ScalarConvert<float, accreal>::to(0.0), dimension, keepdim);
THCTensor_(pow)(state, self, self, ScalarConvert<float, real>::to(0.5));
} else {
THC_reduceDim(state, self, src,
TensorNormOp<real, -1>(value), ReduceAdd<real, accreal>(), ReduceAdd<accreal, accreal>(),
ScalarConvert<float, accreal>::to(0.0), dimension, keepdim);
THCTensor_(pow)(state, self, self, THCNumerics<real>::cinv(value));
}
THCudaCheck(hipGetLastError());
}
THC_API accreal
THCTensor_(normall)(THCState *state, THCTensor *self, real value)
{
THCAssertSameGPU(THCTensor_(checkGPU)(state, 1, self));
accreal result;
if (THCNumerics<real>::eq(value, ScalarConvert<float, real>::to(0.0))) {
THC_reduceAll(state, self,
TensorNonZeroOp<real>(),
ReduceAdd<real, accreal>(),
ReduceAdd<accreal, accreal>(),
ScalarConvert<float, accreal>::to(0.0f),
&result, 0);
} else if (THCNumerics<real>::eq(value, ScalarConvert<float, real>::to(1.0))) {
THC_reduceAll(state, self,
TensorNormOp<real, 1>(value),
ReduceAdd<real, accreal>(),
ReduceAdd<accreal, accreal>(),
ScalarConvert<float, accreal>::to(0.0f),
&result, 0);
} else if (THCNumerics<real>::eq(value, ScalarConvert<float, real>::to(2.0))) {
THC_reduceAll(state, self,
TensorNormOp<real, 2>(value),
ReduceAdd<real, accreal>(),
ReduceAdd<accreal, accreal>(),
ScalarConvert<float, accreal>::to(0.0f),
&result, 0);
result = THCNumerics<accreal>::sqrt(result);
} else {
THC_reduceAll(state, self,
TensorNormOp<real, -1>(value),
ReduceAdd<real, accreal>(),
ReduceAdd<accreal, accreal>(),
ScalarConvert<float, accreal>::to(0.0f),
&result, 0);
result = THCNumerics<accreal>::pow(
result,
ScalarConvert<real, accreal>::to(THCNumerics<real>::cinv(value))
);
}
THCudaCheck(hipGetLastError());
return result;
}
accreal THCTensor_(dist)(THCState *state, THCTensor *self,
THCTensor *src, real value)
{
THCAssertSameGPU(THCTensor_(checkGPU)(state, 2, self, src));
self = THCTensor_(newContiguous)(state, self);
ptrdiff_t size = THCTensor_(nElement)(state, self);
src = THCTensor_(newContiguous)(state, src);
thrust::device_ptr<real> self_data(THCTensor_(data)(state, self));
thrust::device_ptr<real> src_data(THCTensor_(data)(state, src));
THCThrustAllocator thrustAlloc(state);
accreal result = thrust::inner_product(
#if TORCH_HIP_VERSION >= 7000
thrust::hip::par(thrustAlloc).on(THCState_getCurrentStream(state)),
#endif
self_data, self_data+size, src_data, ScalarConvert<int, accreal>::to(0),
thrust::plus<accreal>(),
TensorDistOp<accreal, real>(ScalarConvert<real, accreal>::to(value)));
THCTensor_(free)(state, src);
THCTensor_(free)(state, self);
return THCNumerics<accreal>::pow(result, 1.0 / ScalarConvert<real, accreal>::to(value));
}
#endif
THC_API accreal
THCTensor_(sumall)(THCState *state, THCTensor *self) {
THCAssertSameGPU(THCTensor_(checkGPU)(state, 1, self));
accreal val;
if (!THC_reduceAll(state, self,
thrust::identity<real>(),
ReduceAdd<real, accreal>(),
ReduceAdd<accreal, accreal>(),
ScalarConvert<int, accreal>::to(0),
&val, 0)) {
THArgCheck(false, 1, CUTORCH_DIM_WARNING);
}
THCudaCheck(hipGetLastError());
return val;
}
THC_API accreal
THCTensor_(prodall)(THCState *state, THCTensor *self) {
THCAssertSameGPU(THCTensor_(checkGPU)(state, 1, self));
accreal val;
if (!THC_reduceAll(state, self,
thrust::identity<real>(),
ReduceMultiply<real, accreal>(),
ReduceMultiply<accreal, accreal>(),
ScalarConvert<int, accreal>::to(1),
&val, 0)) {
THArgCheck(false, 1, CUTORCH_DIM_WARNING);
}
THCudaCheck(hipGetLastError());
return val;
}
THC_API accreal
THCTensor_(meanall)(THCState *state, THCTensor *self)
{
THCAssertSameGPU(THCTensor_(checkGPU)(state, 1, self));
THArgCheck(self->nDimension > 0, 1, "empty Tensor");
return THCTensor_(sumall)(state, self)/THCTensor_(nElement)(state, self);
}
THC_API real
THCTensor_(minall)(THCState *state, THCTensor *self) {
THCAssertSameGPU(THCTensor_(checkGPU)(state, 1, self));
real val;
if (!THC_reduceAll(state, self,
thrust::identity<real>(),
ReduceMin<real>(),
ReduceMin<real>(),
THCNumerics<real>::max(), &val, 0)) {
THArgCheck(false, 1, CUTORCH_DIM_WARNING);
}
THCudaCheck(hipGetLastError());
return val;
}
THC_API real
THCTensor_(maxall)(THCState *state, THCTensor *self) {
THCAssertSameGPU(THCTensor_(checkGPU)(state, 1, self));
real val;
if (!THC_reduceAll(state, self,
thrust::identity<real>(),
ReduceMax<real>(),
ReduceMax<real>(),
THCNumerics<real>::min(), &val, 0)) {
THArgCheck(false, 1, CUTORCH_DIM_WARNING);
}
THCudaCheck(hipGetLastError());
return val;
}
THC_API real
THCTensor_(medianall)(THCState *state, THCTensor *self) {
THCAssertSameGPU(THCTensor_(checkGPU)(state, 1, self));
real val;
ptrdiff_t nelem, k;
nelem = THCTensor_(nElement)(state, self);
k = (nelem-1) >> 1;
THLongStorage *size = THLongStorage_newWithSize1(nelem);
THCTensor *view = THCTensor_(newView)(state, self, size);
THLongStorage_free(size);
THCTensor *sorted = THCTensor_(new)(state);
THCudaLongTensor *indices = THCudaLongTensor_new(state);
THCTensor_(sort)(state, sorted, indices, view, 0, 0);
val = THCTensor_(get1d)(state, sorted, k);
THCTensor_(free)(state, view);
THCTensor_(free)(state, sorted);
THCudaLongTensor_free(state, indices);
THCudaCheck(hipGetLastError());
return val;
}
THC_API void
THCTensor_(median)(THCState *state,
THCTensor *values,
THCudaLongTensor *indices,
THCTensor *self,
int dimension,
int keepdim) {
THCAssertSameGPU(THCTensor_(checkGPU)(state, 1, self));
int64_t t_size_dim, k;
t_size_dim = THCTensor_(size)(state, self, dimension);
k = (t_size_dim-1) >> 1;
THCTensor *sorted = THCTensor_(new)(state);
THCudaLongTensor *sorted_indices = THCudaLongTensor_new(state);
THCTensor_(sort)(state, sorted, sorted_indices, self, dimension, 0);
THCTensor_(narrow)(state, values, sorted, dimension, k, 1);
THCudaLongTensor_narrow(state, indices, sorted_indices, dimension, k, 1);
THCTensor_(free)(state, sorted);
THCudaLongTensor_free(state, sorted_indices);
if (!keepdim) {
THCTensor_(squeeze1d)(state, values, values, dimension);
THCudaLongTensor_squeeze1d(state, indices, indices, dimension);
}
THCudaCheck(hipGetLastError());
}
THC_API void
THCTensor_(max)(THCState *state,
THCTensor *values,
THCudaLongTensor *indices,
THCTensor *src,
int dimension,
int keepdim) {
THCAssertSameGPU(THCTensor_(checkGPU)(state, 3, values, indices, src));
thrust::pair<typename TensorUtils<THCTensor>::DataType, int64_t>
init =
thrust::make_pair<typename TensorUtils<THCTensor>::DataType, int64_t>(
THCNumerics<typename TensorUtils<THCTensor>::DataType>::min(), 1);
return THC_reduceDimIndex(
state, values, indices, src, dimension, keepdim, init,
MaxValuePair<typename TensorUtils<THCTensor>::DataType, int64_t>());
}
THC_API void
THCTensor_(min)(THCState *state,
THCTensor *values,
THCudaLongTensor *indices,
THCTensor *src,
int dimension,
int keepdim) {
THCAssertSameGPU(THCTensor_(checkGPU)(state, 3, values, indices, src));
thrust::pair<typename TensorUtils<THCTensor>::DataType, int64_t>
init =
thrust::make_pair<typename TensorUtils<THCTensor>::DataType, int64_t>(
THCNumerics<typename TensorUtils<THCTensor>::DataType>::max(), 1);
return THC_reduceDimIndex(
state, values, indices, src, dimension, keepdim, init,
MinValuePair<typename TensorUtils<THCTensor>::DataType, int64_t>());
}
#endif
| c8678c14d0853a8185ee2e231e4050e5a8a86629.cu | #ifndef THC_GENERIC_FILE
#define THC_GENERIC_FILE "generic/THCTensorMathReduce.cu"
#else
THC_API void
THCTensor_(sum)(THCState* state, THCTensor *self, THCTensor *src, int dimension, int keepdim) {
THCAssertSameGPU(THCTensor_(checkGPU)(state, 2, self, src));
if (!THC_reduceDim(state, self, src,
thrust::identity<real>(),
ReduceAdd<real, accreal>(),
ReduceAdd<accreal, accreal>(),
ScalarConvert<int, accreal>::to(0),
dimension,
keepdim)) {
THArgCheck(false, 2, CUTORCH_DIM_WARNING);
}
THCudaCheck(cudaGetLastError());
}
THC_API void
THCTensor_(prod)(THCState* state, THCTensor *self, THCTensor *src, int dimension, int keepdim) {
THCAssertSameGPU(THCTensor_(checkGPU)(state, 2, self, src));
if (!THC_reduceDim(state, self, src,
thrust::identity<real>(),
ReduceMultiply<real, accreal>(),
ReduceMultiply<accreal, accreal>(),
ScalarConvert<int, accreal>::to(1),
dimension,
keepdim)) {
THArgCheck(false, 2, CUTORCH_DIM_WARNING);
}
THCudaCheck(cudaGetLastError());
}
THC_API void
THCTensor_(mean)(THCState *state, THCTensor *self, THCTensor *src, int dim, int keepdim)
{
THCAssertSameGPU(THCTensor_(checkGPU)(state, 2, self, src));
THCTensor_(sum)(state, self, src, dim, keepdim);
THCTensor_(div)(state, self, self, ScalarConvert<int64_t, real>::to(THCTensor_(size)(state, src, dim)));
}
#if defined(THC_REAL_IS_FLOAT) || defined(THC_REAL_IS_DOUBLE) || defined(THC_REAL_IS_HALF)
THC_API void
THCTensor_(renorm)(THCState *state, THCTensor* self, THCTensor* src, real value, int dimension, real maxnorm)
{
THCAssertSameGPU(THCTensor_(checkGPU)(state, 2, self, src));
THCTensor *self_;
THCTensor *src_ = THCTensor_(newTranspose)(state, src, dimension, 0);
THCTensor *data = THCTensor_(newClone)(state, src_);
ptrdiff_t size = THCTensor_(nElement)(state, data)/data->size[0];
THArgCheck(dimension >= 0 && dimension < THCTensor_(nDimension)(state, src), 3, "invalid dimension");
THArgCheck(THCNumerics<real>::gt(value, ScalarConvert<int, real>::to(0)), 2, "non-positive-norm not supported");
THArgCheck(THCTensor_(nDimension)(state, src) > 1, 1, "need at least 2 dimensions");
dim3 grid(data->size[0]);
dim3 threads(32);
THCTensor_kernel_renorm<real><<<grid, threads, 0, THCState_getCurrentStream(state)>>>(THCTensor_(data)(state, data), value, size, maxnorm);
cudaError errcode = cudaGetLastError();
if(errcode != cudaSuccess)
THError(cudaGetErrorString(errcode));
THCTensor_(free)(state, src_);
self_ = THCTensor_(newTranspose)(state, data, dimension, 0);
THCTensor_(resizeAs)(state, self, self_);
THCTensor_(freeCopyTo)(state, self_, self);
THCTensor_(free)(state, data);
}
THC_API void
THCTensor_(std)(THCState *state, THCTensor *self_, THCTensor *src, int dimension, int biased, int keepdim)
{
THCAssertSameGPU(THCTensor_(checkGPU)(state, 2, self_, src));
THLongStorage *dim = THCTensor_(newSizeOf)(state, src);
THLongStorage_set(dim, dimension, 1);
THCTensor_(resize)(state, self_, dim, NULL);
THLongStorage_free(dim);
THCTensor *self = THCTensor_(newContiguous)(state, self_);
src = THCTensor_(newContiguous)(state, src);
if (dimension == THCTensor_(nDimension)(state, src) - 1) {
THCTensor_varInnermostDim<THCTensor, real, accreal, true>(state, self, src, biased);
} else {
THCTensor_varOuterDim<THCTensor, real, accreal, true>(state, self, src, dimension, biased);
}
THCTensor_(free)(state, src);
THCTensor_(freeCopyTo)(state, self, self_);
if (!keepdim) {
THCTensor_(squeeze1d)(state, self_, self_, dimension);
}
}
THC_API void
THCTensor_(var)(THCState *state, THCTensor *self_, THCTensor *src, int dimension, int biased, int keepdim)
{
THCAssertSameGPU(THCTensor_(checkGPU)(state, 2, self_, src));
THLongStorage *dim = THCTensor_(newSizeOf)(state, src);
THLongStorage_set(dim, dimension, 1);
THCTensor_(resize)(state, self_, dim, NULL);
THLongStorage_free(dim);
THCTensor *self = THCTensor_(newContiguous)(state, self_);
src = THCTensor_(newContiguous)(state, src);
if (dimension == THCTensor_(nDimension)(state, src) - 1) {
THCTensor_varInnermostDim<THCTensor, real, accreal, false>(state, self, src, biased);
} else {
THCTensor_varOuterDim<THCTensor, real, accreal, false>(state, self, src, dimension, biased);
}
THCTensor_(free)(state, src);
THCTensor_(freeCopyTo)(state, self, self_);
if (!keepdim) {
THCTensor_(squeeze1d)(state, self_, self_, dimension);
}
}
THC_API accreal
THCTensor_(stdall)(THCState *state, THCTensor *self, int biased)
{
THCAssertSameGPU(THCTensor_(checkGPU)(state, 1, self));
return THCNumerics<accreal>::sqrt((THCTensor_(varall)(state, self, biased)));
}
THC_API accreal
THCTensor_(varall)(THCState *state, THCTensor *self, int biased)
{
THCAssertSameGPU(THCTensor_(checkGPU)(state, 1, self));
accreal mean = THCTensor_(meanall)(state, self);
accreal val;
if (!THC_reduceAll(state, self,
SquareFunctor<accreal, real>(mean),
ReduceAdd<accreal, accreal>(),
ReduceAdd<accreal, accreal>(),
ScalarConvert<int, accreal>::to(0),
&val, 0)) {
THArgCheck(false, 1, CUTORCH_DIM_WARNING);
}
val = THCNumerics<accreal>::div(
val,
ScalarConvert<ptrdiff_t, accreal>::to(THCTensor_(nElement)(state, self) - (biased ? 0 : 1))
);
THCudaCheck(cudaGetLastError());
return val;
}
THC_API void
THCTensor_(norm)(THCState *state, THCTensor* self, THCTensor* src, real value, int dimension, int keepdim)
{
THCAssertSameGPU(THCTensor_(checkGPU)(state, 2, self, src));
if (THCNumerics<real>::eq(value, ScalarConvert<float, real>::to(0.0))) {
THC_reduceDim(state, self, src,
TensorNonZeroOp<real>(), ReduceAdd<real, accreal>(), ReduceAdd<accreal, accreal>(),
ScalarConvert<float, accreal>::to(0.0), dimension, keepdim);
} else if (THCNumerics<real>::eq(value, ScalarConvert<float, real>::to(1.0))) {
THC_reduceDim(state, self, src,
TensorNormOp<real, 1>(value), ReduceAdd<real, accreal>(), ReduceAdd<accreal, accreal>(),
ScalarConvert<float, accreal>::to(0.0), dimension, keepdim);
} else if (THCNumerics<real>::eq(value, ScalarConvert<float, real>::to(2.0))) {
THC_reduceDim(state, self, src,
TensorNormOp<real, 2>(value), ReduceAdd<real, accreal>(), ReduceAdd<accreal, accreal>(),
ScalarConvert<float, accreal>::to(0.0), dimension, keepdim);
THCTensor_(pow)(state, self, self, ScalarConvert<float, real>::to(0.5));
} else {
THC_reduceDim(state, self, src,
TensorNormOp<real, -1>(value), ReduceAdd<real, accreal>(), ReduceAdd<accreal, accreal>(),
ScalarConvert<float, accreal>::to(0.0), dimension, keepdim);
THCTensor_(pow)(state, self, self, THCNumerics<real>::cinv(value));
}
THCudaCheck(cudaGetLastError());
}
THC_API accreal
THCTensor_(normall)(THCState *state, THCTensor *self, real value)
{
THCAssertSameGPU(THCTensor_(checkGPU)(state, 1, self));
accreal result;
if (THCNumerics<real>::eq(value, ScalarConvert<float, real>::to(0.0))) {
THC_reduceAll(state, self,
TensorNonZeroOp<real>(),
ReduceAdd<real, accreal>(),
ReduceAdd<accreal, accreal>(),
ScalarConvert<float, accreal>::to(0.0f),
&result, 0);
} else if (THCNumerics<real>::eq(value, ScalarConvert<float, real>::to(1.0))) {
THC_reduceAll(state, self,
TensorNormOp<real, 1>(value),
ReduceAdd<real, accreal>(),
ReduceAdd<accreal, accreal>(),
ScalarConvert<float, accreal>::to(0.0f),
&result, 0);
} else if (THCNumerics<real>::eq(value, ScalarConvert<float, real>::to(2.0))) {
THC_reduceAll(state, self,
TensorNormOp<real, 2>(value),
ReduceAdd<real, accreal>(),
ReduceAdd<accreal, accreal>(),
ScalarConvert<float, accreal>::to(0.0f),
&result, 0);
result = THCNumerics<accreal>::sqrt(result);
} else {
THC_reduceAll(state, self,
TensorNormOp<real, -1>(value),
ReduceAdd<real, accreal>(),
ReduceAdd<accreal, accreal>(),
ScalarConvert<float, accreal>::to(0.0f),
&result, 0);
result = THCNumerics<accreal>::pow(
result,
ScalarConvert<real, accreal>::to(THCNumerics<real>::cinv(value))
);
}
THCudaCheck(cudaGetLastError());
return result;
}
accreal THCTensor_(dist)(THCState *state, THCTensor *self,
THCTensor *src, real value)
{
THCAssertSameGPU(THCTensor_(checkGPU)(state, 2, self, src));
self = THCTensor_(newContiguous)(state, self);
ptrdiff_t size = THCTensor_(nElement)(state, self);
src = THCTensor_(newContiguous)(state, src);
thrust::device_ptr<real> self_data(THCTensor_(data)(state, self));
thrust::device_ptr<real> src_data(THCTensor_(data)(state, src));
THCThrustAllocator thrustAlloc(state);
accreal result = thrust::inner_product(
#if CUDA_VERSION >= 7000
thrust::cuda::par(thrustAlloc).on(THCState_getCurrentStream(state)),
#endif
self_data, self_data+size, src_data, ScalarConvert<int, accreal>::to(0),
thrust::plus<accreal>(),
TensorDistOp<accreal, real>(ScalarConvert<real, accreal>::to(value)));
THCTensor_(free)(state, src);
THCTensor_(free)(state, self);
return THCNumerics<accreal>::pow(result, 1.0 / ScalarConvert<real, accreal>::to(value));
}
#endif
THC_API accreal
THCTensor_(sumall)(THCState *state, THCTensor *self) {
THCAssertSameGPU(THCTensor_(checkGPU)(state, 1, self));
accreal val;
if (!THC_reduceAll(state, self,
thrust::identity<real>(),
ReduceAdd<real, accreal>(),
ReduceAdd<accreal, accreal>(),
ScalarConvert<int, accreal>::to(0),
&val, 0)) {
THArgCheck(false, 1, CUTORCH_DIM_WARNING);
}
THCudaCheck(cudaGetLastError());
return val;
}
THC_API accreal
THCTensor_(prodall)(THCState *state, THCTensor *self) {
THCAssertSameGPU(THCTensor_(checkGPU)(state, 1, self));
accreal val;
if (!THC_reduceAll(state, self,
thrust::identity<real>(),
ReduceMultiply<real, accreal>(),
ReduceMultiply<accreal, accreal>(),
ScalarConvert<int, accreal>::to(1),
&val, 0)) {
THArgCheck(false, 1, CUTORCH_DIM_WARNING);
}
THCudaCheck(cudaGetLastError());
return val;
}
THC_API accreal
THCTensor_(meanall)(THCState *state, THCTensor *self)
{
THCAssertSameGPU(THCTensor_(checkGPU)(state, 1, self));
THArgCheck(self->nDimension > 0, 1, "empty Tensor");
return THCTensor_(sumall)(state, self)/THCTensor_(nElement)(state, self);
}
THC_API real
THCTensor_(minall)(THCState *state, THCTensor *self) {
THCAssertSameGPU(THCTensor_(checkGPU)(state, 1, self));
real val;
if (!THC_reduceAll(state, self,
thrust::identity<real>(),
ReduceMin<real>(),
ReduceMin<real>(),
THCNumerics<real>::max(), &val, 0)) {
THArgCheck(false, 1, CUTORCH_DIM_WARNING);
}
THCudaCheck(cudaGetLastError());
return val;
}
THC_API real
THCTensor_(maxall)(THCState *state, THCTensor *self) {
THCAssertSameGPU(THCTensor_(checkGPU)(state, 1, self));
real val;
if (!THC_reduceAll(state, self,
thrust::identity<real>(),
ReduceMax<real>(),
ReduceMax<real>(),
THCNumerics<real>::min(), &val, 0)) {
THArgCheck(false, 1, CUTORCH_DIM_WARNING);
}
THCudaCheck(cudaGetLastError());
return val;
}
THC_API real
THCTensor_(medianall)(THCState *state, THCTensor *self) {
THCAssertSameGPU(THCTensor_(checkGPU)(state, 1, self));
real val;
ptrdiff_t nelem, k;
nelem = THCTensor_(nElement)(state, self);
k = (nelem-1) >> 1;
THLongStorage *size = THLongStorage_newWithSize1(nelem);
THCTensor *view = THCTensor_(newView)(state, self, size);
THLongStorage_free(size);
THCTensor *sorted = THCTensor_(new)(state);
THCudaLongTensor *indices = THCudaLongTensor_new(state);
THCTensor_(sort)(state, sorted, indices, view, 0, 0);
val = THCTensor_(get1d)(state, sorted, k);
THCTensor_(free)(state, view);
THCTensor_(free)(state, sorted);
THCudaLongTensor_free(state, indices);
THCudaCheck(cudaGetLastError());
return val;
}
THC_API void
THCTensor_(median)(THCState *state,
THCTensor *values,
THCudaLongTensor *indices,
THCTensor *self,
int dimension,
int keepdim) {
THCAssertSameGPU(THCTensor_(checkGPU)(state, 1, self));
int64_t t_size_dim, k;
t_size_dim = THCTensor_(size)(state, self, dimension);
k = (t_size_dim-1) >> 1;
THCTensor *sorted = THCTensor_(new)(state);
THCudaLongTensor *sorted_indices = THCudaLongTensor_new(state);
THCTensor_(sort)(state, sorted, sorted_indices, self, dimension, 0);
THCTensor_(narrow)(state, values, sorted, dimension, k, 1);
THCudaLongTensor_narrow(state, indices, sorted_indices, dimension, k, 1);
THCTensor_(free)(state, sorted);
THCudaLongTensor_free(state, sorted_indices);
if (!keepdim) {
THCTensor_(squeeze1d)(state, values, values, dimension);
THCudaLongTensor_squeeze1d(state, indices, indices, dimension);
}
THCudaCheck(cudaGetLastError());
}
// Max of `src` along `dimension`, producing max values and their indices.
THC_API void
THCTensor_(max)(THCState *state,
                THCTensor *values,
                THCudaLongTensor *indices,
                THCTensor *src,
                int dimension,
                int keepdim) {
  THCAssertSameGPU(THCTensor_(checkGPU)(state, 3, values, indices, src));
  // Shorthand for the scalar type of this tensor specialization.
  typedef typename TensorUtils<THCTensor>::DataType scalar_t;
  // Seed the reduction with the smallest representable value (paired with
  // index 1) so that any real element/index pair wins the comparison.
  thrust::pair<scalar_t, int64_t>
    init(THCNumerics<scalar_t>::min(), (int64_t) 1);
  THC_reduceDimIndex(state, values, indices, src, dimension, keepdim, init,
                     MaxValuePair<scalar_t, int64_t>());
}
// Min of `src` along `dimension`, producing min values and their indices.
THC_API void
THCTensor_(min)(THCState *state,
                THCTensor *values,
                THCudaLongTensor *indices,
                THCTensor *src,
                int dimension,
                int keepdim) {
  THCAssertSameGPU(THCTensor_(checkGPU)(state, 3, values, indices, src));
  // Shorthand for the scalar type of this tensor specialization.
  typedef typename TensorUtils<THCTensor>::DataType scalar_t;
  // Seed the reduction with the largest representable value (paired with
  // index 1) so that any real element/index pair wins the comparison.
  thrust::pair<scalar_t, int64_t>
    init(THCNumerics<scalar_t>::max(), (int64_t) 1);
  THC_reduceDimIndex(state, values, indices, src, dimension, keepdim, init,
                     MinValuePair<scalar_t, int64_t>());
}
#endif
|
3ffc2de88cc70967afca0cf5f1e3bb6a1850da44.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
#include <stdio.h>
const int N = 16;          // number of characters/offsets processed
const int blocksize = 16;  // threads per block; one thread per element
// Adds the per-character offset b[i] to a[i]. There is no bounds check:
// the caller must launch exactly one thread per valid element.
__global__
void hello(char *a, int *b)
{
	const unsigned int i = threadIdx.x;
	a[i] = (char)(a[i] + b[i]);
}
/*
 * Builds "World!" from "Hello " by adding per-character offsets on the GPU,
 * then prints the combined "Hello World!". Every runtime call is checked:
 * a failed allocation or copy would otherwise silently corrupt the result.
 */
int main()
{
	char a[N] = "Hello \0\0\0\0\0\0";
	int b[N] = {15, 10, 6, 0, -11, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0};

	char *ad;
	int *bd;
	const int csize = N*sizeof(char);
	const int isize = N*sizeof(int);

	printf("%s", a);

	hipError_t err;
	/* Allocate device buffers and upload the inputs, checking each call. */
	if ((err = hipMalloc( (void**)&ad, csize )) != hipSuccess ||
	    (err = hipMalloc( (void**)&bd, isize )) != hipSuccess ||
	    (err = hipMemcpy( ad, a, csize, hipMemcpyHostToDevice )) != hipSuccess ||
	    (err = hipMemcpy( bd, b, isize, hipMemcpyHostToDevice )) != hipSuccess) {
		printf("Error: %s\n", hipGetErrorString(err));
		return 1;
	}

	dim3 dimBlock( blocksize, 1 );
	dim3 dimGrid( 1, 1 );
	hipLaunchKernelGGL(( hello), dim3(dimGrid), dim3(dimBlock), 0, 0, ad, bd);
	/* A kernel launch returns no status; query it explicitly. */
	err = hipGetLastError();
	if (err != hipSuccess)
		printf("Error: %s\n", hipGetErrorString(err));

	/* This blocking copy also synchronizes with the kernel above. */
	err = hipMemcpy( a, ad, csize, hipMemcpyDeviceToHost );
	if (err != hipSuccess)
		printf("Error: %s\n", hipGetErrorString(err));
	hipFree( ad );
	hipFree( bd );

	printf("%s\n", a);
	/* 0 rather than EXIT_SUCCESS: <stdlib.h> is not included directly. */
	return 0;
}
| 3ffc2de88cc70967afca0cf5f1e3bb6a1850da44.cu | #include <stdio.h>
const int N = 16;          // number of characters/offsets processed
const int blocksize = 16;  // threads per block; one thread per element
// Adds the per-character offset b[i] to a[i]. There is no bounds check:
// the caller must launch exactly one thread per valid element.
__global__
void hello(char *a, int *b)
{
	const unsigned int i = threadIdx.x;
	a[i] = (char)(a[i] + b[i]);
}
/*
 * Builds "World!" from "Hello " by adding per-character offsets on the GPU,
 * then prints the combined "Hello World!". Every runtime call is checked:
 * a failed allocation or copy would otherwise silently corrupt the result.
 */
int main()
{
	char a[N] = "Hello \0\0\0\0\0\0";
	int b[N] = {15, 10, 6, 0, -11, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0};

	char *ad;
	int *bd;
	const int csize = N*sizeof(char);
	const int isize = N*sizeof(int);

	printf("%s", a);

	cudaError_t err;
	/* Allocate device buffers and upload the inputs, checking each call. */
	if ((err = cudaMalloc( (void**)&ad, csize )) != cudaSuccess ||
	    (err = cudaMalloc( (void**)&bd, isize )) != cudaSuccess ||
	    (err = cudaMemcpy( ad, a, csize, cudaMemcpyHostToDevice )) != cudaSuccess ||
	    (err = cudaMemcpy( bd, b, isize, cudaMemcpyHostToDevice )) != cudaSuccess) {
		printf("Error: %s\n", cudaGetErrorString(err));
		return 1;
	}

	dim3 dimBlock( blocksize, 1 );
	dim3 dimGrid( 1, 1 );
	hello<<<dimGrid, dimBlock>>>(ad, bd);
	/* A kernel launch returns no status; query it explicitly. */
	err = cudaGetLastError();
	if (err != cudaSuccess)
		printf("Error: %s\n", cudaGetErrorString(err));

	/* This blocking copy also synchronizes with the kernel above. */
	err = cudaMemcpy( a, ad, csize, cudaMemcpyDeviceToHost );
	if (err != cudaSuccess)
		printf("Error: %s\n", cudaGetErrorString(err));
	cudaFree( ad );
	cudaFree( bd );

	printf("%s\n", a);
	/* 0, not EXIT_SUCCESS: only <stdio.h> is included in this file, so
	 * EXIT_SUCCESS from <stdlib.h> is not guaranteed to be declared. */
	return 0;
}
|
0a285742eba0d4ab4109bf1581e8e34058e11ce4.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
/*autor: Alejandro Ferro Bejerano*/
#include <stdio.h>
#include "image.h"
#define TRUE 1
#define FALSE 0
/* Parenthesized so the macro stays correct inside larger expressions. */
#define SIZE (IMAGE_WIDTH * IMAGE_HEIGHT)
#define RangoColores 256   /* 8-bit grayscale: 256 histogram bins */
#define Nbloques 1         /* grid size used for the kernel launch */
#define NThreads 256       /* must equal RangoColores (one thread per bin) */
/*
 * 256-bin grayscale histogram. Assumes blockDim.x == RangoColores (the
 * launch uses NThreads = 256): each thread clears, accumulates into, and
 * flushes exactly one shared-memory bin.
 */
__global__ void histograma_kernel(unsigned char *img, unsigned int *hist){
	__shared__ unsigned int bloque_hist[RangoColores];

	/* One bin per thread: clear it before any thread starts counting. */
	bloque_hist[threadIdx.x] = 0;
	__syncthreads();

	/* Grid-stride walk over the image, counting into shared memory. */
	for (int idx = blockIdx.x * blockDim.x + threadIdx.x;
	     idx < SIZE;
	     idx += blockDim.x * gridDim.x) {
		atomicAdd(&bloque_hist[img[idx]], 1);
	}

	/* Wait until every thread is done, then merge this block's bins into
	 * the global histogram with one atomic per bin. */
	__syncthreads();
	atomicAdd(&hist[threadIdx.x], bloque_hist[threadIdx.x]);
}
/*
 * Computes the 256-bin histogram of the embedded image on the GPU and
 * verifies it against the CPU by decrementing each pixel's bin: every bin
 * must end at zero. Returns 0 on success, -1 on any mismatch or launch
 * error. Unlike the previous version, device memory is always released
 * before exiting and kernel-launch errors are detected explicitly.
 */
int main(void){
	/* Load the image (raw 8-bit pixels from image.h). */
	unsigned char *img =(unsigned char*)image;

	unsigned int histograma[RangoColores];
	unsigned char *dev_image;
	unsigned int *dev_histograma;
	hipError_t err;

	/* Allocate device buffers; the device histogram starts zeroed. */
	hipMalloc((void**) &dev_image, SIZE);
	hipMalloc((void**) &dev_histograma, RangoColores * sizeof(int));
	hipMemset( dev_histograma, 0,RangoColores * sizeof( int ) );
	hipMemcpy(dev_image, img, SIZE, hipMemcpyHostToDevice);

	hipLaunchKernelGGL(( histograma_kernel), dim3(Nbloques),dim3(NThreads), 0, 0, dev_image,dev_histograma);
	/* A kernel launch returns no status; fetch it so a bad configuration
	 * does not masquerade as a wrong histogram. */
	err = hipGetLastError();
	if (err != hipSuccess) {
		printf("Error: %s\n", hipGetErrorString(err));
		hipFree(dev_image);
		hipFree(dev_histograma);
		return -1;
	}

	hipMemcpy(histograma, dev_histograma, RangoColores * sizeof(int), hipMemcpyDeviceToHost);
	/* The device buffers are no longer needed once the result is on the
	 * host; freeing here also covers the early-exit path below. */
	hipFree(dev_image);
	hipFree(dev_histograma);

	/* CPU check: undo the count of every pixel; all bins must reach 0. */
	for (int i=0; i<SIZE; i++) histograma[img[i]]--;
	for(int i=0; i< RangoColores; i++){
		if (histograma[i] !=0){
			printf("\nError: El clculo del histograma, no corresponde con el generado por la CPU\n\n");
			return -1;
		}
	}

	printf("Histograma generado correctamente, ;-)\n\n");
	return 0;
}
| 0a285742eba0d4ab4109bf1581e8e34058e11ce4.cu | /*autor: Alejandro Ferro Bejerano*/
#include <stdio.h>
#include "image.h"
#define TRUE 1
#define FALSE 0
/* Parenthesized so the macro stays correct inside larger expressions. */
#define SIZE (IMAGE_WIDTH * IMAGE_HEIGHT)
#define RangoColores 256   /* 8-bit grayscale: 256 histogram bins */
#define Nbloques 1         /* grid size used for the kernel launch */
#define NThreads 256       /* must equal RangoColores (one thread per bin) */
/*
 * 256-bin grayscale histogram. Assumes blockDim.x == RangoColores (the
 * launch uses NThreads = 256): each thread clears, accumulates into, and
 * flushes exactly one shared-memory bin.
 */
__global__ void histograma_kernel(unsigned char *img, unsigned int *hist){
	__shared__ unsigned int bloque_hist[RangoColores];

	/* One bin per thread: clear it before any thread starts counting. */
	bloque_hist[threadIdx.x] = 0;
	__syncthreads();

	/* Grid-stride walk over the image, counting into shared memory. */
	for (int idx = blockIdx.x * blockDim.x + threadIdx.x;
	     idx < SIZE;
	     idx += blockDim.x * gridDim.x) {
		atomicAdd(&bloque_hist[img[idx]], 1);
	}

	/* Wait until every thread is done, then merge this block's bins into
	 * the global histogram with one atomic per bin. */
	__syncthreads();
	atomicAdd(&hist[threadIdx.x], bloque_hist[threadIdx.x]);
}
/*
 * Computes the 256-bin histogram of the embedded image on the GPU and
 * verifies it against the CPU by decrementing each pixel's bin: every bin
 * must end at zero. Returns 0 on success, -1 on any mismatch or launch
 * error. Unlike the previous version, device memory is always released
 * before exiting and kernel-launch errors are detected explicitly.
 */
int main(void){
	/* Load the image (raw 8-bit pixels from image.h). */
	unsigned char *img =(unsigned char*)image;

	unsigned int histograma[RangoColores];
	unsigned char *dev_image;
	unsigned int *dev_histograma;
	cudaError_t err;

	/* Allocate device buffers; the device histogram starts zeroed. */
	cudaMalloc((void**) &dev_image, SIZE);
	cudaMalloc((void**) &dev_histograma, RangoColores * sizeof(int));
	cudaMemset( dev_histograma, 0,RangoColores * sizeof( int ) );
	cudaMemcpy(dev_image, img, SIZE, cudaMemcpyHostToDevice);

	histograma_kernel<<<Nbloques,NThreads>>>(dev_image,dev_histograma);
	/* A kernel launch returns no status; fetch it so a bad configuration
	 * does not masquerade as a wrong histogram. */
	err = cudaGetLastError();
	if (err != cudaSuccess) {
		printf("Error: %s\n", cudaGetErrorString(err));
		cudaFree(dev_image);
		cudaFree(dev_histograma);
		return -1;
	}

	cudaMemcpy(histograma, dev_histograma, RangoColores * sizeof(int), cudaMemcpyDeviceToHost);
	/* The device buffers are no longer needed once the result is on the
	 * host; freeing here also covers the early-exit path below. */
	cudaFree(dev_image);
	cudaFree(dev_histograma);

	/* CPU check: undo the count of every pixel; all bins must reach 0. */
	for (int i=0; i<SIZE; i++) histograma[img[i]]--;
	for(int i=0; i< RangoColores; i++){
		if (histograma[i] !=0){
			printf("\nError: El cálculo del histograma, no corresponde con el generado por la CPU\n\n");
			return -1;
		}
	}

	printf("Histograma generado correctamente, ;-)\n\n");
	return 0;
}
|
5e46c8d46d129024c67ab6a5e22a1b03d16d6bb1.hip | // !!! This is a file automatically generated by hipify!!!
#include <stdio.h>
#include <stdlib.h>
#include <hip/hip_runtime.h>
// Fixed-width integer shorthands used throughout the AES tables/kernels.
typedef unsigned long long int u64;
typedef unsigned int u32;
typedef unsigned short u16;
typedef unsigned char u8;
// Selector 0x0123 makes __byte_perm pick bytes 3,2,1,0 of its first
// operand, i.e. a 32-bit byte swap: GETU32/PUTU32 therefore load and
// store words in big-endian byte order.
#define ENDIAN_SELECTOR 0x00000123
#define GETU32(plaintext) \
    __byte_perm(*(u32*)(plaintext), 0, ENDIAN_SELECTOR)
#define PUTU32(ciphertext, st) \
    {*(u32*)(ciphertext) = __byte_perm((st), 0, ENDIAN_SELECTOR);}
// AES encryption T-table: Te0[x] = {02*S[x], S[x], S[x], 03*S[x]} — the
// SubBytes output folded with one MixColumns column (GF(2^8) multiples).
// Te1..Te3 are byte rotations of this table.
__constant__ u32 Te0[256] =
{
    0xc66363a5U, 0xf87c7c84U, 0xee777799U, 0xf67b7b8dU,
    0xfff2f20dU, 0xd66b6bbdU, 0xde6f6fb1U, 0x91c5c554U,
    0x60303050U, 0x02010103U, 0xce6767a9U, 0x562b2b7dU,
    0xe7fefe19U, 0xb5d7d762U, 0x4dababe6U, 0xec76769aU,
    0x8fcaca45U, 0x1f82829dU, 0x89c9c940U, 0xfa7d7d87U,
    0xeffafa15U, 0xb25959ebU, 0x8e4747c9U, 0xfbf0f00bU,
    0x41adadecU, 0xb3d4d467U, 0x5fa2a2fdU, 0x45afafeaU,
    0x239c9cbfU, 0x53a4a4f7U, 0xe4727296U, 0x9bc0c05bU,
    0x75b7b7c2U, 0xe1fdfd1cU, 0x3d9393aeU, 0x4c26266aU,
    0x6c36365aU, 0x7e3f3f41U, 0xf5f7f702U, 0x83cccc4fU,
    0x6834345cU, 0x51a5a5f4U, 0xd1e5e534U, 0xf9f1f108U,
    0xe2717193U, 0xabd8d873U, 0x62313153U, 0x2a15153fU,
    0x0804040cU, 0x95c7c752U, 0x46232365U, 0x9dc3c35eU,
    0x30181828U, 0x379696a1U, 0x0a05050fU, 0x2f9a9ab5U,
    0x0e070709U, 0x24121236U, 0x1b80809bU, 0xdfe2e23dU,
    0xcdebeb26U, 0x4e272769U, 0x7fb2b2cdU, 0xea75759fU,
    0x1209091bU, 0x1d83839eU, 0x582c2c74U, 0x341a1a2eU,
    0x361b1b2dU, 0xdc6e6eb2U, 0xb45a5aeeU, 0x5ba0a0fbU,
    0xa45252f6U, 0x763b3b4dU, 0xb7d6d661U, 0x7db3b3ceU,
    0x5229297bU, 0xdde3e33eU, 0x5e2f2f71U, 0x13848497U,
    0xa65353f5U, 0xb9d1d168U, 0x00000000U, 0xc1eded2cU,
    0x40202060U, 0xe3fcfc1fU, 0x79b1b1c8U, 0xb65b5bedU,
    0xd46a6abeU, 0x8dcbcb46U, 0x67bebed9U, 0x7239394bU,
    0x944a4adeU, 0x984c4cd4U, 0xb05858e8U, 0x85cfcf4aU,
    0xbbd0d06bU, 0xc5efef2aU, 0x4faaaae5U, 0xedfbfb16U,
    0x864343c5U, 0x9a4d4dd7U, 0x66333355U, 0x11858594U,
    0x8a4545cfU, 0xe9f9f910U, 0x04020206U, 0xfe7f7f81U,
    0xa05050f0U, 0x783c3c44U, 0x259f9fbaU, 0x4ba8a8e3U,
    0xa25151f3U, 0x5da3a3feU, 0x804040c0U, 0x058f8f8aU,
    0x3f9292adU, 0x219d9dbcU, 0x70383848U, 0xf1f5f504U,
    0x63bcbcdfU, 0x77b6b6c1U, 0xafdada75U, 0x42212163U,
    0x20101030U, 0xe5ffff1aU, 0xfdf3f30eU, 0xbfd2d26dU,
    0x81cdcd4cU, 0x180c0c14U, 0x26131335U, 0xc3ecec2fU,
    0xbe5f5fe1U, 0x359797a2U, 0x884444ccU, 0x2e171739U,
    0x93c4c457U, 0x55a7a7f2U, 0xfc7e7e82U, 0x7a3d3d47U,
    0xc86464acU, 0xba5d5de7U, 0x3219192bU, 0xe6737395U,
    0xc06060a0U, 0x19818198U, 0x9e4f4fd1U, 0xa3dcdc7fU,
    0x44222266U, 0x542a2a7eU, 0x3b9090abU, 0x0b888883U,
    0x8c4646caU, 0xc7eeee29U, 0x6bb8b8d3U, 0x2814143cU,
    0xa7dede79U, 0xbc5e5ee2U, 0x160b0b1dU, 0xaddbdb76U,
    0xdbe0e03bU, 0x64323256U, 0x743a3a4eU, 0x140a0a1eU,
    0x924949dbU, 0x0c06060aU, 0x4824246cU, 0xb85c5ce4U,
    0x9fc2c25dU, 0xbdd3d36eU, 0x43acacefU, 0xc46262a6U,
    0x399191a8U, 0x319595a4U, 0xd3e4e437U, 0xf279798bU,
    0xd5e7e732U, 0x8bc8c843U, 0x6e373759U, 0xda6d6db7U,
    0x018d8d8cU, 0xb1d5d564U, 0x9c4e4ed2U, 0x49a9a9e0U,
    0xd86c6cb4U, 0xac5656faU, 0xf3f4f407U, 0xcfeaea25U,
    0xca6565afU, 0xf47a7a8eU, 0x47aeaee9U, 0x10080818U,
    0x6fbabad5U, 0xf0787888U, 0x4a25256fU, 0x5c2e2e72U,
    0x381c1c24U, 0x57a6a6f1U, 0x73b4b4c7U, 0x97c6c651U,
    0xcbe8e823U, 0xa1dddd7cU, 0xe874749cU, 0x3e1f1f21U,
    0x964b4bddU, 0x61bdbddcU, 0x0d8b8b86U, 0x0f8a8a85U,
    0xe0707090U, 0x7c3e3e42U, 0x71b5b5c4U, 0xcc6666aaU,
    0x904848d8U, 0x06030305U, 0xf7f6f601U, 0x1c0e0e12U,
    0xc26161a3U, 0x6a35355fU, 0xae5757f9U, 0x69b9b9d0U,
    0x17868691U, 0x99c1c158U, 0x3a1d1d27U, 0x279e9eb9U,
    0xd9e1e138U, 0xebf8f813U, 0x2b9898b3U, 0x22111133U,
    0xd26969bbU, 0xa9d9d970U, 0x078e8e89U, 0x339494a7U,
    0x2d9b9bb6U, 0x3c1e1e22U, 0x15878792U, 0xc9e9e920U,
    0x87cece49U, 0xaa5555ffU, 0x50282878U, 0xa5dfdf7aU,
    0x038c8c8fU, 0x59a1a1f8U, 0x09898980U, 0x1a0d0d17U,
    0x65bfbfdaU, 0xd7e6e631U, 0x844242c6U, 0xd06868b8U,
    0x824141c3U, 0x299999b0U, 0x5a2d2d77U, 0x1e0f0f11U,
    0x7bb0b0cbU, 0xa85454fcU, 0x6dbbbbd6U, 0x2c16163aU,
};
// AES encryption T-table: Te1[x] is Te0[x] rotated by one byte — same
// SubBytes+MixColumns data aligned for the next input byte position.
__constant__ u32 Te1[256] =
{
    0xa5c66363U, 0x84f87c7cU, 0x99ee7777U, 0x8df67b7bU,
    0x0dfff2f2U, 0xbdd66b6bU, 0xb1de6f6fU, 0x5491c5c5U,
    0x50603030U, 0x03020101U, 0xa9ce6767U, 0x7d562b2bU,
    0x19e7fefeU, 0x62b5d7d7U, 0xe64dababU, 0x9aec7676U,
    0x458fcacaU, 0x9d1f8282U, 0x4089c9c9U, 0x87fa7d7dU,
    0x15effafaU, 0xebb25959U, 0xc98e4747U, 0x0bfbf0f0U,
    0xec41adadU, 0x67b3d4d4U, 0xfd5fa2a2U, 0xea45afafU,
    0xbf239c9cU, 0xf753a4a4U, 0x96e47272U, 0x5b9bc0c0U,
    0xc275b7b7U, 0x1ce1fdfdU, 0xae3d9393U, 0x6a4c2626U,
    0x5a6c3636U, 0x417e3f3fU, 0x02f5f7f7U, 0x4f83ccccU,
    0x5c683434U, 0xf451a5a5U, 0x34d1e5e5U, 0x08f9f1f1U,
    0x93e27171U, 0x73abd8d8U, 0x53623131U, 0x3f2a1515U,
    0x0c080404U, 0x5295c7c7U, 0x65462323U, 0x5e9dc3c3U,
    0x28301818U, 0xa1379696U, 0x0f0a0505U, 0xb52f9a9aU,
    0x090e0707U, 0x36241212U, 0x9b1b8080U, 0x3ddfe2e2U,
    0x26cdebebU, 0x694e2727U, 0xcd7fb2b2U, 0x9fea7575U,
    0x1b120909U, 0x9e1d8383U, 0x74582c2cU, 0x2e341a1aU,
    0x2d361b1bU, 0xb2dc6e6eU, 0xeeb45a5aU, 0xfb5ba0a0U,
    0xf6a45252U, 0x4d763b3bU, 0x61b7d6d6U, 0xce7db3b3U,
    0x7b522929U, 0x3edde3e3U, 0x715e2f2fU, 0x97138484U,
    0xf5a65353U, 0x68b9d1d1U, 0x00000000U, 0x2cc1ededU,
    0x60402020U, 0x1fe3fcfcU, 0xc879b1b1U, 0xedb65b5bU,
    0xbed46a6aU, 0x468dcbcbU, 0xd967bebeU, 0x4b723939U,
    0xde944a4aU, 0xd4984c4cU, 0xe8b05858U, 0x4a85cfcfU,
    0x6bbbd0d0U, 0x2ac5efefU, 0xe54faaaaU, 0x16edfbfbU,
    0xc5864343U, 0xd79a4d4dU, 0x55663333U, 0x94118585U,
    0xcf8a4545U, 0x10e9f9f9U, 0x06040202U, 0x81fe7f7fU,
    0xf0a05050U, 0x44783c3cU, 0xba259f9fU, 0xe34ba8a8U,
    0xf3a25151U, 0xfe5da3a3U, 0xc0804040U, 0x8a058f8fU,
    0xad3f9292U, 0xbc219d9dU, 0x48703838U, 0x04f1f5f5U,
    0xdf63bcbcU, 0xc177b6b6U, 0x75afdadaU, 0x63422121U,
    0x30201010U, 0x1ae5ffffU, 0x0efdf3f3U, 0x6dbfd2d2U,
    0x4c81cdcdU, 0x14180c0cU, 0x35261313U, 0x2fc3ececU,
    0xe1be5f5fU, 0xa2359797U, 0xcc884444U, 0x392e1717U,
    0x5793c4c4U, 0xf255a7a7U, 0x82fc7e7eU, 0x477a3d3dU,
    0xacc86464U, 0xe7ba5d5dU, 0x2b321919U, 0x95e67373U,
    0xa0c06060U, 0x98198181U, 0xd19e4f4fU, 0x7fa3dcdcU,
    0x66442222U, 0x7e542a2aU, 0xab3b9090U, 0x830b8888U,
    0xca8c4646U, 0x29c7eeeeU, 0xd36bb8b8U, 0x3c281414U,
    0x79a7dedeU, 0xe2bc5e5eU, 0x1d160b0bU, 0x76addbdbU,
    0x3bdbe0e0U, 0x56643232U, 0x4e743a3aU, 0x1e140a0aU,
    0xdb924949U, 0x0a0c0606U, 0x6c482424U, 0xe4b85c5cU,
    0x5d9fc2c2U, 0x6ebdd3d3U, 0xef43acacU, 0xa6c46262U,
    0xa8399191U, 0xa4319595U, 0x37d3e4e4U, 0x8bf27979U,
    0x32d5e7e7U, 0x438bc8c8U, 0x596e3737U, 0xb7da6d6dU,
    0x8c018d8dU, 0x64b1d5d5U, 0xd29c4e4eU, 0xe049a9a9U,
    0xb4d86c6cU, 0xfaac5656U, 0x07f3f4f4U, 0x25cfeaeaU,
    0xafca6565U, 0x8ef47a7aU, 0xe947aeaeU, 0x18100808U,
    0xd56fbabaU, 0x88f07878U, 0x6f4a2525U, 0x725c2e2eU,
    0x24381c1cU, 0xf157a6a6U, 0xc773b4b4U, 0x5197c6c6U,
    0x23cbe8e8U, 0x7ca1ddddU, 0x9ce87474U, 0x213e1f1fU,
    0xdd964b4bU, 0xdc61bdbdU, 0x860d8b8bU, 0x850f8a8aU,
    0x90e07070U, 0x427c3e3eU, 0xc471b5b5U, 0xaacc6666U,
    0xd8904848U, 0x05060303U, 0x01f7f6f6U, 0x121c0e0eU,
    0xa3c26161U, 0x5f6a3535U, 0xf9ae5757U, 0xd069b9b9U,
    0x91178686U, 0x5899c1c1U, 0x273a1d1dU, 0xb9279e9eU,
    0x38d9e1e1U, 0x13ebf8f8U, 0xb32b9898U, 0x33221111U,
    0xbbd26969U, 0x70a9d9d9U, 0x89078e8eU, 0xa7339494U,
    0xb62d9b9bU, 0x223c1e1eU, 0x92158787U, 0x20c9e9e9U,
    0x4987ceceU, 0xffaa5555U, 0x78502828U, 0x7aa5dfdfU,
    0x8f038c8cU, 0xf859a1a1U, 0x80098989U, 0x171a0d0dU,
    0xda65bfbfU, 0x31d7e6e6U, 0xc6844242U, 0xb8d06868U,
    0xc3824141U, 0xb0299999U, 0x775a2d2dU, 0x111e0f0fU,
    0xcb7bb0b0U, 0xfca85454U, 0xd66dbbbbU, 0x3a2c1616U,
};
// AES encryption T-table: Te2[x] is Te0[x] rotated by two bytes — same
// SubBytes+MixColumns data aligned for the third input byte position.
__constant__ u32 Te2[256] =
{
    0x63a5c663U, 0x7c84f87cU, 0x7799ee77U, 0x7b8df67bU,
    0xf20dfff2U, 0x6bbdd66bU, 0x6fb1de6fU, 0xc55491c5U,
    0x30506030U, 0x01030201U, 0x67a9ce67U, 0x2b7d562bU,
    0xfe19e7feU, 0xd762b5d7U, 0xabe64dabU, 0x769aec76U,
    0xca458fcaU, 0x829d1f82U, 0xc94089c9U, 0x7d87fa7dU,
    0xfa15effaU, 0x59ebb259U, 0x47c98e47U, 0xf00bfbf0U,
    0xadec41adU, 0xd467b3d4U, 0xa2fd5fa2U, 0xafea45afU,
    0x9cbf239cU, 0xa4f753a4U, 0x7296e472U, 0xc05b9bc0U,
    0xb7c275b7U, 0xfd1ce1fdU, 0x93ae3d93U, 0x266a4c26U,
    0x365a6c36U, 0x3f417e3fU, 0xf702f5f7U, 0xcc4f83ccU,
    0x345c6834U, 0xa5f451a5U, 0xe534d1e5U, 0xf108f9f1U,
    0x7193e271U, 0xd873abd8U, 0x31536231U, 0x153f2a15U,
    0x040c0804U, 0xc75295c7U, 0x23654623U, 0xc35e9dc3U,
    0x18283018U, 0x96a13796U, 0x050f0a05U, 0x9ab52f9aU,
    0x07090e07U, 0x12362412U, 0x809b1b80U, 0xe23ddfe2U,
    0xeb26cdebU, 0x27694e27U, 0xb2cd7fb2U, 0x759fea75U,
    0x091b1209U, 0x839e1d83U, 0x2c74582cU, 0x1a2e341aU,
    0x1b2d361bU, 0x6eb2dc6eU, 0x5aeeb45aU, 0xa0fb5ba0U,
    0x52f6a452U, 0x3b4d763bU, 0xd661b7d6U, 0xb3ce7db3U,
    0x297b5229U, 0xe33edde3U, 0x2f715e2fU, 0x84971384U,
    0x53f5a653U, 0xd168b9d1U, 0x00000000U, 0xed2cc1edU,
    0x20604020U, 0xfc1fe3fcU, 0xb1c879b1U, 0x5bedb65bU,
    0x6abed46aU, 0xcb468dcbU, 0xbed967beU, 0x394b7239U,
    0x4ade944aU, 0x4cd4984cU, 0x58e8b058U, 0xcf4a85cfU,
    0xd06bbbd0U, 0xef2ac5efU, 0xaae54faaU, 0xfb16edfbU,
    0x43c58643U, 0x4dd79a4dU, 0x33556633U, 0x85941185U,
    0x45cf8a45U, 0xf910e9f9U, 0x02060402U, 0x7f81fe7fU,
    0x50f0a050U, 0x3c44783cU, 0x9fba259fU, 0xa8e34ba8U,
    0x51f3a251U, 0xa3fe5da3U, 0x40c08040U, 0x8f8a058fU,
    0x92ad3f92U, 0x9dbc219dU, 0x38487038U, 0xf504f1f5U,
    0xbcdf63bcU, 0xb6c177b6U, 0xda75afdaU, 0x21634221U,
    0x10302010U, 0xff1ae5ffU, 0xf30efdf3U, 0xd26dbfd2U,
    0xcd4c81cdU, 0x0c14180cU, 0x13352613U, 0xec2fc3ecU,
    0x5fe1be5fU, 0x97a23597U, 0x44cc8844U, 0x17392e17U,
    0xc45793c4U, 0xa7f255a7U, 0x7e82fc7eU, 0x3d477a3dU,
    0x64acc864U, 0x5de7ba5dU, 0x192b3219U, 0x7395e673U,
    0x60a0c060U, 0x81981981U, 0x4fd19e4fU, 0xdc7fa3dcU,
    0x22664422U, 0x2a7e542aU, 0x90ab3b90U, 0x88830b88U,
    0x46ca8c46U, 0xee29c7eeU, 0xb8d36bb8U, 0x143c2814U,
    0xde79a7deU, 0x5ee2bc5eU, 0x0b1d160bU, 0xdb76addbU,
    0xe03bdbe0U, 0x32566432U, 0x3a4e743aU, 0x0a1e140aU,
    0x49db9249U, 0x060a0c06U, 0x246c4824U, 0x5ce4b85cU,
    0xc25d9fc2U, 0xd36ebdd3U, 0xacef43acU, 0x62a6c462U,
    0x91a83991U, 0x95a43195U, 0xe437d3e4U, 0x798bf279U,
    0xe732d5e7U, 0xc8438bc8U, 0x37596e37U, 0x6db7da6dU,
    0x8d8c018dU, 0xd564b1d5U, 0x4ed29c4eU, 0xa9e049a9U,
    0x6cb4d86cU, 0x56faac56U, 0xf407f3f4U, 0xea25cfeaU,
    0x65afca65U, 0x7a8ef47aU, 0xaee947aeU, 0x08181008U,
    0xbad56fbaU, 0x7888f078U, 0x256f4a25U, 0x2e725c2eU,
    0x1c24381cU, 0xa6f157a6U, 0xb4c773b4U, 0xc65197c6U,
    0xe823cbe8U, 0xdd7ca1ddU, 0x749ce874U, 0x1f213e1fU,
    0x4bdd964bU, 0xbddc61bdU, 0x8b860d8bU, 0x8a850f8aU,
    0x7090e070U, 0x3e427c3eU, 0xb5c471b5U, 0x66aacc66U,
    0x48d89048U, 0x03050603U, 0xf601f7f6U, 0x0e121c0eU,
    0x61a3c261U, 0x355f6a35U, 0x57f9ae57U, 0xb9d069b9U,
    0x86911786U, 0xc15899c1U, 0x1d273a1dU, 0x9eb9279eU,
    0xe138d9e1U, 0xf813ebf8U, 0x98b32b98U, 0x11332211U,
    0x69bbd269U, 0xd970a9d9U, 0x8e89078eU, 0x94a73394U,
    0x9bb62d9bU, 0x1e223c1eU, 0x87921587U, 0xe920c9e9U,
    0xce4987ceU, 0x55ffaa55U, 0x28785028U, 0xdf7aa5dfU,
    0x8c8f038cU, 0xa1f859a1U, 0x89800989U, 0x0d171a0dU,
    0xbfda65bfU, 0xe631d7e6U, 0x42c68442U, 0x68b8d068U,
    0x41c38241U, 0x99b02999U, 0x2d775a2dU, 0x0f111e0fU,
    0xb0cb7bb0U, 0x54fca854U, 0xbbd66dbbU, 0x163a2c16U,
};
// AES encryption T-table: Te3[x] is Te0[x] rotated by three bytes — same
// SubBytes+MixColumns data aligned for the fourth input byte position.
__constant__ u32 Te3[256] =
{
    0x6363a5c6U, 0x7c7c84f8U, 0x777799eeU, 0x7b7b8df6U,
    0xf2f20dffU, 0x6b6bbdd6U, 0x6f6fb1deU, 0xc5c55491U,
    0x30305060U, 0x01010302U, 0x6767a9ceU, 0x2b2b7d56U,
    0xfefe19e7U, 0xd7d762b5U, 0xababe64dU, 0x76769aecU,
    0xcaca458fU, 0x82829d1fU, 0xc9c94089U, 0x7d7d87faU,
    0xfafa15efU, 0x5959ebb2U, 0x4747c98eU, 0xf0f00bfbU,
    0xadadec41U, 0xd4d467b3U, 0xa2a2fd5fU, 0xafafea45U,
    0x9c9cbf23U, 0xa4a4f753U, 0x727296e4U, 0xc0c05b9bU,
    0xb7b7c275U, 0xfdfd1ce1U, 0x9393ae3dU, 0x26266a4cU,
    0x36365a6cU, 0x3f3f417eU, 0xf7f702f5U, 0xcccc4f83U,
    0x34345c68U, 0xa5a5f451U, 0xe5e534d1U, 0xf1f108f9U,
    0x717193e2U, 0xd8d873abU, 0x31315362U, 0x15153f2aU,
    0x04040c08U, 0xc7c75295U, 0x23236546U, 0xc3c35e9dU,
    0x18182830U, 0x9696a137U, 0x05050f0aU, 0x9a9ab52fU,
    0x0707090eU, 0x12123624U, 0x80809b1bU, 0xe2e23ddfU,
    0xebeb26cdU, 0x2727694eU, 0xb2b2cd7fU, 0x75759feaU,
    0x09091b12U, 0x83839e1dU, 0x2c2c7458U, 0x1a1a2e34U,
    0x1b1b2d36U, 0x6e6eb2dcU, 0x5a5aeeb4U, 0xa0a0fb5bU,
    0x5252f6a4U, 0x3b3b4d76U, 0xd6d661b7U, 0xb3b3ce7dU,
    0x29297b52U, 0xe3e33eddU, 0x2f2f715eU, 0x84849713U,
    0x5353f5a6U, 0xd1d168b9U, 0x00000000U, 0xeded2cc1U,
    0x20206040U, 0xfcfc1fe3U, 0xb1b1c879U, 0x5b5bedb6U,
    0x6a6abed4U, 0xcbcb468dU, 0xbebed967U, 0x39394b72U,
    0x4a4ade94U, 0x4c4cd498U, 0x5858e8b0U, 0xcfcf4a85U,
    0xd0d06bbbU, 0xefef2ac5U, 0xaaaae54fU, 0xfbfb16edU,
    0x4343c586U, 0x4d4dd79aU, 0x33335566U, 0x85859411U,
    0x4545cf8aU, 0xf9f910e9U, 0x02020604U, 0x7f7f81feU,
    0x5050f0a0U, 0x3c3c4478U, 0x9f9fba25U, 0xa8a8e34bU,
    0x5151f3a2U, 0xa3a3fe5dU, 0x4040c080U, 0x8f8f8a05U,
    0x9292ad3fU, 0x9d9dbc21U, 0x38384870U, 0xf5f504f1U,
    0xbcbcdf63U, 0xb6b6c177U, 0xdada75afU, 0x21216342U,
    0x10103020U, 0xffff1ae5U, 0xf3f30efdU, 0xd2d26dbfU,
    0xcdcd4c81U, 0x0c0c1418U, 0x13133526U, 0xecec2fc3U,
    0x5f5fe1beU, 0x9797a235U, 0x4444cc88U, 0x1717392eU,
    0xc4c45793U, 0xa7a7f255U, 0x7e7e82fcU, 0x3d3d477aU,
    0x6464acc8U, 0x5d5de7baU, 0x19192b32U, 0x737395e6U,
    0x6060a0c0U, 0x81819819U, 0x4f4fd19eU, 0xdcdc7fa3U,
    0x22226644U, 0x2a2a7e54U, 0x9090ab3bU, 0x8888830bU,
    0x4646ca8cU, 0xeeee29c7U, 0xb8b8d36bU, 0x14143c28U,
    0xdede79a7U, 0x5e5ee2bcU, 0x0b0b1d16U, 0xdbdb76adU,
    0xe0e03bdbU, 0x32325664U, 0x3a3a4e74U, 0x0a0a1e14U,
    0x4949db92U, 0x06060a0cU, 0x24246c48U, 0x5c5ce4b8U,
    0xc2c25d9fU, 0xd3d36ebdU, 0xacacef43U, 0x6262a6c4U,
    0x9191a839U, 0x9595a431U, 0xe4e437d3U, 0x79798bf2U,
    0xe7e732d5U, 0xc8c8438bU, 0x3737596eU, 0x6d6db7daU,
    0x8d8d8c01U, 0xd5d564b1U, 0x4e4ed29cU, 0xa9a9e049U,
    0x6c6cb4d8U, 0x5656faacU, 0xf4f407f3U, 0xeaea25cfU,
    0x6565afcaU, 0x7a7a8ef4U, 0xaeaee947U, 0x08081810U,
    0xbabad56fU, 0x787888f0U, 0x25256f4aU, 0x2e2e725cU,
    0x1c1c2438U, 0xa6a6f157U, 0xb4b4c773U, 0xc6c65197U,
    0xe8e823cbU, 0xdddd7ca1U, 0x74749ce8U, 0x1f1f213eU,
    0x4b4bdd96U, 0xbdbddc61U, 0x8b8b860dU, 0x8a8a850fU,
    0x707090e0U, 0x3e3e427cU, 0xb5b5c471U, 0x6666aaccU,
    0x4848d890U, 0x03030506U, 0xf6f601f7U, 0x0e0e121cU,
    0x6161a3c2U, 0x35355f6aU, 0x5757f9aeU, 0xb9b9d069U,
    0x86869117U, 0xc1c15899U, 0x1d1d273aU, 0x9e9eb927U,
    0xe1e138d9U, 0xf8f813ebU, 0x9898b32bU, 0x11113322U,
    0x6969bbd2U, 0xd9d970a9U, 0x8e8e8907U, 0x9494a733U,
    0x9b9bb62dU, 0x1e1e223cU, 0x87879215U, 0xe9e920c9U,
    0xcece4987U, 0x5555ffaaU, 0x28287850U, 0xdfdf7aa5U,
    0x8c8c8f03U, 0xa1a1f859U, 0x89898009U, 0x0d0d171aU,
    0xbfbfda65U, 0xe6e631d7U, 0x4242c684U, 0x6868b8d0U,
    0x4141c382U, 0x9999b029U, 0x2d2d775aU, 0x0f0f111eU,
    0xb0b0cb7bU, 0x5454fca8U, 0xbbbbd66dU, 0x16163a2cU,
};
// AES S-box table: Te4[x] = S[x] replicated into all four bytes — an
// un-mixed SubBytes lookup (used where MixColumns must be skipped,
// presumably the final round / key schedule; confirm against the kernels).
__constant__ u32 Te4[256] =
{
    0x63636363U, 0x7c7c7c7cU, 0x77777777U, 0x7b7b7b7bU,
    0xf2f2f2f2U, 0x6b6b6b6bU, 0x6f6f6f6fU, 0xc5c5c5c5U,
    0x30303030U, 0x01010101U, 0x67676767U, 0x2b2b2b2bU,
    0xfefefefeU, 0xd7d7d7d7U, 0xababababU, 0x76767676U,
    0xcacacacaU, 0x82828282U, 0xc9c9c9c9U, 0x7d7d7d7dU,
    0xfafafafaU, 0x59595959U, 0x47474747U, 0xf0f0f0f0U,
    0xadadadadU, 0xd4d4d4d4U, 0xa2a2a2a2U, 0xafafafafU,
    0x9c9c9c9cU, 0xa4a4a4a4U, 0x72727272U, 0xc0c0c0c0U,
    0xb7b7b7b7U, 0xfdfdfdfdU, 0x93939393U, 0x26262626U,
    0x36363636U, 0x3f3f3f3fU, 0xf7f7f7f7U, 0xccccccccU,
    0x34343434U, 0xa5a5a5a5U, 0xe5e5e5e5U, 0xf1f1f1f1U,
    0x71717171U, 0xd8d8d8d8U, 0x31313131U, 0x15151515U,
    0x04040404U, 0xc7c7c7c7U, 0x23232323U, 0xc3c3c3c3U,
    0x18181818U, 0x96969696U, 0x05050505U, 0x9a9a9a9aU,
    0x07070707U, 0x12121212U, 0x80808080U, 0xe2e2e2e2U,
    0xebebebebU, 0x27272727U, 0xb2b2b2b2U, 0x75757575U,
    0x09090909U, 0x83838383U, 0x2c2c2c2cU, 0x1a1a1a1aU,
    0x1b1b1b1bU, 0x6e6e6e6eU, 0x5a5a5a5aU, 0xa0a0a0a0U,
    0x52525252U, 0x3b3b3b3bU, 0xd6d6d6d6U, 0xb3b3b3b3U,
    0x29292929U, 0xe3e3e3e3U, 0x2f2f2f2fU, 0x84848484U,
    0x53535353U, 0xd1d1d1d1U, 0x00000000U, 0xededededU,
    0x20202020U, 0xfcfcfcfcU, 0xb1b1b1b1U, 0x5b5b5b5bU,
    0x6a6a6a6aU, 0xcbcbcbcbU, 0xbebebebeU, 0x39393939U,
    0x4a4a4a4aU, 0x4c4c4c4cU, 0x58585858U, 0xcfcfcfcfU,
    0xd0d0d0d0U, 0xefefefefU, 0xaaaaaaaaU, 0xfbfbfbfbU,
    0x43434343U, 0x4d4d4d4dU, 0x33333333U, 0x85858585U,
    0x45454545U, 0xf9f9f9f9U, 0x02020202U, 0x7f7f7f7fU,
    0x50505050U, 0x3c3c3c3cU, 0x9f9f9f9fU, 0xa8a8a8a8U,
    0x51515151U, 0xa3a3a3a3U, 0x40404040U, 0x8f8f8f8fU,
    0x92929292U, 0x9d9d9d9dU, 0x38383838U, 0xf5f5f5f5U,
    0xbcbcbcbcU, 0xb6b6b6b6U, 0xdadadadaU, 0x21212121U,
    0x10101010U, 0xffffffffU, 0xf3f3f3f3U, 0xd2d2d2d2U,
    0xcdcdcdcdU, 0x0c0c0c0cU, 0x13131313U, 0xececececU,
    0x5f5f5f5fU, 0x97979797U, 0x44444444U, 0x17171717U,
    0xc4c4c4c4U, 0xa7a7a7a7U, 0x7e7e7e7eU, 0x3d3d3d3dU,
    0x64646464U, 0x5d5d5d5dU, 0x19191919U, 0x73737373U,
    0x60606060U, 0x81818181U, 0x4f4f4f4fU, 0xdcdcdcdcU,
    0x22222222U, 0x2a2a2a2aU, 0x90909090U, 0x88888888U,
    0x46464646U, 0xeeeeeeeeU, 0xb8b8b8b8U, 0x14141414U,
    0xdedededeU, 0x5e5e5e5eU, 0x0b0b0b0bU, 0xdbdbdbdbU,
    0xe0e0e0e0U, 0x32323232U, 0x3a3a3a3aU, 0x0a0a0a0aU,
    0x49494949U, 0x06060606U, 0x24242424U, 0x5c5c5c5cU,
    0xc2c2c2c2U, 0xd3d3d3d3U, 0xacacacacU, 0x62626262U,
    0x91919191U, 0x95959595U, 0xe4e4e4e4U, 0x79797979U,
    0xe7e7e7e7U, 0xc8c8c8c8U, 0x37373737U, 0x6d6d6d6dU,
    0x8d8d8d8dU, 0xd5d5d5d5U, 0x4e4e4e4eU, 0xa9a9a9a9U,
    0x6c6c6c6cU, 0x56565656U, 0xf4f4f4f4U, 0xeaeaeaeaU,
    0x65656565U, 0x7a7a7a7aU, 0xaeaeaeaeU, 0x08080808U,
    0xbabababaU, 0x78787878U, 0x25252525U, 0x2e2e2e2eU,
    0x1c1c1c1cU, 0xa6a6a6a6U, 0xb4b4b4b4U, 0xc6c6c6c6U,
    0xe8e8e8e8U, 0xddddddddU, 0x74747474U, 0x1f1f1f1fU,
    0x4b4b4b4bU, 0xbdbdbdbdU, 0x8b8b8b8bU, 0x8a8a8a8aU,
    0x70707070U, 0x3e3e3e3eU, 0xb5b5b5b5U, 0x66666666U,
    0x48484848U, 0x03030303U, 0xf6f6f6f6U, 0x0e0e0e0eU,
    0x61616161U, 0x35353535U, 0x57575757U, 0xb9b9b9b9U,
    0x86868686U, 0xc1c1c1c1U, 0x1d1d1d1dU, 0x9e9e9e9eU,
    0xe1e1e1e1U, 0xf8f8f8f8U, 0x98989898U, 0x11111111U,
    0x69696969U, 0xd9d9d9d9U, 0x8e8e8e8eU, 0x94949494U,
    0x9b9b9b9bU, 0x1e1e1e1eU, 0x87878787U, 0xe9e9e9e9U,
    0xcecececeU, 0x55555555U, 0x28282828U, 0xdfdfdfdfU,
    0x8c8c8c8cU, 0xa1a1a1a1U, 0x89898989U, 0x0d0d0d0dU,
    0xbfbfbfbfU, 0xe6e6e6e6U, 0x42424242U, 0x68686868U,
    0x41414141U, 0x99999999U, 0x2d2d2d2dU, 0x0f0f0f0fU,
    0xb0b0b0b0U, 0x54545454U, 0xbbbbbbbbU, 0x16161616U,
};
// AES decryption T-table: Td0 folds InvSubBytes with InvMixColumns
// (inverse-cipher counterpart of Te0). Td1..Td3 are byte rotations of
// this table.
__constant__ u32 Td0[256] =
{
    0x51f4a750U, 0x7e416553U, 0x1a17a4c3U, 0x3a275e96U,
    0x3bab6bcbU, 0x1f9d45f1U, 0xacfa58abU, 0x4be30393U,
    0x2030fa55U, 0xad766df6U, 0x88cc7691U, 0xf5024c25U,
    0x4fe5d7fcU, 0xc52acbd7U, 0x26354480U, 0xb562a38fU,
    0xdeb15a49U, 0x25ba1b67U, 0x45ea0e98U, 0x5dfec0e1U,
    0xc32f7502U, 0x814cf012U, 0x8d4697a3U, 0x6bd3f9c6U,
    0x038f5fe7U, 0x15929c95U, 0xbf6d7aebU, 0x955259daU,
    0xd4be832dU, 0x587421d3U, 0x49e06929U, 0x8ec9c844U,
    0x75c2896aU, 0xf48e7978U, 0x99583e6bU, 0x27b971ddU,
    0xbee14fb6U, 0xf088ad17U, 0xc920ac66U, 0x7dce3ab4U,
    0x63df4a18U, 0xe51a3182U, 0x97513360U, 0x62537f45U,
    0xb16477e0U, 0xbb6bae84U, 0xfe81a01cU, 0xf9082b94U,
    0x70486858U, 0x8f45fd19U, 0x94de6c87U, 0x527bf8b7U,
    0xab73d323U, 0x724b02e2U, 0xe31f8f57U, 0x6655ab2aU,
    0xb2eb2807U, 0x2fb5c203U, 0x86c57b9aU, 0xd33708a5U,
    0x302887f2U, 0x23bfa5b2U, 0x02036abaU, 0xed16825cU,
    0x8acf1c2bU, 0xa779b492U, 0xf307f2f0U, 0x4e69e2a1U,
    0x65daf4cdU, 0x0605bed5U, 0xd134621fU, 0xc4a6fe8aU,
    0x342e539dU, 0xa2f355a0U, 0x058ae132U, 0xa4f6eb75U,
    0x0b83ec39U, 0x4060efaaU, 0x5e719f06U, 0xbd6e1051U,
    0x3e218af9U, 0x96dd063dU, 0xdd3e05aeU, 0x4de6bd46U,
    0x91548db5U, 0x71c45d05U, 0x0406d46fU, 0x605015ffU,
    0x1998fb24U, 0xd6bde997U, 0x894043ccU, 0x67d99e77U,
    0xb0e842bdU, 0x07898b88U, 0xe7195b38U, 0x79c8eedbU,
    0xa17c0a47U, 0x7c420fe9U, 0xf8841ec9U, 0x00000000U,
    0x09808683U, 0x322bed48U, 0x1e1170acU, 0x6c5a724eU,
    0xfd0efffbU, 0x0f853856U, 0x3daed51eU, 0x362d3927U,
    0x0a0fd964U, 0x685ca621U, 0x9b5b54d1U, 0x24362e3aU,
    0x0c0a67b1U, 0x9357e70fU, 0xb4ee96d2U, 0x1b9b919eU,
    0x80c0c54fU, 0x61dc20a2U, 0x5a774b69U, 0x1c121a16U,
    0xe293ba0aU, 0xc0a02ae5U, 0x3c22e043U, 0x121b171dU,
    0x0e090d0bU, 0xf28bc7adU, 0x2db6a8b9U, 0x141ea9c8U,
    0x57f11985U, 0xaf75074cU, 0xee99ddbbU, 0xa37f60fdU,
    0xf701269fU, 0x5c72f5bcU, 0x44663bc5U, 0x5bfb7e34U,
    0x8b432976U, 0xcb23c6dcU, 0xb6edfc68U, 0xb8e4f163U,
    0xd731dccaU, 0x42638510U, 0x13972240U, 0x84c61120U,
    0x854a247dU, 0xd2bb3df8U, 0xaef93211U, 0xc729a16dU,
    0x1d9e2f4bU, 0xdcb230f3U, 0x0d8652ecU, 0x77c1e3d0U,
    0x2bb3166cU, 0xa970b999U, 0x119448faU, 0x47e96422U,
    0xa8fc8cc4U, 0xa0f03f1aU, 0x567d2cd8U, 0x223390efU,
    0x87494ec7U, 0xd938d1c1U, 0x8ccaa2feU, 0x98d40b36U,
    0xa6f581cfU, 0xa57ade28U, 0xdab78e26U, 0x3fadbfa4U,
    0x2c3a9de4U, 0x5078920dU, 0x6a5fcc9bU, 0x547e4662U,
    0xf68d13c2U, 0x90d8b8e8U, 0x2e39f75eU, 0x82c3aff5U,
    0x9f5d80beU, 0x69d0937cU, 0x6fd52da9U, 0xcf2512b3U,
    0xc8ac993bU, 0x10187da7U, 0xe89c636eU, 0xdb3bbb7bU,
    0xcd267809U, 0x6e5918f4U, 0xec9ab701U, 0x834f9aa8U,
    0xe6956e65U, 0xaaffe67eU, 0x21bccf08U, 0xef15e8e6U,
    0xbae79bd9U, 0x4a6f36ceU, 0xea9f09d4U, 0x29b07cd6U,
    0x31a4b2afU, 0x2a3f2331U, 0xc6a59430U, 0x35a266c0U,
    0x744ebc37U, 0xfc82caa6U, 0xe090d0b0U, 0x33a7d815U,
    0xf104984aU, 0x41ecdaf7U, 0x7fcd500eU, 0x1791f62fU,
    0x764dd68dU, 0x43efb04dU, 0xccaa4d54U, 0xe49604dfU,
    0x9ed1b5e3U, 0x4c6a881bU, 0xc12c1fb8U, 0x4665517fU,
    0x9d5eea04U, 0x018c355dU, 0xfa877473U, 0xfb0b412eU,
    0xb3671d5aU, 0x92dbd252U, 0xe9105633U, 0x6dd64713U,
    0x9ad7618cU, 0x37a10c7aU, 0x59f8148eU, 0xeb133c89U,
    0xcea927eeU, 0xb761c935U, 0xe11ce5edU, 0x7a47b13cU,
    0x9cd2df59U, 0x55f2733fU, 0x1814ce79U, 0x73c737bfU,
    0x53f7cdeaU, 0x5ffdaa5bU, 0xdf3d6f14U, 0x7844db86U,
    0xcaaff381U, 0xb968c43eU, 0x3824342cU, 0xc2a3405fU,
    0x161dc372U, 0xbce2250cU, 0x283c498bU, 0xff0d9541U,
    0x39a80171U, 0x080cb3deU, 0xd8b4e49cU, 0x6456c190U,
    0x7bcb8461U, 0xd532b670U, 0x486c5c74U, 0xd0b85742U,
};
// AES decryption T-table: Td1[x] is Td0[x] rotated by one byte — same
// InvSubBytes+InvMixColumns data aligned for the next byte position.
__constant__ u32 Td1[256] =
{
    0x5051f4a7U, 0x537e4165U, 0xc31a17a4U, 0x963a275eU,
    0xcb3bab6bU, 0xf11f9d45U, 0xabacfa58U, 0x934be303U,
    0x552030faU, 0xf6ad766dU, 0x9188cc76U, 0x25f5024cU,
    0xfc4fe5d7U, 0xd7c52acbU, 0x80263544U, 0x8fb562a3U,
    0x49deb15aU, 0x6725ba1bU, 0x9845ea0eU, 0xe15dfec0U,
    0x02c32f75U, 0x12814cf0U, 0xa38d4697U, 0xc66bd3f9U,
    0xe7038f5fU, 0x9515929cU, 0xebbf6d7aU, 0xda955259U,
    0x2dd4be83U, 0xd3587421U, 0x2949e069U, 0x448ec9c8U,
    0x6a75c289U, 0x78f48e79U, 0x6b99583eU, 0xdd27b971U,
    0xb6bee14fU, 0x17f088adU, 0x66c920acU, 0xb47dce3aU,
    0x1863df4aU, 0x82e51a31U, 0x60975133U, 0x4562537fU,
    0xe0b16477U, 0x84bb6baeU, 0x1cfe81a0U, 0x94f9082bU,
    0x58704868U, 0x198f45fdU, 0x8794de6cU, 0xb7527bf8U,
    0x23ab73d3U, 0xe2724b02U, 0x57e31f8fU, 0x2a6655abU,
    0x07b2eb28U, 0x032fb5c2U, 0x9a86c57bU, 0xa5d33708U,
    0xf2302887U, 0xb223bfa5U, 0xba02036aU, 0x5ced1682U,
    0x2b8acf1cU, 0x92a779b4U, 0xf0f307f2U, 0xa14e69e2U,
    0xcd65daf4U, 0xd50605beU, 0x1fd13462U, 0x8ac4a6feU,
    0x9d342e53U, 0xa0a2f355U, 0x32058ae1U, 0x75a4f6ebU,
    0x390b83ecU, 0xaa4060efU, 0x065e719fU, 0x51bd6e10U,
    0xf93e218aU, 0x3d96dd06U, 0xaedd3e05U, 0x464de6bdU,
    0xb591548dU, 0x0571c45dU, 0x6f0406d4U, 0xff605015U,
    0x241998fbU, 0x97d6bde9U, 0xcc894043U, 0x7767d99eU,
    0xbdb0e842U, 0x8807898bU, 0x38e7195bU, 0xdb79c8eeU,
    0x47a17c0aU, 0xe97c420fU, 0xc9f8841eU, 0x00000000U,
    0x83098086U, 0x48322bedU, 0xac1e1170U, 0x4e6c5a72U,
    0xfbfd0effU, 0x560f8538U, 0x1e3daed5U, 0x27362d39U,
    0x640a0fd9U, 0x21685ca6U, 0xd19b5b54U, 0x3a24362eU,
    0xb10c0a67U, 0x0f9357e7U, 0xd2b4ee96U, 0x9e1b9b91U,
    0x4f80c0c5U, 0xa261dc20U, 0x695a774bU, 0x161c121aU,
    0x0ae293baU, 0xe5c0a02aU, 0x433c22e0U, 0x1d121b17U,
    0x0b0e090dU, 0xadf28bc7U, 0xb92db6a8U, 0xc8141ea9U,
    0x8557f119U, 0x4caf7507U, 0xbbee99ddU, 0xfda37f60U,
    0x9ff70126U, 0xbc5c72f5U, 0xc544663bU, 0x345bfb7eU,
    0x768b4329U, 0xdccb23c6U, 0x68b6edfcU, 0x63b8e4f1U,
    0xcad731dcU, 0x10426385U, 0x40139722U, 0x2084c611U,
    0x7d854a24U, 0xf8d2bb3dU, 0x11aef932U, 0x6dc729a1U,
    0x4b1d9e2fU, 0xf3dcb230U, 0xec0d8652U, 0xd077c1e3U,
    0x6c2bb316U, 0x99a970b9U, 0xfa119448U, 0x2247e964U,
    0xc4a8fc8cU, 0x1aa0f03fU, 0xd8567d2cU, 0xef223390U,
    0xc787494eU, 0xc1d938d1U, 0xfe8ccaa2U, 0x3698d40bU,
    0xcfa6f581U, 0x28a57adeU, 0x26dab78eU, 0xa43fadbfU,
    0xe42c3a9dU, 0x0d507892U, 0x9b6a5fccU, 0x62547e46U,
    0xc2f68d13U, 0xe890d8b8U, 0x5e2e39f7U, 0xf582c3afU,
    0xbe9f5d80U, 0x7c69d093U, 0xa96fd52dU, 0xb3cf2512U,
    0x3bc8ac99U, 0xa710187dU, 0x6ee89c63U, 0x7bdb3bbbU,
    0x09cd2678U, 0xf46e5918U, 0x01ec9ab7U, 0xa8834f9aU,
    0x65e6956eU, 0x7eaaffe6U, 0x0821bccfU, 0xe6ef15e8U,
    0xd9bae79bU, 0xce4a6f36U, 0xd4ea9f09U, 0xd629b07cU,
    0xaf31a4b2U, 0x312a3f23U, 0x30c6a594U, 0xc035a266U,
    0x37744ebcU, 0xa6fc82caU, 0xb0e090d0U, 0x1533a7d8U,
    0x4af10498U, 0xf741ecdaU, 0x0e7fcd50U, 0x2f1791f6U,
    0x8d764dd6U, 0x4d43efb0U, 0x54ccaa4dU, 0xdfe49604U,
    0xe39ed1b5U, 0x1b4c6a88U, 0xb8c12c1fU, 0x7f466551U,
    0x049d5eeaU, 0x5d018c35U, 0x73fa8774U, 0x2efb0b41U,
    0x5ab3671dU, 0x5292dbd2U, 0x33e91056U, 0x136dd647U,
    0x8c9ad761U, 0x7a37a10cU, 0x8e59f814U, 0x89eb133cU,
    0xeecea927U, 0x35b761c9U, 0xede11ce5U, 0x3c7a47b1U,
    0x599cd2dfU, 0x3f55f273U, 0x791814ceU, 0xbf73c737U,
    0xea53f7cdU, 0x5b5ffdaaU, 0x14df3d6fU, 0x867844dbU,
    0x81caaff3U, 0x3eb968c4U, 0x2c382434U, 0x5fc2a340U,
    0x72161dc3U, 0x0cbce225U, 0x8b283c49U, 0x41ff0d95U,
    0x7139a801U, 0xde080cb3U, 0x9cd8b4e4U, 0x906456c1U,
    0x617bcb84U, 0x70d532b6U, 0x74486c5cU, 0x42d0b857U,
};
/* AES decryption T-table Td2 (standard Rijndael / OpenSSL-style tables):
 * each entry combines InvSubBytes and InvMixColumns for one input byte;
 * Td0..Td3 appear to be byte-rotations of one another, one per output
 * byte lane of the decrypt round (see aes_decrypt_bpt).
 * Held in __constant__ memory: reads are broadcast when all lanes of a
 * warp hit the same entry. Do not edit values by hand. */
__constant__ u32 Td2[256] =
{
0xa75051f4U, 0x65537e41U, 0xa4c31a17U, 0x5e963a27U,
0x6bcb3babU, 0x45f11f9dU, 0x58abacfaU, 0x03934be3U,
0xfa552030U, 0x6df6ad76U, 0x769188ccU, 0x4c25f502U,
0xd7fc4fe5U, 0xcbd7c52aU, 0x44802635U, 0xa38fb562U,
0x5a49deb1U, 0x1b6725baU, 0x0e9845eaU, 0xc0e15dfeU,
0x7502c32fU, 0xf012814cU, 0x97a38d46U, 0xf9c66bd3U,
0x5fe7038fU, 0x9c951592U, 0x7aebbf6dU, 0x59da9552U,
0x832dd4beU, 0x21d35874U, 0x692949e0U, 0xc8448ec9U,
0x896a75c2U, 0x7978f48eU, 0x3e6b9958U, 0x71dd27b9U,
0x4fb6bee1U, 0xad17f088U, 0xac66c920U, 0x3ab47dceU,
0x4a1863dfU, 0x3182e51aU, 0x33609751U, 0x7f456253U,
0x77e0b164U, 0xae84bb6bU, 0xa01cfe81U, 0x2b94f908U,
0x68587048U, 0xfd198f45U, 0x6c8794deU, 0xf8b7527bU,
0xd323ab73U, 0x02e2724bU, 0x8f57e31fU, 0xab2a6655U,
0x2807b2ebU, 0xc2032fb5U, 0x7b9a86c5U, 0x08a5d337U,
0x87f23028U, 0xa5b223bfU, 0x6aba0203U, 0x825ced16U,
0x1c2b8acfU, 0xb492a779U, 0xf2f0f307U, 0xe2a14e69U,
0xf4cd65daU, 0xbed50605U, 0x621fd134U, 0xfe8ac4a6U,
0x539d342eU, 0x55a0a2f3U, 0xe132058aU, 0xeb75a4f6U,
0xec390b83U, 0xefaa4060U, 0x9f065e71U, 0x1051bd6eU,
0x8af93e21U, 0x063d96ddU, 0x05aedd3eU, 0xbd464de6U,
0x8db59154U, 0x5d0571c4U, 0xd46f0406U, 0x15ff6050U,
0xfb241998U, 0xe997d6bdU, 0x43cc8940U, 0x9e7767d9U,
0x42bdb0e8U, 0x8b880789U, 0x5b38e719U, 0xeedb79c8U,
0x0a47a17cU, 0x0fe97c42U, 0x1ec9f884U, 0x00000000U,
0x86830980U, 0xed48322bU, 0x70ac1e11U, 0x724e6c5aU,
0xfffbfd0eU, 0x38560f85U, 0xd51e3daeU, 0x3927362dU,
0xd9640a0fU, 0xa621685cU, 0x54d19b5bU, 0x2e3a2436U,
0x67b10c0aU, 0xe70f9357U, 0x96d2b4eeU, 0x919e1b9bU,
0xc54f80c0U, 0x20a261dcU, 0x4b695a77U, 0x1a161c12U,
0xba0ae293U, 0x2ae5c0a0U, 0xe0433c22U, 0x171d121bU,
0x0d0b0e09U, 0xc7adf28bU, 0xa8b92db6U, 0xa9c8141eU,
0x198557f1U, 0x074caf75U, 0xddbbee99U, 0x60fda37fU,
0x269ff701U, 0xf5bc5c72U, 0x3bc54466U, 0x7e345bfbU,
0x29768b43U, 0xc6dccb23U, 0xfc68b6edU, 0xf163b8e4U,
0xdccad731U, 0x85104263U, 0x22401397U, 0x112084c6U,
0x247d854aU, 0x3df8d2bbU, 0x3211aef9U, 0xa16dc729U,
0x2f4b1d9eU, 0x30f3dcb2U, 0x52ec0d86U, 0xe3d077c1U,
0x166c2bb3U, 0xb999a970U, 0x48fa1194U, 0x642247e9U,
0x8cc4a8fcU, 0x3f1aa0f0U, 0x2cd8567dU, 0x90ef2233U,
0x4ec78749U, 0xd1c1d938U, 0xa2fe8ccaU, 0x0b3698d4U,
0x81cfa6f5U, 0xde28a57aU, 0x8e26dab7U, 0xbfa43fadU,
0x9de42c3aU, 0x920d5078U, 0xcc9b6a5fU, 0x4662547eU,
0x13c2f68dU, 0xb8e890d8U, 0xf75e2e39U, 0xaff582c3U,
0x80be9f5dU, 0x937c69d0U, 0x2da96fd5U, 0x12b3cf25U,
0x993bc8acU, 0x7da71018U, 0x636ee89cU, 0xbb7bdb3bU,
0x7809cd26U, 0x18f46e59U, 0xb701ec9aU, 0x9aa8834fU,
0x6e65e695U, 0xe67eaaffU, 0xcf0821bcU, 0xe8e6ef15U,
0x9bd9bae7U, 0x36ce4a6fU, 0x09d4ea9fU, 0x7cd629b0U,
0xb2af31a4U, 0x23312a3fU, 0x9430c6a5U, 0x66c035a2U,
0xbc37744eU, 0xcaa6fc82U, 0xd0b0e090U, 0xd81533a7U,
0x984af104U, 0xdaf741ecU, 0x500e7fcdU, 0xf62f1791U,
0xd68d764dU, 0xb04d43efU, 0x4d54ccaaU, 0x04dfe496U,
0xb5e39ed1U, 0x881b4c6aU, 0x1fb8c12cU, 0x517f4665U,
0xea049d5eU, 0x355d018cU, 0x7473fa87U, 0x412efb0bU,
0x1d5ab367U, 0xd25292dbU, 0x5633e910U, 0x47136dd6U,
0x618c9ad7U, 0x0c7a37a1U, 0x148e59f8U, 0x3c89eb13U,
0x27eecea9U, 0xc935b761U, 0xe5ede11cU, 0xb13c7a47U,
0xdf599cd2U, 0x733f55f2U, 0xce791814U, 0x37bf73c7U,
0xcdea53f7U, 0xaa5b5ffdU, 0x6f14df3dU, 0xdb867844U,
0xf381caafU, 0xc43eb968U, 0x342c3824U, 0x405fc2a3U,
0xc372161dU, 0x250cbce2U, 0x498b283cU, 0x9541ff0dU,
0x017139a8U, 0xb3de080cU, 0xe49cd8b4U, 0xc1906456U,
0x84617bcbU, 0xb670d532U, 0x5c74486cU, 0x5742d0b8U,
};
/* AES decryption T-table Td3 (standard Rijndael / OpenSSL-style tables):
 * byte-rotated variant of Td0..Td2 feeding the low byte lane of the
 * decrypt round state (see aes_decrypt_bpt). __constant__ memory for
 * warp-broadcast reads. Do not edit values by hand. */
__constant__ u32 Td3[256] =
{
0xf4a75051U, 0x4165537eU, 0x17a4c31aU, 0x275e963aU,
0xab6bcb3bU, 0x9d45f11fU, 0xfa58abacU, 0xe303934bU,
0x30fa5520U, 0x766df6adU, 0xcc769188U, 0x024c25f5U,
0xe5d7fc4fU, 0x2acbd7c5U, 0x35448026U, 0x62a38fb5U,
0xb15a49deU, 0xba1b6725U, 0xea0e9845U, 0xfec0e15dU,
0x2f7502c3U, 0x4cf01281U, 0x4697a38dU, 0xd3f9c66bU,
0x8f5fe703U, 0x929c9515U, 0x6d7aebbfU, 0x5259da95U,
0xbe832dd4U, 0x7421d358U, 0xe0692949U, 0xc9c8448eU,
0xc2896a75U, 0x8e7978f4U, 0x583e6b99U, 0xb971dd27U,
0xe14fb6beU, 0x88ad17f0U, 0x20ac66c9U, 0xce3ab47dU,
0xdf4a1863U, 0x1a3182e5U, 0x51336097U, 0x537f4562U,
0x6477e0b1U, 0x6bae84bbU, 0x81a01cfeU, 0x082b94f9U,
0x48685870U, 0x45fd198fU, 0xde6c8794U, 0x7bf8b752U,
0x73d323abU, 0x4b02e272U, 0x1f8f57e3U, 0x55ab2a66U,
0xeb2807b2U, 0xb5c2032fU, 0xc57b9a86U, 0x3708a5d3U,
0x2887f230U, 0xbfa5b223U, 0x036aba02U, 0x16825cedU,
0xcf1c2b8aU, 0x79b492a7U, 0x07f2f0f3U, 0x69e2a14eU,
0xdaf4cd65U, 0x05bed506U, 0x34621fd1U, 0xa6fe8ac4U,
0x2e539d34U, 0xf355a0a2U, 0x8ae13205U, 0xf6eb75a4U,
0x83ec390bU, 0x60efaa40U, 0x719f065eU, 0x6e1051bdU,
0x218af93eU, 0xdd063d96U, 0x3e05aeddU, 0xe6bd464dU,
0x548db591U, 0xc45d0571U, 0x06d46f04U, 0x5015ff60U,
0x98fb2419U, 0xbde997d6U, 0x4043cc89U, 0xd99e7767U,
0xe842bdb0U, 0x898b8807U, 0x195b38e7U, 0xc8eedb79U,
0x7c0a47a1U, 0x420fe97cU, 0x841ec9f8U, 0x00000000U,
0x80868309U, 0x2bed4832U, 0x1170ac1eU, 0x5a724e6cU,
0x0efffbfdU, 0x8538560fU, 0xaed51e3dU, 0x2d392736U,
0x0fd9640aU, 0x5ca62168U, 0x5b54d19bU, 0x362e3a24U,
0x0a67b10cU, 0x57e70f93U, 0xee96d2b4U, 0x9b919e1bU,
0xc0c54f80U, 0xdc20a261U, 0x774b695aU, 0x121a161cU,
0x93ba0ae2U, 0xa02ae5c0U, 0x22e0433cU, 0x1b171d12U,
0x090d0b0eU, 0x8bc7adf2U, 0xb6a8b92dU, 0x1ea9c814U,
0xf1198557U, 0x75074cafU, 0x99ddbbeeU, 0x7f60fda3U,
0x01269ff7U, 0x72f5bc5cU, 0x663bc544U, 0xfb7e345bU,
0x4329768bU, 0x23c6dccbU, 0xedfc68b6U, 0xe4f163b8U,
0x31dccad7U, 0x63851042U, 0x97224013U, 0xc6112084U,
0x4a247d85U, 0xbb3df8d2U, 0xf93211aeU, 0x29a16dc7U,
0x9e2f4b1dU, 0xb230f3dcU, 0x8652ec0dU, 0xc1e3d077U,
0xb3166c2bU, 0x70b999a9U, 0x9448fa11U, 0xe9642247U,
0xfc8cc4a8U, 0xf03f1aa0U, 0x7d2cd856U, 0x3390ef22U,
0x494ec787U, 0x38d1c1d9U, 0xcaa2fe8cU, 0xd40b3698U,
0xf581cfa6U, 0x7ade28a5U, 0xb78e26daU, 0xadbfa43fU,
0x3a9de42cU, 0x78920d50U, 0x5fcc9b6aU, 0x7e466254U,
0x8d13c2f6U, 0xd8b8e890U, 0x39f75e2eU, 0xc3aff582U,
0x5d80be9fU, 0xd0937c69U, 0xd52da96fU, 0x2512b3cfU,
0xac993bc8U, 0x187da710U, 0x9c636ee8U, 0x3bbb7bdbU,
0x267809cdU, 0x5918f46eU, 0x9ab701ecU, 0x4f9aa883U,
0x956e65e6U, 0xffe67eaaU, 0xbccf0821U, 0x15e8e6efU,
0xe79bd9baU, 0x6f36ce4aU, 0x9f09d4eaU, 0xb07cd629U,
0xa4b2af31U, 0x3f23312aU, 0xa59430c6U, 0xa266c035U,
0x4ebc3774U, 0x82caa6fcU, 0x90d0b0e0U, 0xa7d81533U,
0x04984af1U, 0xecdaf741U, 0xcd500e7fU, 0x91f62f17U,
0x4dd68d76U, 0xefb04d43U, 0xaa4d54ccU, 0x9604dfe4U,
0xd1b5e39eU, 0x6a881b4cU, 0x2c1fb8c1U, 0x65517f46U,
0x5eea049dU, 0x8c355d01U, 0x877473faU, 0x0b412efbU,
0x671d5ab3U, 0xdbd25292U, 0x105633e9U, 0xd647136dU,
0xd7618c9aU, 0xa10c7a37U, 0xf8148e59U, 0x133c89ebU,
0xa927eeceU, 0x61c935b7U, 0x1ce5ede1U, 0x47b13c7aU,
0xd2df599cU, 0xf2733f55U, 0x14ce7918U, 0xc737bf73U,
0xf7cdea53U, 0xfdaa5b5fU, 0x3d6f14dfU, 0x44db8678U,
0xaff381caU, 0x68c43eb9U, 0x24342c38U, 0xa3405fc2U,
0x1dc37216U, 0xe2250cbcU, 0x3c498b28U, 0x0d9541ffU,
0xa8017139U, 0x0cb3de08U, 0xb4e49cd8U, 0x56c19064U,
0xcb84617bU, 0x32b670d5U, 0x6c5c7448U, 0xb85742d0U,
};
/* AES final-round decryption table Td4: the inverse S-box value replicated
 * into all four byte lanes of each entry. The decrypt kernel's last round
 * masks each lookup with 0xff000000 / 0x00ff0000 / 0x0000ff00 / 0x000000ff
 * to pick the lane it needs (no InvMixColumns in the final round).
 * __constant__ memory; do not edit values by hand. */
__constant__ u32 Td4[256] =
{
0x52525252U, 0x09090909U, 0x6a6a6a6aU, 0xd5d5d5d5U,
0x30303030U, 0x36363636U, 0xa5a5a5a5U, 0x38383838U,
0xbfbfbfbfU, 0x40404040U, 0xa3a3a3a3U, 0x9e9e9e9eU,
0x81818181U, 0xf3f3f3f3U, 0xd7d7d7d7U, 0xfbfbfbfbU,
0x7c7c7c7cU, 0xe3e3e3e3U, 0x39393939U, 0x82828282U,
0x9b9b9b9bU, 0x2f2f2f2fU, 0xffffffffU, 0x87878787U,
0x34343434U, 0x8e8e8e8eU, 0x43434343U, 0x44444444U,
0xc4c4c4c4U, 0xdedededeU, 0xe9e9e9e9U, 0xcbcbcbcbU,
0x54545454U, 0x7b7b7b7bU, 0x94949494U, 0x32323232U,
0xa6a6a6a6U, 0xc2c2c2c2U, 0x23232323U, 0x3d3d3d3dU,
0xeeeeeeeeU, 0x4c4c4c4cU, 0x95959595U, 0x0b0b0b0bU,
0x42424242U, 0xfafafafaU, 0xc3c3c3c3U, 0x4e4e4e4eU,
0x08080808U, 0x2e2e2e2eU, 0xa1a1a1a1U, 0x66666666U,
0x28282828U, 0xd9d9d9d9U, 0x24242424U, 0xb2b2b2b2U,
0x76767676U, 0x5b5b5b5bU, 0xa2a2a2a2U, 0x49494949U,
0x6d6d6d6dU, 0x8b8b8b8bU, 0xd1d1d1d1U, 0x25252525U,
0x72727272U, 0xf8f8f8f8U, 0xf6f6f6f6U, 0x64646464U,
0x86868686U, 0x68686868U, 0x98989898U, 0x16161616U,
0xd4d4d4d4U, 0xa4a4a4a4U, 0x5c5c5c5cU, 0xccccccccU,
0x5d5d5d5dU, 0x65656565U, 0xb6b6b6b6U, 0x92929292U,
0x6c6c6c6cU, 0x70707070U, 0x48484848U, 0x50505050U,
0xfdfdfdfdU, 0xededededU, 0xb9b9b9b9U, 0xdadadadaU,
0x5e5e5e5eU, 0x15151515U, 0x46464646U, 0x57575757U,
0xa7a7a7a7U, 0x8d8d8d8dU, 0x9d9d9d9dU, 0x84848484U,
0x90909090U, 0xd8d8d8d8U, 0xababababU, 0x00000000U,
0x8c8c8c8cU, 0xbcbcbcbcU, 0xd3d3d3d3U, 0x0a0a0a0aU,
0xf7f7f7f7U, 0xe4e4e4e4U, 0x58585858U, 0x05050505U,
0xb8b8b8b8U, 0xb3b3b3b3U, 0x45454545U, 0x06060606U,
0xd0d0d0d0U, 0x2c2c2c2cU, 0x1e1e1e1eU, 0x8f8f8f8fU,
0xcacacacaU, 0x3f3f3f3fU, 0x0f0f0f0fU, 0x02020202U,
0xc1c1c1c1U, 0xafafafafU, 0xbdbdbdbdU, 0x03030303U,
0x01010101U, 0x13131313U, 0x8a8a8a8aU, 0x6b6b6b6bU,
0x3a3a3a3aU, 0x91919191U, 0x11111111U, 0x41414141U,
0x4f4f4f4fU, 0x67676767U, 0xdcdcdcdcU, 0xeaeaeaeaU,
0x97979797U, 0xf2f2f2f2U, 0xcfcfcfcfU, 0xcecececeU,
0xf0f0f0f0U, 0xb4b4b4b4U, 0xe6e6e6e6U, 0x73737373U,
0x96969696U, 0xacacacacU, 0x74747474U, 0x22222222U,
0xe7e7e7e7U, 0xadadadadU, 0x35353535U, 0x85858585U,
0xe2e2e2e2U, 0xf9f9f9f9U, 0x37373737U, 0xe8e8e8e8U,
0x1c1c1c1cU, 0x75757575U, 0xdfdfdfdfU, 0x6e6e6e6eU,
0x47474747U, 0xf1f1f1f1U, 0x1a1a1a1aU, 0x71717171U,
0x1d1d1d1dU, 0x29292929U, 0xc5c5c5c5U, 0x89898989U,
0x6f6f6f6fU, 0xb7b7b7b7U, 0x62626262U, 0x0e0e0e0eU,
0xaaaaaaaaU, 0x18181818U, 0xbebebebeU, 0x1b1b1b1bU,
0xfcfcfcfcU, 0x56565656U, 0x3e3e3e3eU, 0x4b4b4b4bU,
0xc6c6c6c6U, 0xd2d2d2d2U, 0x79797979U, 0x20202020U,
0x9a9a9a9aU, 0xdbdbdbdbU, 0xc0c0c0c0U, 0xfefefefeU,
0x78787878U, 0xcdcdcdcdU, 0x5a5a5a5aU, 0xf4f4f4f4U,
0x1f1f1f1fU, 0xddddddddU, 0xa8a8a8a8U, 0x33333333U,
0x88888888U, 0x07070707U, 0xc7c7c7c7U, 0x31313131U,
0xb1b1b1b1U, 0x12121212U, 0x10101010U, 0x59595959U,
0x27272727U, 0x80808080U, 0xececececU, 0x5f5f5f5fU,
0x60606060U, 0x51515151U, 0x7f7f7f7fU, 0xa9a9a9a9U,
0x19191919U, 0xb5b5b5b5U, 0x4a4a4a4aU, 0x0d0d0d0dU,
0x2d2d2d2dU, 0xe5e5e5e5U, 0x7a7a7a7aU, 0x9f9f9f9fU,
0x93939393U, 0xc9c9c9c9U, 0x9c9c9c9cU, 0xefefefefU,
0xa0a0a0a0U, 0xe0e0e0e0U, 0x3b3b3b3bU, 0x4d4d4d4dU,
0xaeaeaeaeU, 0x2a2a2a2aU, 0xf5f5f5f5U, 0xb0b0b0b0U,
0xc8c8c8c8U, 0xebebebebU, 0xbbbbbbbbU, 0x3c3c3c3cU,
0x83838383U, 0x53535353U, 0x99999999U, 0x61616161U,
0x17171717U, 0x2b2b2b2bU, 0x04040404U, 0x7e7e7e7eU,
0xbabababaU, 0x77777777U, 0xd6d6d6d6U, 0x26262626U,
0xe1e1e1e1U, 0x69696969U, 0x14141414U, 0x63636363U,
0x55555555U, 0x21212121U, 0x0c0c0c0cU, 0x7d7d7d7dU,
};
/* AES key-expansion round constants (x^(i-1) in GF(2^8), placed in the
 * top byte). Used by the key schedule (defined elsewhere in this file —
 * not visible here). */
__constant__ u32 rcon[] =
{
0x01000000, 0x02000000, 0x04000000, 0x08000000,
0x10000000, 0x20000000, 0x40000000, 0x80000000,
0x1B000000, 0x36000000,
/* for 128-bit blocks, Rijndael never uses more than 10 rcon values */
};
/*
* Device code file for AES
*/
/*
 * AES encryption kernel, "block per thread": each thread encrypts one
 * 16-byte block of `text` in place using the T-table (OpenSSL-style)
 * formulation with constant-memory tables Te0..Te4.
 *
 * Launch layout: 1-D grid and blocks; thread i works on
 * text[16*i .. 16*i+15]. There is NO tail guard, so the caller must
 * launch exactly gridDim.x*blockDim.x == number of 16-byte blocks.
 *
 * rk      - expanded key schedule in global memory, 4*(nrounds+1) words.
 *           Read-only; __restrict__ asserts it never aliases `text`.
 * nrounds - 10, 12 or 14 (AES-128/192/256); rounds are fully unrolled.
 * text    - plaintext in, ciphertext out (overwritten in place).
 *
 * Assumes each 16-byte block is 4-byte aligned (GETU32/PUTU32 cast to u32*).
 */
__global__ void aes_encrypt_bpt(const u32 * __restrict__ rk, int nrounds, u8* text)
{
u32 s0, s1, s2, s3, t0, t1, t2, t3;
/* 64-bit offset arithmetic so launches past 2^27 blocks don't overflow */
u8 *txt = text + (size_t)16 * ((size_t)blockIdx.x * blockDim.x + threadIdx.x);
/* load block big-endian and add the initial round key */
s0 = GETU32(txt ) ^ rk[0];
s1 = GETU32(txt + 4) ^ rk[1];
s2 = GETU32(txt + 8) ^ rk[2];
s3 = GETU32(txt + 12) ^ rk[3];
/* round 1: */
t0 = Te0[s0 >> 24] ^ Te1[(s1 >> 16) & 0xff] ^ Te2[(s2 >> 8) & 0xff] ^ Te3[s3 & 0xff] ^ rk[ 4];
t1 = Te0[s1 >> 24] ^ Te1[(s2 >> 16) & 0xff] ^ Te2[(s3 >> 8) & 0xff] ^ Te3[s0 & 0xff] ^ rk[ 5];
t2 = Te0[s2 >> 24] ^ Te1[(s3 >> 16) & 0xff] ^ Te2[(s0 >> 8) & 0xff] ^ Te3[s1 & 0xff] ^ rk[ 6];
t3 = Te0[s3 >> 24] ^ Te1[(s0 >> 16) & 0xff] ^ Te2[(s1 >> 8) & 0xff] ^ Te3[s2 & 0xff] ^ rk[ 7];
/* round 2: */
s0 = Te0[t0 >> 24] ^ Te1[(t1 >> 16) & 0xff] ^ Te2[(t2 >> 8) & 0xff] ^ Te3[t3 & 0xff] ^ rk[ 8];
s1 = Te0[t1 >> 24] ^ Te1[(t2 >> 16) & 0xff] ^ Te2[(t3 >> 8) & 0xff] ^ Te3[t0 & 0xff] ^ rk[ 9];
s2 = Te0[t2 >> 24] ^ Te1[(t3 >> 16) & 0xff] ^ Te2[(t0 >> 8) & 0xff] ^ Te3[t1 & 0xff] ^ rk[10];
s3 = Te0[t3 >> 24] ^ Te1[(t0 >> 16) & 0xff] ^ Te2[(t1 >> 8) & 0xff] ^ Te3[t2 & 0xff] ^ rk[11];
/* round 3: */
t0 = Te0[s0 >> 24] ^ Te1[(s1 >> 16) & 0xff] ^ Te2[(s2 >> 8) & 0xff] ^ Te3[s3 & 0xff] ^ rk[12];
t1 = Te0[s1 >> 24] ^ Te1[(s2 >> 16) & 0xff] ^ Te2[(s3 >> 8) & 0xff] ^ Te3[s0 & 0xff] ^ rk[13];
t2 = Te0[s2 >> 24] ^ Te1[(s3 >> 16) & 0xff] ^ Te2[(s0 >> 8) & 0xff] ^ Te3[s1 & 0xff] ^ rk[14];
t3 = Te0[s3 >> 24] ^ Te1[(s0 >> 16) & 0xff] ^ Te2[(s1 >> 8) & 0xff] ^ Te3[s2 & 0xff] ^ rk[15];
/* round 4: */
s0 = Te0[t0 >> 24] ^ Te1[(t1 >> 16) & 0xff] ^ Te2[(t2 >> 8) & 0xff] ^ Te3[t3 & 0xff] ^ rk[16];
s1 = Te0[t1 >> 24] ^ Te1[(t2 >> 16) & 0xff] ^ Te2[(t3 >> 8) & 0xff] ^ Te3[t0 & 0xff] ^ rk[17];
s2 = Te0[t2 >> 24] ^ Te1[(t3 >> 16) & 0xff] ^ Te2[(t0 >> 8) & 0xff] ^ Te3[t1 & 0xff] ^ rk[18];
s3 = Te0[t3 >> 24] ^ Te1[(t0 >> 16) & 0xff] ^ Te2[(t1 >> 8) & 0xff] ^ Te3[t2 & 0xff] ^ rk[19];
/* round 5: */
t0 = Te0[s0 >> 24] ^ Te1[(s1 >> 16) & 0xff] ^ Te2[(s2 >> 8) & 0xff] ^ Te3[s3 & 0xff] ^ rk[20];
t1 = Te0[s1 >> 24] ^ Te1[(s2 >> 16) & 0xff] ^ Te2[(s3 >> 8) & 0xff] ^ Te3[s0 & 0xff] ^ rk[21];
t2 = Te0[s2 >> 24] ^ Te1[(s3 >> 16) & 0xff] ^ Te2[(s0 >> 8) & 0xff] ^ Te3[s1 & 0xff] ^ rk[22];
t3 = Te0[s3 >> 24] ^ Te1[(s0 >> 16) & 0xff] ^ Te2[(s1 >> 8) & 0xff] ^ Te3[s2 & 0xff] ^ rk[23];
/* round 6: */
s0 = Te0[t0 >> 24] ^ Te1[(t1 >> 16) & 0xff] ^ Te2[(t2 >> 8) & 0xff] ^ Te3[t3 & 0xff] ^ rk[24];
s1 = Te0[t1 >> 24] ^ Te1[(t2 >> 16) & 0xff] ^ Te2[(t3 >> 8) & 0xff] ^ Te3[t0 & 0xff] ^ rk[25];
s2 = Te0[t2 >> 24] ^ Te1[(t3 >> 16) & 0xff] ^ Te2[(t0 >> 8) & 0xff] ^ Te3[t1 & 0xff] ^ rk[26];
s3 = Te0[t3 >> 24] ^ Te1[(t0 >> 16) & 0xff] ^ Te2[(t1 >> 8) & 0xff] ^ Te3[t2 & 0xff] ^ rk[27];
/* round 7: */
t0 = Te0[s0 >> 24] ^ Te1[(s1 >> 16) & 0xff] ^ Te2[(s2 >> 8) & 0xff] ^ Te3[s3 & 0xff] ^ rk[28];
t1 = Te0[s1 >> 24] ^ Te1[(s2 >> 16) & 0xff] ^ Te2[(s3 >> 8) & 0xff] ^ Te3[s0 & 0xff] ^ rk[29];
t2 = Te0[s2 >> 24] ^ Te1[(s3 >> 16) & 0xff] ^ Te2[(s0 >> 8) & 0xff] ^ Te3[s1 & 0xff] ^ rk[30];
t3 = Te0[s3 >> 24] ^ Te1[(s0 >> 16) & 0xff] ^ Te2[(s1 >> 8) & 0xff] ^ Te3[s2 & 0xff] ^ rk[31];
/* round 8: */
s0 = Te0[t0 >> 24] ^ Te1[(t1 >> 16) & 0xff] ^ Te2[(t2 >> 8) & 0xff] ^ Te3[t3 & 0xff] ^ rk[32];
s1 = Te0[t1 >> 24] ^ Te1[(t2 >> 16) & 0xff] ^ Te2[(t3 >> 8) & 0xff] ^ Te3[t0 & 0xff] ^ rk[33];
s2 = Te0[t2 >> 24] ^ Te1[(t3 >> 16) & 0xff] ^ Te2[(t0 >> 8) & 0xff] ^ Te3[t1 & 0xff] ^ rk[34];
s3 = Te0[t3 >> 24] ^ Te1[(t0 >> 16) & 0xff] ^ Te2[(t1 >> 8) & 0xff] ^ Te3[t2 & 0xff] ^ rk[35];
/* round 9: */
t0 = Te0[s0 >> 24] ^ Te1[(s1 >> 16) & 0xff] ^ Te2[(s2 >> 8) & 0xff] ^ Te3[s3 & 0xff] ^ rk[36];
t1 = Te0[s1 >> 24] ^ Te1[(s2 >> 16) & 0xff] ^ Te2[(s3 >> 8) & 0xff] ^ Te3[s0 & 0xff] ^ rk[37];
t2 = Te0[s2 >> 24] ^ Te1[(s3 >> 16) & 0xff] ^ Te2[(s0 >> 8) & 0xff] ^ Te3[s1 & 0xff] ^ rk[38];
t3 = Te0[s3 >> 24] ^ Te1[(s0 >> 16) & 0xff] ^ Te2[(s1 >> 8) & 0xff] ^ Te3[s2 & 0xff] ^ rk[39];
if (nrounds > 10)
{
/* AES-192/256: two extra rounds */
/* round 10: */
s0 = Te0[t0 >> 24] ^ Te1[(t1 >> 16) & 0xff] ^ Te2[(t2 >> 8) & 0xff] ^ Te3[t3 & 0xff] ^ rk[40];
s1 = Te0[t1 >> 24] ^ Te1[(t2 >> 16) & 0xff] ^ Te2[(t3 >> 8) & 0xff] ^ Te3[t0 & 0xff] ^ rk[41];
s2 = Te0[t2 >> 24] ^ Te1[(t3 >> 16) & 0xff] ^ Te2[(t0 >> 8) & 0xff] ^ Te3[t1 & 0xff] ^ rk[42];
s3 = Te0[t3 >> 24] ^ Te1[(t0 >> 16) & 0xff] ^ Te2[(t1 >> 8) & 0xff] ^ Te3[t2 & 0xff] ^ rk[43];
/* round 11: */
t0 = Te0[s0 >> 24] ^ Te1[(s1 >> 16) & 0xff] ^ Te2[(s2 >> 8) & 0xff] ^ Te3[s3 & 0xff] ^ rk[44];
t1 = Te0[s1 >> 24] ^ Te1[(s2 >> 16) & 0xff] ^ Te2[(s3 >> 8) & 0xff] ^ Te3[s0 & 0xff] ^ rk[45];
t2 = Te0[s2 >> 24] ^ Te1[(s3 >> 16) & 0xff] ^ Te2[(s0 >> 8) & 0xff] ^ Te3[s1 & 0xff] ^ rk[46];
t3 = Te0[s3 >> 24] ^ Te1[(s0 >> 16) & 0xff] ^ Te2[(s1 >> 8) & 0xff] ^ Te3[s2 & 0xff] ^ rk[47];
if (nrounds > 12)
{
/* AES-256: two more rounds */
/* round 12: */
s0 = Te0[t0 >> 24] ^ Te1[(t1 >> 16) & 0xff] ^ Te2[(t2 >> 8) & 0xff] ^ Te3[t3 & 0xff] ^ rk[48];
s1 = Te0[t1 >> 24] ^ Te1[(t2 >> 16) & 0xff] ^ Te2[(t3 >> 8) & 0xff] ^ Te3[t0 & 0xff] ^ rk[49];
s2 = Te0[t2 >> 24] ^ Te1[(t3 >> 16) & 0xff] ^ Te2[(t0 >> 8) & 0xff] ^ Te3[t1 & 0xff] ^ rk[50];
s3 = Te0[t3 >> 24] ^ Te1[(t0 >> 16) & 0xff] ^ Te2[(t1 >> 8) & 0xff] ^ Te3[t2 & 0xff] ^ rk[51];
/* round 13: */
t0 = Te0[s0 >> 24] ^ Te1[(s1 >> 16) & 0xff] ^ Te2[(s2 >> 8) & 0xff] ^ Te3[s3 & 0xff] ^ rk[52];
t1 = Te0[s1 >> 24] ^ Te1[(s2 >> 16) & 0xff] ^ Te2[(s3 >> 8) & 0xff] ^ Te3[s0 & 0xff] ^ rk[53];
t2 = Te0[s2 >> 24] ^ Te1[(s3 >> 16) & 0xff] ^ Te2[(s0 >> 8) & 0xff] ^ Te3[s1 & 0xff] ^ rk[54];
t3 = Te0[s3 >> 24] ^ Te1[(s0 >> 16) & 0xff] ^ Te2[(s1 >> 8) & 0xff] ^ Te3[s2 & 0xff] ^ rk[55];
}
}
/* advance to the last round key (4 words per round) */
rk += nrounds << 2;
/* final round: no MixColumns; Te4 lookups masked to one byte lane each */
s0 =
(Te4[(t0 >> 24) ] & 0xff000000) ^
(Te4[(t1 >> 16) & 0xff] & 0x00ff0000) ^
(Te4[(t2 >> 8) & 0xff] & 0x0000ff00) ^
(Te4[(t3 ) & 0xff] & 0x000000ff) ^
rk[0];
PUTU32(txt , s0);
s1 =
(Te4[(t1 >> 24) ] & 0xff000000) ^
(Te4[(t2 >> 16) & 0xff] & 0x00ff0000) ^
(Te4[(t3 >> 8) & 0xff] & 0x0000ff00) ^
(Te4[(t0 ) & 0xff] & 0x000000ff) ^
rk[1];
PUTU32(txt + 4, s1);
s2 =
(Te4[(t2 >> 24) ] & 0xff000000) ^
(Te4[(t3 >> 16) & 0xff] & 0x00ff0000) ^
(Te4[(t0 >> 8) & 0xff] & 0x0000ff00) ^
(Te4[(t1 ) & 0xff] & 0x000000ff) ^
rk[2];
PUTU32(txt + 8, s2);
s3 =
(Te4[(t3 >> 24) ] & 0xff000000) ^
(Te4[(t0 >> 16) & 0xff] & 0x00ff0000) ^
(Te4[(t1 >> 8) & 0xff] & 0x0000ff00) ^
(Te4[(t2 ) & 0xff] & 0x000000ff) ^
rk[3];
PUTU32(txt + 12, s3);
}
/*
 * AES decryption kernel, "block per thread": each thread decrypts one
 * 16-byte block of `text` in place using the inverse T-tables Td0..Td4
 * held in constant memory.
 *
 * Launch layout: 1-D grid and blocks; thread i works on
 * text[16*i .. 16*i+15]. There is NO tail guard, so the caller must
 * launch exactly gridDim.x*blockDim.x == number of 16-byte blocks.
 *
 * rk      - expanded DECRYPTION key schedule in global memory,
 *           4*(nrounds+1) words. Read-only; __restrict__ asserts it
 *           never aliases `text`.
 * nrounds - 10, 12 or 14 (AES-128/192/256); rounds are fully unrolled.
 * text    - ciphertext in, plaintext out (overwritten in place).
 *
 * Assumes each 16-byte block is 4-byte aligned (GETU32/PUTU32 cast to u32*).
 */
__global__ void aes_decrypt_bpt(const u32 * __restrict__ rk, int nrounds, u8* text)
{
u32 s0, s1, s2, s3, t0, t1, t2, t3;
/* 64-bit offset arithmetic so launches past 2^27 blocks don't overflow */
u8 *txt = text + (size_t)16 * ((size_t)blockIdx.x * blockDim.x + threadIdx.x);
/*
 * map byte array block to cipher state
 * and add initial round key:
 */
s0 = GETU32(txt ) ^ rk[0];
s1 = GETU32(txt + 4) ^ rk[1];
s2 = GETU32(txt + 8) ^ rk[2];
s3 = GETU32(txt + 12) ^ rk[3];
/* round 1 (note the reversed ShiftRows direction vs. encryption): */
t0 = Td0[s0 >> 24] ^ Td1[(s3 >> 16) & 0xff] ^ Td2[(s2 >> 8) & 0xff] ^ Td3[s1 & 0xff] ^ rk[ 4];
t1 = Td0[s1 >> 24] ^ Td1[(s0 >> 16) & 0xff] ^ Td2[(s3 >> 8) & 0xff] ^ Td3[s2 & 0xff] ^ rk[ 5];
t2 = Td0[s2 >> 24] ^ Td1[(s1 >> 16) & 0xff] ^ Td2[(s0 >> 8) & 0xff] ^ Td3[s3 & 0xff] ^ rk[ 6];
t3 = Td0[s3 >> 24] ^ Td1[(s2 >> 16) & 0xff] ^ Td2[(s1 >> 8) & 0xff] ^ Td3[s0 & 0xff] ^ rk[ 7];
/* round 2: */
s0 = Td0[t0 >> 24] ^ Td1[(t3 >> 16) & 0xff] ^ Td2[(t2 >> 8) & 0xff] ^ Td3[t1 & 0xff] ^ rk[ 8];
s1 = Td0[t1 >> 24] ^ Td1[(t0 >> 16) & 0xff] ^ Td2[(t3 >> 8) & 0xff] ^ Td3[t2 & 0xff] ^ rk[ 9];
s2 = Td0[t2 >> 24] ^ Td1[(t1 >> 16) & 0xff] ^ Td2[(t0 >> 8) & 0xff] ^ Td3[t3 & 0xff] ^ rk[10];
s3 = Td0[t3 >> 24] ^ Td1[(t2 >> 16) & 0xff] ^ Td2[(t1 >> 8) & 0xff] ^ Td3[t0 & 0xff] ^ rk[11];
/* round 3: */
t0 = Td0[s0 >> 24] ^ Td1[(s3 >> 16) & 0xff] ^ Td2[(s2 >> 8) & 0xff] ^ Td3[s1 & 0xff] ^ rk[12];
t1 = Td0[s1 >> 24] ^ Td1[(s0 >> 16) & 0xff] ^ Td2[(s3 >> 8) & 0xff] ^ Td3[s2 & 0xff] ^ rk[13];
t2 = Td0[s2 >> 24] ^ Td1[(s1 >> 16) & 0xff] ^ Td2[(s0 >> 8) & 0xff] ^ Td3[s3 & 0xff] ^ rk[14];
t3 = Td0[s3 >> 24] ^ Td1[(s2 >> 16) & 0xff] ^ Td2[(s1 >> 8) & 0xff] ^ Td3[s0 & 0xff] ^ rk[15];
/* round 4: */
s0 = Td0[t0 >> 24] ^ Td1[(t3 >> 16) & 0xff] ^ Td2[(t2 >> 8) & 0xff] ^ Td3[t1 & 0xff] ^ rk[16];
s1 = Td0[t1 >> 24] ^ Td1[(t0 >> 16) & 0xff] ^ Td2[(t3 >> 8) & 0xff] ^ Td3[t2 & 0xff] ^ rk[17];
s2 = Td0[t2 >> 24] ^ Td1[(t1 >> 16) & 0xff] ^ Td2[(t0 >> 8) & 0xff] ^ Td3[t3 & 0xff] ^ rk[18];
s3 = Td0[t3 >> 24] ^ Td1[(t2 >> 16) & 0xff] ^ Td2[(t1 >> 8) & 0xff] ^ Td3[t0 & 0xff] ^ rk[19];
/* round 5: */
t0 = Td0[s0 >> 24] ^ Td1[(s3 >> 16) & 0xff] ^ Td2[(s2 >> 8) & 0xff] ^ Td3[s1 & 0xff] ^ rk[20];
t1 = Td0[s1 >> 24] ^ Td1[(s0 >> 16) & 0xff] ^ Td2[(s3 >> 8) & 0xff] ^ Td3[s2 & 0xff] ^ rk[21];
t2 = Td0[s2 >> 24] ^ Td1[(s1 >> 16) & 0xff] ^ Td2[(s0 >> 8) & 0xff] ^ Td3[s3 & 0xff] ^ rk[22];
t3 = Td0[s3 >> 24] ^ Td1[(s2 >> 16) & 0xff] ^ Td2[(s1 >> 8) & 0xff] ^ Td3[s0 & 0xff] ^ rk[23];
/* round 6: */
s0 = Td0[t0 >> 24] ^ Td1[(t3 >> 16) & 0xff] ^ Td2[(t2 >> 8) & 0xff] ^ Td3[t1 & 0xff] ^ rk[24];
s1 = Td0[t1 >> 24] ^ Td1[(t0 >> 16) & 0xff] ^ Td2[(t3 >> 8) & 0xff] ^ Td3[t2 & 0xff] ^ rk[25];
s2 = Td0[t2 >> 24] ^ Td1[(t1 >> 16) & 0xff] ^ Td2[(t0 >> 8) & 0xff] ^ Td3[t3 & 0xff] ^ rk[26];
s3 = Td0[t3 >> 24] ^ Td1[(t2 >> 16) & 0xff] ^ Td2[(t1 >> 8) & 0xff] ^ Td3[t0 & 0xff] ^ rk[27];
/* round 7: */
t0 = Td0[s0 >> 24] ^ Td1[(s3 >> 16) & 0xff] ^ Td2[(s2 >> 8) & 0xff] ^ Td3[s1 & 0xff] ^ rk[28];
t1 = Td0[s1 >> 24] ^ Td1[(s0 >> 16) & 0xff] ^ Td2[(s3 >> 8) & 0xff] ^ Td3[s2 & 0xff] ^ rk[29];
t2 = Td0[s2 >> 24] ^ Td1[(s1 >> 16) & 0xff] ^ Td2[(s0 >> 8) & 0xff] ^ Td3[s3 & 0xff] ^ rk[30];
t3 = Td0[s3 >> 24] ^ Td1[(s2 >> 16) & 0xff] ^ Td2[(s1 >> 8) & 0xff] ^ Td3[s0 & 0xff] ^ rk[31];
/* round 8: */
s0 = Td0[t0 >> 24] ^ Td1[(t3 >> 16) & 0xff] ^ Td2[(t2 >> 8) & 0xff] ^ Td3[t1 & 0xff] ^ rk[32];
s1 = Td0[t1 >> 24] ^ Td1[(t0 >> 16) & 0xff] ^ Td2[(t3 >> 8) & 0xff] ^ Td3[t2 & 0xff] ^ rk[33];
s2 = Td0[t2 >> 24] ^ Td1[(t1 >> 16) & 0xff] ^ Td2[(t0 >> 8) & 0xff] ^ Td3[t3 & 0xff] ^ rk[34];
s3 = Td0[t3 >> 24] ^ Td1[(t2 >> 16) & 0xff] ^ Td2[(t1 >> 8) & 0xff] ^ Td3[t0 & 0xff] ^ rk[35];
/* round 9: */
t0 = Td0[s0 >> 24] ^ Td1[(s3 >> 16) & 0xff] ^ Td2[(s2 >> 8) & 0xff] ^ Td3[s1 & 0xff] ^ rk[36];
t1 = Td0[s1 >> 24] ^ Td1[(s0 >> 16) & 0xff] ^ Td2[(s3 >> 8) & 0xff] ^ Td3[s2 & 0xff] ^ rk[37];
t2 = Td0[s2 >> 24] ^ Td1[(s1 >> 16) & 0xff] ^ Td2[(s0 >> 8) & 0xff] ^ Td3[s3 & 0xff] ^ rk[38];
t3 = Td0[s3 >> 24] ^ Td1[(s2 >> 16) & 0xff] ^ Td2[(s1 >> 8) & 0xff] ^ Td3[s0 & 0xff] ^ rk[39];
if (nrounds > 10)
{
/* AES-192/256: two extra rounds */
/* round 10: */
s0 = Td0[t0 >> 24] ^ Td1[(t3 >> 16) & 0xff] ^ Td2[(t2 >> 8) & 0xff] ^ Td3[t1 & 0xff] ^ rk[40];
s1 = Td0[t1 >> 24] ^ Td1[(t0 >> 16) & 0xff] ^ Td2[(t3 >> 8) & 0xff] ^ Td3[t2 & 0xff] ^ rk[41];
s2 = Td0[t2 >> 24] ^ Td1[(t1 >> 16) & 0xff] ^ Td2[(t0 >> 8) & 0xff] ^ Td3[t3 & 0xff] ^ rk[42];
s3 = Td0[t3 >> 24] ^ Td1[(t2 >> 16) & 0xff] ^ Td2[(t1 >> 8) & 0xff] ^ Td3[t0 & 0xff] ^ rk[43];
/* round 11: */
t0 = Td0[s0 >> 24] ^ Td1[(s3 >> 16) & 0xff] ^ Td2[(s2 >> 8) & 0xff] ^ Td3[s1 & 0xff] ^ rk[44];
t1 = Td0[s1 >> 24] ^ Td1[(s0 >> 16) & 0xff] ^ Td2[(s3 >> 8) & 0xff] ^ Td3[s2 & 0xff] ^ rk[45];
t2 = Td0[s2 >> 24] ^ Td1[(s1 >> 16) & 0xff] ^ Td2[(s0 >> 8) & 0xff] ^ Td3[s3 & 0xff] ^ rk[46];
t3 = Td0[s3 >> 24] ^ Td1[(s2 >> 16) & 0xff] ^ Td2[(s1 >> 8) & 0xff] ^ Td3[s0 & 0xff] ^ rk[47];
if (nrounds > 12)
{
/* AES-256: two more rounds */
/* round 12: */
s0 = Td0[t0 >> 24] ^ Td1[(t3 >> 16) & 0xff] ^ Td2[(t2 >> 8) & 0xff] ^ Td3[t1 & 0xff] ^ rk[48];
s1 = Td0[t1 >> 24] ^ Td1[(t0 >> 16) & 0xff] ^ Td2[(t3 >> 8) & 0xff] ^ Td3[t2 & 0xff] ^ rk[49];
s2 = Td0[t2 >> 24] ^ Td1[(t1 >> 16) & 0xff] ^ Td2[(t0 >> 8) & 0xff] ^ Td3[t3 & 0xff] ^ rk[50];
s3 = Td0[t3 >> 24] ^ Td1[(t2 >> 16) & 0xff] ^ Td2[(t1 >> 8) & 0xff] ^ Td3[t0 & 0xff] ^ rk[51];
/* round 13: */
t0 = Td0[s0 >> 24] ^ Td1[(s3 >> 16) & 0xff] ^ Td2[(s2 >> 8) & 0xff] ^ Td3[s1 & 0xff] ^ rk[52];
t1 = Td0[s1 >> 24] ^ Td1[(s0 >> 16) & 0xff] ^ Td2[(s3 >> 8) & 0xff] ^ Td3[s2 & 0xff] ^ rk[53];
t2 = Td0[s2 >> 24] ^ Td1[(s1 >> 16) & 0xff] ^ Td2[(s0 >> 8) & 0xff] ^ Td3[s3 & 0xff] ^ rk[54];
t3 = Td0[s3 >> 24] ^ Td1[(s2 >> 16) & 0xff] ^ Td2[(s1 >> 8) & 0xff] ^ Td3[s0 & 0xff] ^ rk[55];
}
}
/* advance to the last round key (4 words per round) */
rk += nrounds << 2;
/* final round: no InvMixColumns; Td4 lookups masked to one byte lane each */
s0 =
(Td4[(t0 >> 24) ] & 0xff000000) ^
(Td4[(t3 >> 16) & 0xff] & 0x00ff0000) ^
(Td4[(t2 >> 8) & 0xff] & 0x0000ff00) ^
(Td4[(t1 ) & 0xff] & 0x000000ff) ^
rk[0];
PUTU32(txt , s0);
s1 =
(Td4[(t1 >> 24) ] & 0xff000000) ^
(Td4[(t0 >> 16) & 0xff] & 0x00ff0000) ^
(Td4[(t3 >> 8) & 0xff] & 0x0000ff00) ^
(Td4[(t2 ) & 0xff] & 0x000000ff) ^
rk[1];
PUTU32(txt + 4, s1);
s2 =
(Td4[(t2 >> 24) ] & 0xff000000) ^
(Td4[(t1 >> 16) & 0xff] & 0x00ff0000) ^
(Td4[(t0 >> 8) & 0xff] & 0x0000ff00) ^
(Td4[(t3 ) & 0xff] & 0x000000ff) ^
rk[2];
PUTU32(txt + 8, s2);
s3 =
(Td4[(t3 >> 24) ] & 0xff000000) ^
(Td4[(t2 >> 16) & 0xff] & 0x00ff0000) ^
(Td4[(t1 >> 8) & 0xff] & 0x0000ff00) ^
(Td4[(t0 ) & 0xff] & 0x000000ff) ^
rk[3];
PUTU32(txt + 12, s3);
}
| 5e46c8d46d129024c67ab6a5e22a1b03d16d6bb1.cu | #include <stdio.h>
#include <stdlib.h>
#include <cuda.h>
typedef unsigned long long int u64;  /* 64-bit unsigned */
typedef unsigned int u32;            /* 32-bit unsigned: one AES state word */
typedef unsigned short u16;          /* 16-bit unsigned */
typedef unsigned char u8;            /* byte */
/* __byte_perm with selector 0x0123 emits bytes 3,2,1,0 of its first
 * operand, i.e. reverses byte order. GETU32/PUTU32 therefore load/store a
 * 32-bit word in big-endian order (as the AES reference code expects) on
 * the little-endian GPU.
 * NOTE(review): the (u32*) cast assumes the byte pointer is 4-byte
 * aligned — confirm callers guarantee this. */
#define ENDIAN_SELECTOR 0x00000123
#define GETU32(plaintext) \
__byte_perm(*(u32*)(plaintext), 0, ENDIAN_SELECTOR)
#define PUTU32(ciphertext, st) \
{*(u32*)(ciphertext) = __byte_perm((st), 0, ENDIAN_SELECTOR);}
/* AES encryption T-table Te0 (standard Rijndael / OpenSSL-style tables):
 * each entry combines SubBytes and MixColumns for one input byte; Te0..Te3
 * appear to be byte-rotations of one another, one per output byte lane of
 * the encrypt round (see aes_encrypt_bpt). __constant__ memory for
 * warp-broadcast reads. Do not edit values by hand. */
__constant__ u32 Te0[256] =
{
0xc66363a5U, 0xf87c7c84U, 0xee777799U, 0xf67b7b8dU,
0xfff2f20dU, 0xd66b6bbdU, 0xde6f6fb1U, 0x91c5c554U,
0x60303050U, 0x02010103U, 0xce6767a9U, 0x562b2b7dU,
0xe7fefe19U, 0xb5d7d762U, 0x4dababe6U, 0xec76769aU,
0x8fcaca45U, 0x1f82829dU, 0x89c9c940U, 0xfa7d7d87U,
0xeffafa15U, 0xb25959ebU, 0x8e4747c9U, 0xfbf0f00bU,
0x41adadecU, 0xb3d4d467U, 0x5fa2a2fdU, 0x45afafeaU,
0x239c9cbfU, 0x53a4a4f7U, 0xe4727296U, 0x9bc0c05bU,
0x75b7b7c2U, 0xe1fdfd1cU, 0x3d9393aeU, 0x4c26266aU,
0x6c36365aU, 0x7e3f3f41U, 0xf5f7f702U, 0x83cccc4fU,
0x6834345cU, 0x51a5a5f4U, 0xd1e5e534U, 0xf9f1f108U,
0xe2717193U, 0xabd8d873U, 0x62313153U, 0x2a15153fU,
0x0804040cU, 0x95c7c752U, 0x46232365U, 0x9dc3c35eU,
0x30181828U, 0x379696a1U, 0x0a05050fU, 0x2f9a9ab5U,
0x0e070709U, 0x24121236U, 0x1b80809bU, 0xdfe2e23dU,
0xcdebeb26U, 0x4e272769U, 0x7fb2b2cdU, 0xea75759fU,
0x1209091bU, 0x1d83839eU, 0x582c2c74U, 0x341a1a2eU,
0x361b1b2dU, 0xdc6e6eb2U, 0xb45a5aeeU, 0x5ba0a0fbU,
0xa45252f6U, 0x763b3b4dU, 0xb7d6d661U, 0x7db3b3ceU,
0x5229297bU, 0xdde3e33eU, 0x5e2f2f71U, 0x13848497U,
0xa65353f5U, 0xb9d1d168U, 0x00000000U, 0xc1eded2cU,
0x40202060U, 0xe3fcfc1fU, 0x79b1b1c8U, 0xb65b5bedU,
0xd46a6abeU, 0x8dcbcb46U, 0x67bebed9U, 0x7239394bU,
0x944a4adeU, 0x984c4cd4U, 0xb05858e8U, 0x85cfcf4aU,
0xbbd0d06bU, 0xc5efef2aU, 0x4faaaae5U, 0xedfbfb16U,
0x864343c5U, 0x9a4d4dd7U, 0x66333355U, 0x11858594U,
0x8a4545cfU, 0xe9f9f910U, 0x04020206U, 0xfe7f7f81U,
0xa05050f0U, 0x783c3c44U, 0x259f9fbaU, 0x4ba8a8e3U,
0xa25151f3U, 0x5da3a3feU, 0x804040c0U, 0x058f8f8aU,
0x3f9292adU, 0x219d9dbcU, 0x70383848U, 0xf1f5f504U,
0x63bcbcdfU, 0x77b6b6c1U, 0xafdada75U, 0x42212163U,
0x20101030U, 0xe5ffff1aU, 0xfdf3f30eU, 0xbfd2d26dU,
0x81cdcd4cU, 0x180c0c14U, 0x26131335U, 0xc3ecec2fU,
0xbe5f5fe1U, 0x359797a2U, 0x884444ccU, 0x2e171739U,
0x93c4c457U, 0x55a7a7f2U, 0xfc7e7e82U, 0x7a3d3d47U,
0xc86464acU, 0xba5d5de7U, 0x3219192bU, 0xe6737395U,
0xc06060a0U, 0x19818198U, 0x9e4f4fd1U, 0xa3dcdc7fU,
0x44222266U, 0x542a2a7eU, 0x3b9090abU, 0x0b888883U,
0x8c4646caU, 0xc7eeee29U, 0x6bb8b8d3U, 0x2814143cU,
0xa7dede79U, 0xbc5e5ee2U, 0x160b0b1dU, 0xaddbdb76U,
0xdbe0e03bU, 0x64323256U, 0x743a3a4eU, 0x140a0a1eU,
0x924949dbU, 0x0c06060aU, 0x4824246cU, 0xb85c5ce4U,
0x9fc2c25dU, 0xbdd3d36eU, 0x43acacefU, 0xc46262a6U,
0x399191a8U, 0x319595a4U, 0xd3e4e437U, 0xf279798bU,
0xd5e7e732U, 0x8bc8c843U, 0x6e373759U, 0xda6d6db7U,
0x018d8d8cU, 0xb1d5d564U, 0x9c4e4ed2U, 0x49a9a9e0U,
0xd86c6cb4U, 0xac5656faU, 0xf3f4f407U, 0xcfeaea25U,
0xca6565afU, 0xf47a7a8eU, 0x47aeaee9U, 0x10080818U,
0x6fbabad5U, 0xf0787888U, 0x4a25256fU, 0x5c2e2e72U,
0x381c1c24U, 0x57a6a6f1U, 0x73b4b4c7U, 0x97c6c651U,
0xcbe8e823U, 0xa1dddd7cU, 0xe874749cU, 0x3e1f1f21U,
0x964b4bddU, 0x61bdbddcU, 0x0d8b8b86U, 0x0f8a8a85U,
0xe0707090U, 0x7c3e3e42U, 0x71b5b5c4U, 0xcc6666aaU,
0x904848d8U, 0x06030305U, 0xf7f6f601U, 0x1c0e0e12U,
0xc26161a3U, 0x6a35355fU, 0xae5757f9U, 0x69b9b9d0U,
0x17868691U, 0x99c1c158U, 0x3a1d1d27U, 0x279e9eb9U,
0xd9e1e138U, 0xebf8f813U, 0x2b9898b3U, 0x22111133U,
0xd26969bbU, 0xa9d9d970U, 0x078e8e89U, 0x339494a7U,
0x2d9b9bb6U, 0x3c1e1e22U, 0x15878792U, 0xc9e9e920U,
0x87cece49U, 0xaa5555ffU, 0x50282878U, 0xa5dfdf7aU,
0x038c8c8fU, 0x59a1a1f8U, 0x09898980U, 0x1a0d0d17U,
0x65bfbfdaU, 0xd7e6e631U, 0x844242c6U, 0xd06868b8U,
0x824141c3U, 0x299999b0U, 0x5a2d2d77U, 0x1e0f0f11U,
0x7bb0b0cbU, 0xa85454fcU, 0x6dbbbbd6U, 0x2c16163aU,
};
/* AES encryption T-table Te1: byte-rotated variant of Te0 for the second
 * output byte lane of the encrypt round (see aes_encrypt_bpt).
 * __constant__ memory; do not edit values by hand. */
__constant__ u32 Te1[256] =
{
0xa5c66363U, 0x84f87c7cU, 0x99ee7777U, 0x8df67b7bU,
0x0dfff2f2U, 0xbdd66b6bU, 0xb1de6f6fU, 0x5491c5c5U,
0x50603030U, 0x03020101U, 0xa9ce6767U, 0x7d562b2bU,
0x19e7fefeU, 0x62b5d7d7U, 0xe64dababU, 0x9aec7676U,
0x458fcacaU, 0x9d1f8282U, 0x4089c9c9U, 0x87fa7d7dU,
0x15effafaU, 0xebb25959U, 0xc98e4747U, 0x0bfbf0f0U,
0xec41adadU, 0x67b3d4d4U, 0xfd5fa2a2U, 0xea45afafU,
0xbf239c9cU, 0xf753a4a4U, 0x96e47272U, 0x5b9bc0c0U,
0xc275b7b7U, 0x1ce1fdfdU, 0xae3d9393U, 0x6a4c2626U,
0x5a6c3636U, 0x417e3f3fU, 0x02f5f7f7U, 0x4f83ccccU,
0x5c683434U, 0xf451a5a5U, 0x34d1e5e5U, 0x08f9f1f1U,
0x93e27171U, 0x73abd8d8U, 0x53623131U, 0x3f2a1515U,
0x0c080404U, 0x5295c7c7U, 0x65462323U, 0x5e9dc3c3U,
0x28301818U, 0xa1379696U, 0x0f0a0505U, 0xb52f9a9aU,
0x090e0707U, 0x36241212U, 0x9b1b8080U, 0x3ddfe2e2U,
0x26cdebebU, 0x694e2727U, 0xcd7fb2b2U, 0x9fea7575U,
0x1b120909U, 0x9e1d8383U, 0x74582c2cU, 0x2e341a1aU,
0x2d361b1bU, 0xb2dc6e6eU, 0xeeb45a5aU, 0xfb5ba0a0U,
0xf6a45252U, 0x4d763b3bU, 0x61b7d6d6U, 0xce7db3b3U,
0x7b522929U, 0x3edde3e3U, 0x715e2f2fU, 0x97138484U,
0xf5a65353U, 0x68b9d1d1U, 0x00000000U, 0x2cc1ededU,
0x60402020U, 0x1fe3fcfcU, 0xc879b1b1U, 0xedb65b5bU,
0xbed46a6aU, 0x468dcbcbU, 0xd967bebeU, 0x4b723939U,
0xde944a4aU, 0xd4984c4cU, 0xe8b05858U, 0x4a85cfcfU,
0x6bbbd0d0U, 0x2ac5efefU, 0xe54faaaaU, 0x16edfbfbU,
0xc5864343U, 0xd79a4d4dU, 0x55663333U, 0x94118585U,
0xcf8a4545U, 0x10e9f9f9U, 0x06040202U, 0x81fe7f7fU,
0xf0a05050U, 0x44783c3cU, 0xba259f9fU, 0xe34ba8a8U,
0xf3a25151U, 0xfe5da3a3U, 0xc0804040U, 0x8a058f8fU,
0xad3f9292U, 0xbc219d9dU, 0x48703838U, 0x04f1f5f5U,
0xdf63bcbcU, 0xc177b6b6U, 0x75afdadaU, 0x63422121U,
0x30201010U, 0x1ae5ffffU, 0x0efdf3f3U, 0x6dbfd2d2U,
0x4c81cdcdU, 0x14180c0cU, 0x35261313U, 0x2fc3ececU,
0xe1be5f5fU, 0xa2359797U, 0xcc884444U, 0x392e1717U,
0x5793c4c4U, 0xf255a7a7U, 0x82fc7e7eU, 0x477a3d3dU,
0xacc86464U, 0xe7ba5d5dU, 0x2b321919U, 0x95e67373U,
0xa0c06060U, 0x98198181U, 0xd19e4f4fU, 0x7fa3dcdcU,
0x66442222U, 0x7e542a2aU, 0xab3b9090U, 0x830b8888U,
0xca8c4646U, 0x29c7eeeeU, 0xd36bb8b8U, 0x3c281414U,
0x79a7dedeU, 0xe2bc5e5eU, 0x1d160b0bU, 0x76addbdbU,
0x3bdbe0e0U, 0x56643232U, 0x4e743a3aU, 0x1e140a0aU,
0xdb924949U, 0x0a0c0606U, 0x6c482424U, 0xe4b85c5cU,
0x5d9fc2c2U, 0x6ebdd3d3U, 0xef43acacU, 0xa6c46262U,
0xa8399191U, 0xa4319595U, 0x37d3e4e4U, 0x8bf27979U,
0x32d5e7e7U, 0x438bc8c8U, 0x596e3737U, 0xb7da6d6dU,
0x8c018d8dU, 0x64b1d5d5U, 0xd29c4e4eU, 0xe049a9a9U,
0xb4d86c6cU, 0xfaac5656U, 0x07f3f4f4U, 0x25cfeaeaU,
0xafca6565U, 0x8ef47a7aU, 0xe947aeaeU, 0x18100808U,
0xd56fbabaU, 0x88f07878U, 0x6f4a2525U, 0x725c2e2eU,
0x24381c1cU, 0xf157a6a6U, 0xc773b4b4U, 0x5197c6c6U,
0x23cbe8e8U, 0x7ca1ddddU, 0x9ce87474U, 0x213e1f1fU,
0xdd964b4bU, 0xdc61bdbdU, 0x860d8b8bU, 0x850f8a8aU,
0x90e07070U, 0x427c3e3eU, 0xc471b5b5U, 0xaacc6666U,
0xd8904848U, 0x05060303U, 0x01f7f6f6U, 0x121c0e0eU,
0xa3c26161U, 0x5f6a3535U, 0xf9ae5757U, 0xd069b9b9U,
0x91178686U, 0x5899c1c1U, 0x273a1d1dU, 0xb9279e9eU,
0x38d9e1e1U, 0x13ebf8f8U, 0xb32b9898U, 0x33221111U,
0xbbd26969U, 0x70a9d9d9U, 0x89078e8eU, 0xa7339494U,
0xb62d9b9bU, 0x223c1e1eU, 0x92158787U, 0x20c9e9e9U,
0x4987ceceU, 0xffaa5555U, 0x78502828U, 0x7aa5dfdfU,
0x8f038c8cU, 0xf859a1a1U, 0x80098989U, 0x171a0d0dU,
0xda65bfbfU, 0x31d7e6e6U, 0xc6844242U, 0xb8d06868U,
0xc3824141U, 0xb0299999U, 0x775a2d2dU, 0x111e0f0fU,
0xcb7bb0b0U, 0xfca85454U, 0xd66dbbbbU, 0x3a2c1616U,
};
/* AES encryption T-table Te2: byte-rotated variant of Te0 for the third
 * output byte lane of the encrypt round (see aes_encrypt_bpt).
 * __constant__ memory; do not edit values by hand. */
__constant__ u32 Te2[256] =
{
0x63a5c663U, 0x7c84f87cU, 0x7799ee77U, 0x7b8df67bU,
0xf20dfff2U, 0x6bbdd66bU, 0x6fb1de6fU, 0xc55491c5U,
0x30506030U, 0x01030201U, 0x67a9ce67U, 0x2b7d562bU,
0xfe19e7feU, 0xd762b5d7U, 0xabe64dabU, 0x769aec76U,
0xca458fcaU, 0x829d1f82U, 0xc94089c9U, 0x7d87fa7dU,
0xfa15effaU, 0x59ebb259U, 0x47c98e47U, 0xf00bfbf0U,
0xadec41adU, 0xd467b3d4U, 0xa2fd5fa2U, 0xafea45afU,
0x9cbf239cU, 0xa4f753a4U, 0x7296e472U, 0xc05b9bc0U,
0xb7c275b7U, 0xfd1ce1fdU, 0x93ae3d93U, 0x266a4c26U,
0x365a6c36U, 0x3f417e3fU, 0xf702f5f7U, 0xcc4f83ccU,
0x345c6834U, 0xa5f451a5U, 0xe534d1e5U, 0xf108f9f1U,
0x7193e271U, 0xd873abd8U, 0x31536231U, 0x153f2a15U,
0x040c0804U, 0xc75295c7U, 0x23654623U, 0xc35e9dc3U,
0x18283018U, 0x96a13796U, 0x050f0a05U, 0x9ab52f9aU,
0x07090e07U, 0x12362412U, 0x809b1b80U, 0xe23ddfe2U,
0xeb26cdebU, 0x27694e27U, 0xb2cd7fb2U, 0x759fea75U,
0x091b1209U, 0x839e1d83U, 0x2c74582cU, 0x1a2e341aU,
0x1b2d361bU, 0x6eb2dc6eU, 0x5aeeb45aU, 0xa0fb5ba0U,
0x52f6a452U, 0x3b4d763bU, 0xd661b7d6U, 0xb3ce7db3U,
0x297b5229U, 0xe33edde3U, 0x2f715e2fU, 0x84971384U,
0x53f5a653U, 0xd168b9d1U, 0x00000000U, 0xed2cc1edU,
0x20604020U, 0xfc1fe3fcU, 0xb1c879b1U, 0x5bedb65bU,
0x6abed46aU, 0xcb468dcbU, 0xbed967beU, 0x394b7239U,
0x4ade944aU, 0x4cd4984cU, 0x58e8b058U, 0xcf4a85cfU,
0xd06bbbd0U, 0xef2ac5efU, 0xaae54faaU, 0xfb16edfbU,
0x43c58643U, 0x4dd79a4dU, 0x33556633U, 0x85941185U,
0x45cf8a45U, 0xf910e9f9U, 0x02060402U, 0x7f81fe7fU,
0x50f0a050U, 0x3c44783cU, 0x9fba259fU, 0xa8e34ba8U,
0x51f3a251U, 0xa3fe5da3U, 0x40c08040U, 0x8f8a058fU,
0x92ad3f92U, 0x9dbc219dU, 0x38487038U, 0xf504f1f5U,
0xbcdf63bcU, 0xb6c177b6U, 0xda75afdaU, 0x21634221U,
0x10302010U, 0xff1ae5ffU, 0xf30efdf3U, 0xd26dbfd2U,
0xcd4c81cdU, 0x0c14180cU, 0x13352613U, 0xec2fc3ecU,
0x5fe1be5fU, 0x97a23597U, 0x44cc8844U, 0x17392e17U,
0xc45793c4U, 0xa7f255a7U, 0x7e82fc7eU, 0x3d477a3dU,
0x64acc864U, 0x5de7ba5dU, 0x192b3219U, 0x7395e673U,
0x60a0c060U, 0x81981981U, 0x4fd19e4fU, 0xdc7fa3dcU,
0x22664422U, 0x2a7e542aU, 0x90ab3b90U, 0x88830b88U,
0x46ca8c46U, 0xee29c7eeU, 0xb8d36bb8U, 0x143c2814U,
0xde79a7deU, 0x5ee2bc5eU, 0x0b1d160bU, 0xdb76addbU,
0xe03bdbe0U, 0x32566432U, 0x3a4e743aU, 0x0a1e140aU,
0x49db9249U, 0x060a0c06U, 0x246c4824U, 0x5ce4b85cU,
0xc25d9fc2U, 0xd36ebdd3U, 0xacef43acU, 0x62a6c462U,
0x91a83991U, 0x95a43195U, 0xe437d3e4U, 0x798bf279U,
0xe732d5e7U, 0xc8438bc8U, 0x37596e37U, 0x6db7da6dU,
0x8d8c018dU, 0xd564b1d5U, 0x4ed29c4eU, 0xa9e049a9U,
0x6cb4d86cU, 0x56faac56U, 0xf407f3f4U, 0xea25cfeaU,
0x65afca65U, 0x7a8ef47aU, 0xaee947aeU, 0x08181008U,
0xbad56fbaU, 0x7888f078U, 0x256f4a25U, 0x2e725c2eU,
0x1c24381cU, 0xa6f157a6U, 0xb4c773b4U, 0xc65197c6U,
0xe823cbe8U, 0xdd7ca1ddU, 0x749ce874U, 0x1f213e1fU,
0x4bdd964bU, 0xbddc61bdU, 0x8b860d8bU, 0x8a850f8aU,
0x7090e070U, 0x3e427c3eU, 0xb5c471b5U, 0x66aacc66U,
0x48d89048U, 0x03050603U, 0xf601f7f6U, 0x0e121c0eU,
0x61a3c261U, 0x355f6a35U, 0x57f9ae57U, 0xb9d069b9U,
0x86911786U, 0xc15899c1U, 0x1d273a1dU, 0x9eb9279eU,
0xe138d9e1U, 0xf813ebf8U, 0x98b32b98U, 0x11332211U,
0x69bbd269U, 0xd970a9d9U, 0x8e89078eU, 0x94a73394U,
0x9bb62d9bU, 0x1e223c1eU, 0x87921587U, 0xe920c9e9U,
0xce4987ceU, 0x55ffaa55U, 0x28785028U, 0xdf7aa5dfU,
0x8c8f038cU, 0xa1f859a1U, 0x89800989U, 0x0d171a0dU,
0xbfda65bfU, 0xe631d7e6U, 0x42c68442U, 0x68b8d068U,
0x41c38241U, 0x99b02999U, 0x2d775a2dU, 0x0f111e0fU,
0xb0cb7bb0U, 0x54fca854U, 0xbbd66dbbU, 0x163a2c16U,
};
/* AES encryption round table Te3 (standard Rijndael T-table).
 * Indexed by one state byte; used by the main encryption rounds in
 * aes_encrypt_bpt (Te3[sX & 0xff]).  Presumably a byte-rotated variant
 * of Te0..Te2 combining SubBytes and MixColumns — standard layout,
 * not independently derivable from this chunk. */
__constant__ u32 Te3[256] =
{
0x6363a5c6U, 0x7c7c84f8U, 0x777799eeU, 0x7b7b8df6U,
0xf2f20dffU, 0x6b6bbdd6U, 0x6f6fb1deU, 0xc5c55491U,
0x30305060U, 0x01010302U, 0x6767a9ceU, 0x2b2b7d56U,
0xfefe19e7U, 0xd7d762b5U, 0xababe64dU, 0x76769aecU,
0xcaca458fU, 0x82829d1fU, 0xc9c94089U, 0x7d7d87faU,
0xfafa15efU, 0x5959ebb2U, 0x4747c98eU, 0xf0f00bfbU,
0xadadec41U, 0xd4d467b3U, 0xa2a2fd5fU, 0xafafea45U,
0x9c9cbf23U, 0xa4a4f753U, 0x727296e4U, 0xc0c05b9bU,
0xb7b7c275U, 0xfdfd1ce1U, 0x9393ae3dU, 0x26266a4cU,
0x36365a6cU, 0x3f3f417eU, 0xf7f702f5U, 0xcccc4f83U,
0x34345c68U, 0xa5a5f451U, 0xe5e534d1U, 0xf1f108f9U,
0x717193e2U, 0xd8d873abU, 0x31315362U, 0x15153f2aU,
0x04040c08U, 0xc7c75295U, 0x23236546U, 0xc3c35e9dU,
0x18182830U, 0x9696a137U, 0x05050f0aU, 0x9a9ab52fU,
0x0707090eU, 0x12123624U, 0x80809b1bU, 0xe2e23ddfU,
0xebeb26cdU, 0x2727694eU, 0xb2b2cd7fU, 0x75759feaU,
0x09091b12U, 0x83839e1dU, 0x2c2c7458U, 0x1a1a2e34U,
0x1b1b2d36U, 0x6e6eb2dcU, 0x5a5aeeb4U, 0xa0a0fb5bU,
0x5252f6a4U, 0x3b3b4d76U, 0xd6d661b7U, 0xb3b3ce7dU,
0x29297b52U, 0xe3e33eddU, 0x2f2f715eU, 0x84849713U,
0x5353f5a6U, 0xd1d168b9U, 0x00000000U, 0xeded2cc1U,
0x20206040U, 0xfcfc1fe3U, 0xb1b1c879U, 0x5b5bedb6U,
0x6a6abed4U, 0xcbcb468dU, 0xbebed967U, 0x39394b72U,
0x4a4ade94U, 0x4c4cd498U, 0x5858e8b0U, 0xcfcf4a85U,
0xd0d06bbbU, 0xefef2ac5U, 0xaaaae54fU, 0xfbfb16edU,
0x4343c586U, 0x4d4dd79aU, 0x33335566U, 0x85859411U,
0x4545cf8aU, 0xf9f910e9U, 0x02020604U, 0x7f7f81feU,
0x5050f0a0U, 0x3c3c4478U, 0x9f9fba25U, 0xa8a8e34bU,
0x5151f3a2U, 0xa3a3fe5dU, 0x4040c080U, 0x8f8f8a05U,
0x9292ad3fU, 0x9d9dbc21U, 0x38384870U, 0xf5f504f1U,
0xbcbcdf63U, 0xb6b6c177U, 0xdada75afU, 0x21216342U,
0x10103020U, 0xffff1ae5U, 0xf3f30efdU, 0xd2d26dbfU,
0xcdcd4c81U, 0x0c0c1418U, 0x13133526U, 0xecec2fc3U,
0x5f5fe1beU, 0x9797a235U, 0x4444cc88U, 0x1717392eU,
0xc4c45793U, 0xa7a7f255U, 0x7e7e82fcU, 0x3d3d477aU,
0x6464acc8U, 0x5d5de7baU, 0x19192b32U, 0x737395e6U,
0x6060a0c0U, 0x81819819U, 0x4f4fd19eU, 0xdcdc7fa3U,
0x22226644U, 0x2a2a7e54U, 0x9090ab3bU, 0x8888830bU,
0x4646ca8cU, 0xeeee29c7U, 0xb8b8d36bU, 0x14143c28U,
0xdede79a7U, 0x5e5ee2bcU, 0x0b0b1d16U, 0xdbdb76adU,
0xe0e03bdbU, 0x32325664U, 0x3a3a4e74U, 0x0a0a1e14U,
0x4949db92U, 0x06060a0cU, 0x24246c48U, 0x5c5ce4b8U,
0xc2c25d9fU, 0xd3d36ebdU, 0xacacef43U, 0x6262a6c4U,
0x9191a839U, 0x9595a431U, 0xe4e437d3U, 0x79798bf2U,
0xe7e732d5U, 0xc8c8438bU, 0x3737596eU, 0x6d6db7daU,
0x8d8d8c01U, 0xd5d564b1U, 0x4e4ed29cU, 0xa9a9e049U,
0x6c6cb4d8U, 0x5656faacU, 0xf4f407f3U, 0xeaea25cfU,
0x6565afcaU, 0x7a7a8ef4U, 0xaeaee947U, 0x08081810U,
0xbabad56fU, 0x787888f0U, 0x25256f4aU, 0x2e2e725cU,
0x1c1c2438U, 0xa6a6f157U, 0xb4b4c773U, 0xc6c65197U,
0xe8e823cbU, 0xdddd7ca1U, 0x74749ce8U, 0x1f1f213eU,
0x4b4bdd96U, 0xbdbddc61U, 0x8b8b860dU, 0x8a8a850fU,
0x707090e0U, 0x3e3e427cU, 0xb5b5c471U, 0x6666aaccU,
0x4848d890U, 0x03030506U, 0xf6f601f7U, 0x0e0e121cU,
0x6161a3c2U, 0x35355f6aU, 0x5757f9aeU, 0xb9b9d069U,
0x86869117U, 0xc1c15899U, 0x1d1d273aU, 0x9e9eb927U,
0xe1e138d9U, 0xf8f813ebU, 0x9898b32bU, 0x11113322U,
0x6969bbd2U, 0xd9d970a9U, 0x8e8e8907U, 0x9494a733U,
0x9b9bb62dU, 0x1e1e223cU, 0x87879215U, 0xe9e920c9U,
0xcece4987U, 0x5555ffaaU, 0x28287850U, 0xdfdf7aa5U,
0x8c8c8f03U, 0xa1a1f859U, 0x89898009U, 0x0d0d171aU,
0xbfbfda65U, 0xe6e631d7U, 0x4242c684U, 0x6868b8d0U,
0x4141c382U, 0x9999b029U, 0x2d2d775aU, 0x0f0f111eU,
0xb0b0cb7bU, 0x5454fca8U, 0xbbbbd66dU, 0x16163a2cU,
};
/* AES S-box with each byte replicated into all four lanes of the word
 * (e.g. S[0x00]=0x63 -> 0x63636363).  Used by the final encryption
 * round in aes_encrypt_bpt, where per-lane masks (0xff000000 etc.)
 * select the needed byte — the final round applies SubBytes/ShiftRows
 * without the MixColumns baked into Te0..Te3. */
__constant__ u32 Te4[256] =
{
0x63636363U, 0x7c7c7c7cU, 0x77777777U, 0x7b7b7b7bU,
0xf2f2f2f2U, 0x6b6b6b6bU, 0x6f6f6f6fU, 0xc5c5c5c5U,
0x30303030U, 0x01010101U, 0x67676767U, 0x2b2b2b2bU,
0xfefefefeU, 0xd7d7d7d7U, 0xababababU, 0x76767676U,
0xcacacacaU, 0x82828282U, 0xc9c9c9c9U, 0x7d7d7d7dU,
0xfafafafaU, 0x59595959U, 0x47474747U, 0xf0f0f0f0U,
0xadadadadU, 0xd4d4d4d4U, 0xa2a2a2a2U, 0xafafafafU,
0x9c9c9c9cU, 0xa4a4a4a4U, 0x72727272U, 0xc0c0c0c0U,
0xb7b7b7b7U, 0xfdfdfdfdU, 0x93939393U, 0x26262626U,
0x36363636U, 0x3f3f3f3fU, 0xf7f7f7f7U, 0xccccccccU,
0x34343434U, 0xa5a5a5a5U, 0xe5e5e5e5U, 0xf1f1f1f1U,
0x71717171U, 0xd8d8d8d8U, 0x31313131U, 0x15151515U,
0x04040404U, 0xc7c7c7c7U, 0x23232323U, 0xc3c3c3c3U,
0x18181818U, 0x96969696U, 0x05050505U, 0x9a9a9a9aU,
0x07070707U, 0x12121212U, 0x80808080U, 0xe2e2e2e2U,
0xebebebebU, 0x27272727U, 0xb2b2b2b2U, 0x75757575U,
0x09090909U, 0x83838383U, 0x2c2c2c2cU, 0x1a1a1a1aU,
0x1b1b1b1bU, 0x6e6e6e6eU, 0x5a5a5a5aU, 0xa0a0a0a0U,
0x52525252U, 0x3b3b3b3bU, 0xd6d6d6d6U, 0xb3b3b3b3U,
0x29292929U, 0xe3e3e3e3U, 0x2f2f2f2fU, 0x84848484U,
0x53535353U, 0xd1d1d1d1U, 0x00000000U, 0xededededU,
0x20202020U, 0xfcfcfcfcU, 0xb1b1b1b1U, 0x5b5b5b5bU,
0x6a6a6a6aU, 0xcbcbcbcbU, 0xbebebebeU, 0x39393939U,
0x4a4a4a4aU, 0x4c4c4c4cU, 0x58585858U, 0xcfcfcfcfU,
0xd0d0d0d0U, 0xefefefefU, 0xaaaaaaaaU, 0xfbfbfbfbU,
0x43434343U, 0x4d4d4d4dU, 0x33333333U, 0x85858585U,
0x45454545U, 0xf9f9f9f9U, 0x02020202U, 0x7f7f7f7fU,
0x50505050U, 0x3c3c3c3cU, 0x9f9f9f9fU, 0xa8a8a8a8U,
0x51515151U, 0xa3a3a3a3U, 0x40404040U, 0x8f8f8f8fU,
0x92929292U, 0x9d9d9d9dU, 0x38383838U, 0xf5f5f5f5U,
0xbcbcbcbcU, 0xb6b6b6b6U, 0xdadadadaU, 0x21212121U,
0x10101010U, 0xffffffffU, 0xf3f3f3f3U, 0xd2d2d2d2U,
0xcdcdcdcdU, 0x0c0c0c0cU, 0x13131313U, 0xececececU,
0x5f5f5f5fU, 0x97979797U, 0x44444444U, 0x17171717U,
0xc4c4c4c4U, 0xa7a7a7a7U, 0x7e7e7e7eU, 0x3d3d3d3dU,
0x64646464U, 0x5d5d5d5dU, 0x19191919U, 0x73737373U,
0x60606060U, 0x81818181U, 0x4f4f4f4fU, 0xdcdcdcdcU,
0x22222222U, 0x2a2a2a2aU, 0x90909090U, 0x88888888U,
0x46464646U, 0xeeeeeeeeU, 0xb8b8b8b8U, 0x14141414U,
0xdedededeU, 0x5e5e5e5eU, 0x0b0b0b0bU, 0xdbdbdbdbU,
0xe0e0e0e0U, 0x32323232U, 0x3a3a3a3aU, 0x0a0a0a0aU,
0x49494949U, 0x06060606U, 0x24242424U, 0x5c5c5c5cU,
0xc2c2c2c2U, 0xd3d3d3d3U, 0xacacacacU, 0x62626262U,
0x91919191U, 0x95959595U, 0xe4e4e4e4U, 0x79797979U,
0xe7e7e7e7U, 0xc8c8c8c8U, 0x37373737U, 0x6d6d6d6dU,
0x8d8d8d8dU, 0xd5d5d5d5U, 0x4e4e4e4eU, 0xa9a9a9a9U,
0x6c6c6c6cU, 0x56565656U, 0xf4f4f4f4U, 0xeaeaeaeaU,
0x65656565U, 0x7a7a7a7aU, 0xaeaeaeaeU, 0x08080808U,
0xbabababaU, 0x78787878U, 0x25252525U, 0x2e2e2e2eU,
0x1c1c1c1cU, 0xa6a6a6a6U, 0xb4b4b4b4U, 0xc6c6c6c6U,
0xe8e8e8e8U, 0xddddddddU, 0x74747474U, 0x1f1f1f1fU,
0x4b4b4b4bU, 0xbdbdbdbdU, 0x8b8b8b8bU, 0x8a8a8a8aU,
0x70707070U, 0x3e3e3e3eU, 0xb5b5b5b5U, 0x66666666U,
0x48484848U, 0x03030303U, 0xf6f6f6f6U, 0x0e0e0e0eU,
0x61616161U, 0x35353535U, 0x57575757U, 0xb9b9b9b9U,
0x86868686U, 0xc1c1c1c1U, 0x1d1d1d1dU, 0x9e9e9e9eU,
0xe1e1e1e1U, 0xf8f8f8f8U, 0x98989898U, 0x11111111U,
0x69696969U, 0xd9d9d9d9U, 0x8e8e8e8eU, 0x94949494U,
0x9b9b9b9bU, 0x1e1e1e1eU, 0x87878787U, 0xe9e9e9e9U,
0xcecececeU, 0x55555555U, 0x28282828U, 0xdfdfdfdfU,
0x8c8c8c8cU, 0xa1a1a1a1U, 0x89898989U, 0x0d0d0d0dU,
0xbfbfbfbfU, 0xe6e6e6e6U, 0x42424242U, 0x68686868U,
0x41414141U, 0x99999999U, 0x2d2d2d2dU, 0x0f0f0f0fU,
0xb0b0b0b0U, 0x54545454U, 0xbbbbbbbbU, 0x16161616U,
};
/* AES decryption round table Td0 (standard Rijndael T-table).
 * Indexed by the top state byte (Td0[sX >> 24]) in the main rounds of
 * aes_decrypt_bpt.  Presumably combines InvSubBytes and InvMixColumns
 * in the standard layout — not independently derivable from this
 * chunk. */
__constant__ u32 Td0[256] =
{
0x51f4a750U, 0x7e416553U, 0x1a17a4c3U, 0x3a275e96U,
0x3bab6bcbU, 0x1f9d45f1U, 0xacfa58abU, 0x4be30393U,
0x2030fa55U, 0xad766df6U, 0x88cc7691U, 0xf5024c25U,
0x4fe5d7fcU, 0xc52acbd7U, 0x26354480U, 0xb562a38fU,
0xdeb15a49U, 0x25ba1b67U, 0x45ea0e98U, 0x5dfec0e1U,
0xc32f7502U, 0x814cf012U, 0x8d4697a3U, 0x6bd3f9c6U,
0x038f5fe7U, 0x15929c95U, 0xbf6d7aebU, 0x955259daU,
0xd4be832dU, 0x587421d3U, 0x49e06929U, 0x8ec9c844U,
0x75c2896aU, 0xf48e7978U, 0x99583e6bU, 0x27b971ddU,
0xbee14fb6U, 0xf088ad17U, 0xc920ac66U, 0x7dce3ab4U,
0x63df4a18U, 0xe51a3182U, 0x97513360U, 0x62537f45U,
0xb16477e0U, 0xbb6bae84U, 0xfe81a01cU, 0xf9082b94U,
0x70486858U, 0x8f45fd19U, 0x94de6c87U, 0x527bf8b7U,
0xab73d323U, 0x724b02e2U, 0xe31f8f57U, 0x6655ab2aU,
0xb2eb2807U, 0x2fb5c203U, 0x86c57b9aU, 0xd33708a5U,
0x302887f2U, 0x23bfa5b2U, 0x02036abaU, 0xed16825cU,
0x8acf1c2bU, 0xa779b492U, 0xf307f2f0U, 0x4e69e2a1U,
0x65daf4cdU, 0x0605bed5U, 0xd134621fU, 0xc4a6fe8aU,
0x342e539dU, 0xa2f355a0U, 0x058ae132U, 0xa4f6eb75U,
0x0b83ec39U, 0x4060efaaU, 0x5e719f06U, 0xbd6e1051U,
0x3e218af9U, 0x96dd063dU, 0xdd3e05aeU, 0x4de6bd46U,
0x91548db5U, 0x71c45d05U, 0x0406d46fU, 0x605015ffU,
0x1998fb24U, 0xd6bde997U, 0x894043ccU, 0x67d99e77U,
0xb0e842bdU, 0x07898b88U, 0xe7195b38U, 0x79c8eedbU,
0xa17c0a47U, 0x7c420fe9U, 0xf8841ec9U, 0x00000000U,
0x09808683U, 0x322bed48U, 0x1e1170acU, 0x6c5a724eU,
0xfd0efffbU, 0x0f853856U, 0x3daed51eU, 0x362d3927U,
0x0a0fd964U, 0x685ca621U, 0x9b5b54d1U, 0x24362e3aU,
0x0c0a67b1U, 0x9357e70fU, 0xb4ee96d2U, 0x1b9b919eU,
0x80c0c54fU, 0x61dc20a2U, 0x5a774b69U, 0x1c121a16U,
0xe293ba0aU, 0xc0a02ae5U, 0x3c22e043U, 0x121b171dU,
0x0e090d0bU, 0xf28bc7adU, 0x2db6a8b9U, 0x141ea9c8U,
0x57f11985U, 0xaf75074cU, 0xee99ddbbU, 0xa37f60fdU,
0xf701269fU, 0x5c72f5bcU, 0x44663bc5U, 0x5bfb7e34U,
0x8b432976U, 0xcb23c6dcU, 0xb6edfc68U, 0xb8e4f163U,
0xd731dccaU, 0x42638510U, 0x13972240U, 0x84c61120U,
0x854a247dU, 0xd2bb3df8U, 0xaef93211U, 0xc729a16dU,
0x1d9e2f4bU, 0xdcb230f3U, 0x0d8652ecU, 0x77c1e3d0U,
0x2bb3166cU, 0xa970b999U, 0x119448faU, 0x47e96422U,
0xa8fc8cc4U, 0xa0f03f1aU, 0x567d2cd8U, 0x223390efU,
0x87494ec7U, 0xd938d1c1U, 0x8ccaa2feU, 0x98d40b36U,
0xa6f581cfU, 0xa57ade28U, 0xdab78e26U, 0x3fadbfa4U,
0x2c3a9de4U, 0x5078920dU, 0x6a5fcc9bU, 0x547e4662U,
0xf68d13c2U, 0x90d8b8e8U, 0x2e39f75eU, 0x82c3aff5U,
0x9f5d80beU, 0x69d0937cU, 0x6fd52da9U, 0xcf2512b3U,
0xc8ac993bU, 0x10187da7U, 0xe89c636eU, 0xdb3bbb7bU,
0xcd267809U, 0x6e5918f4U, 0xec9ab701U, 0x834f9aa8U,
0xe6956e65U, 0xaaffe67eU, 0x21bccf08U, 0xef15e8e6U,
0xbae79bd9U, 0x4a6f36ceU, 0xea9f09d4U, 0x29b07cd6U,
0x31a4b2afU, 0x2a3f2331U, 0xc6a59430U, 0x35a266c0U,
0x744ebc37U, 0xfc82caa6U, 0xe090d0b0U, 0x33a7d815U,
0xf104984aU, 0x41ecdaf7U, 0x7fcd500eU, 0x1791f62fU,
0x764dd68dU, 0x43efb04dU, 0xccaa4d54U, 0xe49604dfU,
0x9ed1b5e3U, 0x4c6a881bU, 0xc12c1fb8U, 0x4665517fU,
0x9d5eea04U, 0x018c355dU, 0xfa877473U, 0xfb0b412eU,
0xb3671d5aU, 0x92dbd252U, 0xe9105633U, 0x6dd64713U,
0x9ad7618cU, 0x37a10c7aU, 0x59f8148eU, 0xeb133c89U,
0xcea927eeU, 0xb761c935U, 0xe11ce5edU, 0x7a47b13cU,
0x9cd2df59U, 0x55f2733fU, 0x1814ce79U, 0x73c737bfU,
0x53f7cdeaU, 0x5ffdaa5bU, 0xdf3d6f14U, 0x7844db86U,
0xcaaff381U, 0xb968c43eU, 0x3824342cU, 0xc2a3405fU,
0x161dc372U, 0xbce2250cU, 0x283c498bU, 0xff0d9541U,
0x39a80171U, 0x080cb3deU, 0xd8b4e49cU, 0x6456c190U,
0x7bcb8461U, 0xd532b670U, 0x486c5c74U, 0xd0b85742U,
};
/* AES decryption round table Td1 — byte-rotated companion of Td0
 * (compare entry 0: Td0 0x51f4a750 vs Td1 0x5051f4a7).  Indexed by the
 * second state byte (Td1[(sX >> 16) & 0xff]) in aes_decrypt_bpt. */
__constant__ u32 Td1[256] =
{
0x5051f4a7U, 0x537e4165U, 0xc31a17a4U, 0x963a275eU,
0xcb3bab6bU, 0xf11f9d45U, 0xabacfa58U, 0x934be303U,
0x552030faU, 0xf6ad766dU, 0x9188cc76U, 0x25f5024cU,
0xfc4fe5d7U, 0xd7c52acbU, 0x80263544U, 0x8fb562a3U,
0x49deb15aU, 0x6725ba1bU, 0x9845ea0eU, 0xe15dfec0U,
0x02c32f75U, 0x12814cf0U, 0xa38d4697U, 0xc66bd3f9U,
0xe7038f5fU, 0x9515929cU, 0xebbf6d7aU, 0xda955259U,
0x2dd4be83U, 0xd3587421U, 0x2949e069U, 0x448ec9c8U,
0x6a75c289U, 0x78f48e79U, 0x6b99583eU, 0xdd27b971U,
0xb6bee14fU, 0x17f088adU, 0x66c920acU, 0xb47dce3aU,
0x1863df4aU, 0x82e51a31U, 0x60975133U, 0x4562537fU,
0xe0b16477U, 0x84bb6baeU, 0x1cfe81a0U, 0x94f9082bU,
0x58704868U, 0x198f45fdU, 0x8794de6cU, 0xb7527bf8U,
0x23ab73d3U, 0xe2724b02U, 0x57e31f8fU, 0x2a6655abU,
0x07b2eb28U, 0x032fb5c2U, 0x9a86c57bU, 0xa5d33708U,
0xf2302887U, 0xb223bfa5U, 0xba02036aU, 0x5ced1682U,
0x2b8acf1cU, 0x92a779b4U, 0xf0f307f2U, 0xa14e69e2U,
0xcd65daf4U, 0xd50605beU, 0x1fd13462U, 0x8ac4a6feU,
0x9d342e53U, 0xa0a2f355U, 0x32058ae1U, 0x75a4f6ebU,
0x390b83ecU, 0xaa4060efU, 0x065e719fU, 0x51bd6e10U,
0xf93e218aU, 0x3d96dd06U, 0xaedd3e05U, 0x464de6bdU,
0xb591548dU, 0x0571c45dU, 0x6f0406d4U, 0xff605015U,
0x241998fbU, 0x97d6bde9U, 0xcc894043U, 0x7767d99eU,
0xbdb0e842U, 0x8807898bU, 0x38e7195bU, 0xdb79c8eeU,
0x47a17c0aU, 0xe97c420fU, 0xc9f8841eU, 0x00000000U,
0x83098086U, 0x48322bedU, 0xac1e1170U, 0x4e6c5a72U,
0xfbfd0effU, 0x560f8538U, 0x1e3daed5U, 0x27362d39U,
0x640a0fd9U, 0x21685ca6U, 0xd19b5b54U, 0x3a24362eU,
0xb10c0a67U, 0x0f9357e7U, 0xd2b4ee96U, 0x9e1b9b91U,
0x4f80c0c5U, 0xa261dc20U, 0x695a774bU, 0x161c121aU,
0x0ae293baU, 0xe5c0a02aU, 0x433c22e0U, 0x1d121b17U,
0x0b0e090dU, 0xadf28bc7U, 0xb92db6a8U, 0xc8141ea9U,
0x8557f119U, 0x4caf7507U, 0xbbee99ddU, 0xfda37f60U,
0x9ff70126U, 0xbc5c72f5U, 0xc544663bU, 0x345bfb7eU,
0x768b4329U, 0xdccb23c6U, 0x68b6edfcU, 0x63b8e4f1U,
0xcad731dcU, 0x10426385U, 0x40139722U, 0x2084c611U,
0x7d854a24U, 0xf8d2bb3dU, 0x11aef932U, 0x6dc729a1U,
0x4b1d9e2fU, 0xf3dcb230U, 0xec0d8652U, 0xd077c1e3U,
0x6c2bb316U, 0x99a970b9U, 0xfa119448U, 0x2247e964U,
0xc4a8fc8cU, 0x1aa0f03fU, 0xd8567d2cU, 0xef223390U,
0xc787494eU, 0xc1d938d1U, 0xfe8ccaa2U, 0x3698d40bU,
0xcfa6f581U, 0x28a57adeU, 0x26dab78eU, 0xa43fadbfU,
0xe42c3a9dU, 0x0d507892U, 0x9b6a5fccU, 0x62547e46U,
0xc2f68d13U, 0xe890d8b8U, 0x5e2e39f7U, 0xf582c3afU,
0xbe9f5d80U, 0x7c69d093U, 0xa96fd52dU, 0xb3cf2512U,
0x3bc8ac99U, 0xa710187dU, 0x6ee89c63U, 0x7bdb3bbbU,
0x09cd2678U, 0xf46e5918U, 0x01ec9ab7U, 0xa8834f9aU,
0x65e6956eU, 0x7eaaffe6U, 0x0821bccfU, 0xe6ef15e8U,
0xd9bae79bU, 0xce4a6f36U, 0xd4ea9f09U, 0xd629b07cU,
0xaf31a4b2U, 0x312a3f23U, 0x30c6a594U, 0xc035a266U,
0x37744ebcU, 0xa6fc82caU, 0xb0e090d0U, 0x1533a7d8U,
0x4af10498U, 0xf741ecdaU, 0x0e7fcd50U, 0x2f1791f6U,
0x8d764dd6U, 0x4d43efb0U, 0x54ccaa4dU, 0xdfe49604U,
0xe39ed1b5U, 0x1b4c6a88U, 0xb8c12c1fU, 0x7f466551U,
0x049d5eeaU, 0x5d018c35U, 0x73fa8774U, 0x2efb0b41U,
0x5ab3671dU, 0x5292dbd2U, 0x33e91056U, 0x136dd647U,
0x8c9ad761U, 0x7a37a10cU, 0x8e59f814U, 0x89eb133cU,
0xeecea927U, 0x35b761c9U, 0xede11ce5U, 0x3c7a47b1U,
0x599cd2dfU, 0x3f55f273U, 0x791814ceU, 0xbf73c737U,
0xea53f7cdU, 0x5b5ffdaaU, 0x14df3d6fU, 0x867844dbU,
0x81caaff3U, 0x3eb968c4U, 0x2c382434U, 0x5fc2a340U,
0x72161dc3U, 0x0cbce225U, 0x8b283c49U, 0x41ff0d95U,
0x7139a801U, 0xde080cb3U, 0x9cd8b4e4U, 0x906456c1U,
0x617bcb84U, 0x70d532b6U, 0x74486c5cU, 0x42d0b857U,
};
/* AES decryption round table Td2 — byte-rotated companion of Td0/Td1
 * (entry 0: 0xa75051f4).  Indexed by the third state byte
 * (Td2[(sX >> 8) & 0xff]) in aes_decrypt_bpt. */
__constant__ u32 Td2[256] =
{
0xa75051f4U, 0x65537e41U, 0xa4c31a17U, 0x5e963a27U,
0x6bcb3babU, 0x45f11f9dU, 0x58abacfaU, 0x03934be3U,
0xfa552030U, 0x6df6ad76U, 0x769188ccU, 0x4c25f502U,
0xd7fc4fe5U, 0xcbd7c52aU, 0x44802635U, 0xa38fb562U,
0x5a49deb1U, 0x1b6725baU, 0x0e9845eaU, 0xc0e15dfeU,
0x7502c32fU, 0xf012814cU, 0x97a38d46U, 0xf9c66bd3U,
0x5fe7038fU, 0x9c951592U, 0x7aebbf6dU, 0x59da9552U,
0x832dd4beU, 0x21d35874U, 0x692949e0U, 0xc8448ec9U,
0x896a75c2U, 0x7978f48eU, 0x3e6b9958U, 0x71dd27b9U,
0x4fb6bee1U, 0xad17f088U, 0xac66c920U, 0x3ab47dceU,
0x4a1863dfU, 0x3182e51aU, 0x33609751U, 0x7f456253U,
0x77e0b164U, 0xae84bb6bU, 0xa01cfe81U, 0x2b94f908U,
0x68587048U, 0xfd198f45U, 0x6c8794deU, 0xf8b7527bU,
0xd323ab73U, 0x02e2724bU, 0x8f57e31fU, 0xab2a6655U,
0x2807b2ebU, 0xc2032fb5U, 0x7b9a86c5U, 0x08a5d337U,
0x87f23028U, 0xa5b223bfU, 0x6aba0203U, 0x825ced16U,
0x1c2b8acfU, 0xb492a779U, 0xf2f0f307U, 0xe2a14e69U,
0xf4cd65daU, 0xbed50605U, 0x621fd134U, 0xfe8ac4a6U,
0x539d342eU, 0x55a0a2f3U, 0xe132058aU, 0xeb75a4f6U,
0xec390b83U, 0xefaa4060U, 0x9f065e71U, 0x1051bd6eU,
0x8af93e21U, 0x063d96ddU, 0x05aedd3eU, 0xbd464de6U,
0x8db59154U, 0x5d0571c4U, 0xd46f0406U, 0x15ff6050U,
0xfb241998U, 0xe997d6bdU, 0x43cc8940U, 0x9e7767d9U,
0x42bdb0e8U, 0x8b880789U, 0x5b38e719U, 0xeedb79c8U,
0x0a47a17cU, 0x0fe97c42U, 0x1ec9f884U, 0x00000000U,
0x86830980U, 0xed48322bU, 0x70ac1e11U, 0x724e6c5aU,
0xfffbfd0eU, 0x38560f85U, 0xd51e3daeU, 0x3927362dU,
0xd9640a0fU, 0xa621685cU, 0x54d19b5bU, 0x2e3a2436U,
0x67b10c0aU, 0xe70f9357U, 0x96d2b4eeU, 0x919e1b9bU,
0xc54f80c0U, 0x20a261dcU, 0x4b695a77U, 0x1a161c12U,
0xba0ae293U, 0x2ae5c0a0U, 0xe0433c22U, 0x171d121bU,
0x0d0b0e09U, 0xc7adf28bU, 0xa8b92db6U, 0xa9c8141eU,
0x198557f1U, 0x074caf75U, 0xddbbee99U, 0x60fda37fU,
0x269ff701U, 0xf5bc5c72U, 0x3bc54466U, 0x7e345bfbU,
0x29768b43U, 0xc6dccb23U, 0xfc68b6edU, 0xf163b8e4U,
0xdccad731U, 0x85104263U, 0x22401397U, 0x112084c6U,
0x247d854aU, 0x3df8d2bbU, 0x3211aef9U, 0xa16dc729U,
0x2f4b1d9eU, 0x30f3dcb2U, 0x52ec0d86U, 0xe3d077c1U,
0x166c2bb3U, 0xb999a970U, 0x48fa1194U, 0x642247e9U,
0x8cc4a8fcU, 0x3f1aa0f0U, 0x2cd8567dU, 0x90ef2233U,
0x4ec78749U, 0xd1c1d938U, 0xa2fe8ccaU, 0x0b3698d4U,
0x81cfa6f5U, 0xde28a57aU, 0x8e26dab7U, 0xbfa43fadU,
0x9de42c3aU, 0x920d5078U, 0xcc9b6a5fU, 0x4662547eU,
0x13c2f68dU, 0xb8e890d8U, 0xf75e2e39U, 0xaff582c3U,
0x80be9f5dU, 0x937c69d0U, 0x2da96fd5U, 0x12b3cf25U,
0x993bc8acU, 0x7da71018U, 0x636ee89cU, 0xbb7bdb3bU,
0x7809cd26U, 0x18f46e59U, 0xb701ec9aU, 0x9aa8834fU,
0x6e65e695U, 0xe67eaaffU, 0xcf0821bcU, 0xe8e6ef15U,
0x9bd9bae7U, 0x36ce4a6fU, 0x09d4ea9fU, 0x7cd629b0U,
0xb2af31a4U, 0x23312a3fU, 0x9430c6a5U, 0x66c035a2U,
0xbc37744eU, 0xcaa6fc82U, 0xd0b0e090U, 0xd81533a7U,
0x984af104U, 0xdaf741ecU, 0x500e7fcdU, 0xf62f1791U,
0xd68d764dU, 0xb04d43efU, 0x4d54ccaaU, 0x04dfe496U,
0xb5e39ed1U, 0x881b4c6aU, 0x1fb8c12cU, 0x517f4665U,
0xea049d5eU, 0x355d018cU, 0x7473fa87U, 0x412efb0bU,
0x1d5ab367U, 0xd25292dbU, 0x5633e910U, 0x47136dd6U,
0x618c9ad7U, 0x0c7a37a1U, 0x148e59f8U, 0x3c89eb13U,
0x27eecea9U, 0xc935b761U, 0xe5ede11cU, 0xb13c7a47U,
0xdf599cd2U, 0x733f55f2U, 0xce791814U, 0x37bf73c7U,
0xcdea53f7U, 0xaa5b5ffdU, 0x6f14df3dU, 0xdb867844U,
0xf381caafU, 0xc43eb968U, 0x342c3824U, 0x405fc2a3U,
0xc372161dU, 0x250cbce2U, 0x498b283cU, 0x9541ff0dU,
0x017139a8U, 0xb3de080cU, 0xe49cd8b4U, 0xc1906456U,
0x84617bcbU, 0xb670d532U, 0x5c74486cU, 0x5742d0b8U,
};
/* AES decryption round table Td3 — byte-rotated companion of Td0..Td2
 * (entry 0: 0xf4a75051).  Indexed by the low state byte
 * (Td3[sX & 0xff]) in aes_decrypt_bpt. */
__constant__ u32 Td3[256] =
{
0xf4a75051U, 0x4165537eU, 0x17a4c31aU, 0x275e963aU,
0xab6bcb3bU, 0x9d45f11fU, 0xfa58abacU, 0xe303934bU,
0x30fa5520U, 0x766df6adU, 0xcc769188U, 0x024c25f5U,
0xe5d7fc4fU, 0x2acbd7c5U, 0x35448026U, 0x62a38fb5U,
0xb15a49deU, 0xba1b6725U, 0xea0e9845U, 0xfec0e15dU,
0x2f7502c3U, 0x4cf01281U, 0x4697a38dU, 0xd3f9c66bU,
0x8f5fe703U, 0x929c9515U, 0x6d7aebbfU, 0x5259da95U,
0xbe832dd4U, 0x7421d358U, 0xe0692949U, 0xc9c8448eU,
0xc2896a75U, 0x8e7978f4U, 0x583e6b99U, 0xb971dd27U,
0xe14fb6beU, 0x88ad17f0U, 0x20ac66c9U, 0xce3ab47dU,
0xdf4a1863U, 0x1a3182e5U, 0x51336097U, 0x537f4562U,
0x6477e0b1U, 0x6bae84bbU, 0x81a01cfeU, 0x082b94f9U,
0x48685870U, 0x45fd198fU, 0xde6c8794U, 0x7bf8b752U,
0x73d323abU, 0x4b02e272U, 0x1f8f57e3U, 0x55ab2a66U,
0xeb2807b2U, 0xb5c2032fU, 0xc57b9a86U, 0x3708a5d3U,
0x2887f230U, 0xbfa5b223U, 0x036aba02U, 0x16825cedU,
0xcf1c2b8aU, 0x79b492a7U, 0x07f2f0f3U, 0x69e2a14eU,
0xdaf4cd65U, 0x05bed506U, 0x34621fd1U, 0xa6fe8ac4U,
0x2e539d34U, 0xf355a0a2U, 0x8ae13205U, 0xf6eb75a4U,
0x83ec390bU, 0x60efaa40U, 0x719f065eU, 0x6e1051bdU,
0x218af93eU, 0xdd063d96U, 0x3e05aeddU, 0xe6bd464dU,
0x548db591U, 0xc45d0571U, 0x06d46f04U, 0x5015ff60U,
0x98fb2419U, 0xbde997d6U, 0x4043cc89U, 0xd99e7767U,
0xe842bdb0U, 0x898b8807U, 0x195b38e7U, 0xc8eedb79U,
0x7c0a47a1U, 0x420fe97cU, 0x841ec9f8U, 0x00000000U,
0x80868309U, 0x2bed4832U, 0x1170ac1eU, 0x5a724e6cU,
0x0efffbfdU, 0x8538560fU, 0xaed51e3dU, 0x2d392736U,
0x0fd9640aU, 0x5ca62168U, 0x5b54d19bU, 0x362e3a24U,
0x0a67b10cU, 0x57e70f93U, 0xee96d2b4U, 0x9b919e1bU,
0xc0c54f80U, 0xdc20a261U, 0x774b695aU, 0x121a161cU,
0x93ba0ae2U, 0xa02ae5c0U, 0x22e0433cU, 0x1b171d12U,
0x090d0b0eU, 0x8bc7adf2U, 0xb6a8b92dU, 0x1ea9c814U,
0xf1198557U, 0x75074cafU, 0x99ddbbeeU, 0x7f60fda3U,
0x01269ff7U, 0x72f5bc5cU, 0x663bc544U, 0xfb7e345bU,
0x4329768bU, 0x23c6dccbU, 0xedfc68b6U, 0xe4f163b8U,
0x31dccad7U, 0x63851042U, 0x97224013U, 0xc6112084U,
0x4a247d85U, 0xbb3df8d2U, 0xf93211aeU, 0x29a16dc7U,
0x9e2f4b1dU, 0xb230f3dcU, 0x8652ec0dU, 0xc1e3d077U,
0xb3166c2bU, 0x70b999a9U, 0x9448fa11U, 0xe9642247U,
0xfc8cc4a8U, 0xf03f1aa0U, 0x7d2cd856U, 0x3390ef22U,
0x494ec787U, 0x38d1c1d9U, 0xcaa2fe8cU, 0xd40b3698U,
0xf581cfa6U, 0x7ade28a5U, 0xb78e26daU, 0xadbfa43fU,
0x3a9de42cU, 0x78920d50U, 0x5fcc9b6aU, 0x7e466254U,
0x8d13c2f6U, 0xd8b8e890U, 0x39f75e2eU, 0xc3aff582U,
0x5d80be9fU, 0xd0937c69U, 0xd52da96fU, 0x2512b3cfU,
0xac993bc8U, 0x187da710U, 0x9c636ee8U, 0x3bbb7bdbU,
0x267809cdU, 0x5918f46eU, 0x9ab701ecU, 0x4f9aa883U,
0x956e65e6U, 0xffe67eaaU, 0xbccf0821U, 0x15e8e6efU,
0xe79bd9baU, 0x6f36ce4aU, 0x9f09d4eaU, 0xb07cd629U,
0xa4b2af31U, 0x3f23312aU, 0xa59430c6U, 0xa266c035U,
0x4ebc3774U, 0x82caa6fcU, 0x90d0b0e0U, 0xa7d81533U,
0x04984af1U, 0xecdaf741U, 0xcd500e7fU, 0x91f62f17U,
0x4dd68d76U, 0xefb04d43U, 0xaa4d54ccU, 0x9604dfe4U,
0xd1b5e39eU, 0x6a881b4cU, 0x2c1fb8c1U, 0x65517f46U,
0x5eea049dU, 0x8c355d01U, 0x877473faU, 0x0b412efbU,
0x671d5ab3U, 0xdbd25292U, 0x105633e9U, 0xd647136dU,
0xd7618c9aU, 0xa10c7a37U, 0xf8148e59U, 0x133c89ebU,
0xa927eeceU, 0x61c935b7U, 0x1ce5ede1U, 0x47b13c7aU,
0xd2df599cU, 0xf2733f55U, 0x14ce7918U, 0xc737bf73U,
0xf7cdea53U, 0xfdaa5b5fU, 0x3d6f14dfU, 0x44db8678U,
0xaff381caU, 0x68c43eb9U, 0x24342c38U, 0xa3405fc2U,
0x1dc37216U, 0xe2250cbcU, 0x3c498b28U, 0x0d9541ffU,
0xa8017139U, 0x0cb3de08U, 0xb4e49cd8U, 0x56c19064U,
0xcb84617bU, 0x32b670d5U, 0x6c5c7448U, 0xb85742d0U,
};
/* AES inverse S-box with each byte replicated into all four lanes of
 * the word (e.g. InvS[0x00]=0x52 -> 0x52525252).  Presumably consumed
 * lane-masked by the final decryption round (that code is outside this
 * chunk — verify against aes_decrypt_bpt's tail). */
__constant__ u32 Td4[256] =
{
0x52525252U, 0x09090909U, 0x6a6a6a6aU, 0xd5d5d5d5U,
0x30303030U, 0x36363636U, 0xa5a5a5a5U, 0x38383838U,
0xbfbfbfbfU, 0x40404040U, 0xa3a3a3a3U, 0x9e9e9e9eU,
0x81818181U, 0xf3f3f3f3U, 0xd7d7d7d7U, 0xfbfbfbfbU,
0x7c7c7c7cU, 0xe3e3e3e3U, 0x39393939U, 0x82828282U,
0x9b9b9b9bU, 0x2f2f2f2fU, 0xffffffffU, 0x87878787U,
0x34343434U, 0x8e8e8e8eU, 0x43434343U, 0x44444444U,
0xc4c4c4c4U, 0xdedededeU, 0xe9e9e9e9U, 0xcbcbcbcbU,
0x54545454U, 0x7b7b7b7bU, 0x94949494U, 0x32323232U,
0xa6a6a6a6U, 0xc2c2c2c2U, 0x23232323U, 0x3d3d3d3dU,
0xeeeeeeeeU, 0x4c4c4c4cU, 0x95959595U, 0x0b0b0b0bU,
0x42424242U, 0xfafafafaU, 0xc3c3c3c3U, 0x4e4e4e4eU,
0x08080808U, 0x2e2e2e2eU, 0xa1a1a1a1U, 0x66666666U,
0x28282828U, 0xd9d9d9d9U, 0x24242424U, 0xb2b2b2b2U,
0x76767676U, 0x5b5b5b5bU, 0xa2a2a2a2U, 0x49494949U,
0x6d6d6d6dU, 0x8b8b8b8bU, 0xd1d1d1d1U, 0x25252525U,
0x72727272U, 0xf8f8f8f8U, 0xf6f6f6f6U, 0x64646464U,
0x86868686U, 0x68686868U, 0x98989898U, 0x16161616U,
0xd4d4d4d4U, 0xa4a4a4a4U, 0x5c5c5c5cU, 0xccccccccU,
0x5d5d5d5dU, 0x65656565U, 0xb6b6b6b6U, 0x92929292U,
0x6c6c6c6cU, 0x70707070U, 0x48484848U, 0x50505050U,
0xfdfdfdfdU, 0xededededU, 0xb9b9b9b9U, 0xdadadadaU,
0x5e5e5e5eU, 0x15151515U, 0x46464646U, 0x57575757U,
0xa7a7a7a7U, 0x8d8d8d8dU, 0x9d9d9d9dU, 0x84848484U,
0x90909090U, 0xd8d8d8d8U, 0xababababU, 0x00000000U,
0x8c8c8c8cU, 0xbcbcbcbcU, 0xd3d3d3d3U, 0x0a0a0a0aU,
0xf7f7f7f7U, 0xe4e4e4e4U, 0x58585858U, 0x05050505U,
0xb8b8b8b8U, 0xb3b3b3b3U, 0x45454545U, 0x06060606U,
0xd0d0d0d0U, 0x2c2c2c2cU, 0x1e1e1e1eU, 0x8f8f8f8fU,
0xcacacacaU, 0x3f3f3f3fU, 0x0f0f0f0fU, 0x02020202U,
0xc1c1c1c1U, 0xafafafafU, 0xbdbdbdbdU, 0x03030303U,
0x01010101U, 0x13131313U, 0x8a8a8a8aU, 0x6b6b6b6bU,
0x3a3a3a3aU, 0x91919191U, 0x11111111U, 0x41414141U,
0x4f4f4f4fU, 0x67676767U, 0xdcdcdcdcU, 0xeaeaeaeaU,
0x97979797U, 0xf2f2f2f2U, 0xcfcfcfcfU, 0xcecececeU,
0xf0f0f0f0U, 0xb4b4b4b4U, 0xe6e6e6e6U, 0x73737373U,
0x96969696U, 0xacacacacU, 0x74747474U, 0x22222222U,
0xe7e7e7e7U, 0xadadadadU, 0x35353535U, 0x85858585U,
0xe2e2e2e2U, 0xf9f9f9f9U, 0x37373737U, 0xe8e8e8e8U,
0x1c1c1c1cU, 0x75757575U, 0xdfdfdfdfU, 0x6e6e6e6eU,
0x47474747U, 0xf1f1f1f1U, 0x1a1a1a1aU, 0x71717171U,
0x1d1d1d1dU, 0x29292929U, 0xc5c5c5c5U, 0x89898989U,
0x6f6f6f6fU, 0xb7b7b7b7U, 0x62626262U, 0x0e0e0e0eU,
0xaaaaaaaaU, 0x18181818U, 0xbebebebeU, 0x1b1b1b1bU,
0xfcfcfcfcU, 0x56565656U, 0x3e3e3e3eU, 0x4b4b4b4bU,
0xc6c6c6c6U, 0xd2d2d2d2U, 0x79797979U, 0x20202020U,
0x9a9a9a9aU, 0xdbdbdbdbU, 0xc0c0c0c0U, 0xfefefefeU,
0x78787878U, 0xcdcdcdcdU, 0x5a5a5a5aU, 0xf4f4f4f4U,
0x1f1f1f1fU, 0xddddddddU, 0xa8a8a8a8U, 0x33333333U,
0x88888888U, 0x07070707U, 0xc7c7c7c7U, 0x31313131U,
0xb1b1b1b1U, 0x12121212U, 0x10101010U, 0x59595959U,
0x27272727U, 0x80808080U, 0xececececU, 0x5f5f5f5fU,
0x60606060U, 0x51515151U, 0x7f7f7f7fU, 0xa9a9a9a9U,
0x19191919U, 0xb5b5b5b5U, 0x4a4a4a4aU, 0x0d0d0d0dU,
0x2d2d2d2dU, 0xe5e5e5e5U, 0x7a7a7a7aU, 0x9f9f9f9fU,
0x93939393U, 0xc9c9c9c9U, 0x9c9c9c9cU, 0xefefefefU,
0xa0a0a0a0U, 0xe0e0e0e0U, 0x3b3b3b3bU, 0x4d4d4d4dU,
0xaeaeaeaeU, 0x2a2a2a2aU, 0xf5f5f5f5U, 0xb0b0b0b0U,
0xc8c8c8c8U, 0xebebebebU, 0xbbbbbbbbU, 0x3c3c3c3cU,
0x83838383U, 0x53535353U, 0x99999999U, 0x61616161U,
0x17171717U, 0x2b2b2b2bU, 0x04040404U, 0x7e7e7e7eU,
0xbabababaU, 0x77777777U, 0xd6d6d6d6U, 0x26262626U,
0xe1e1e1e1U, 0x69696969U, 0x14141414U, 0x63636363U,
0x55555555U, 0x21212121U, 0x0c0c0c0cU, 0x7d7d7d7dU,
};
/* AES key-schedule round constants (rcon[i] = x^i in GF(2^8), placed
 * in the top byte).  Presumably consumed by the key-expansion routine,
 * which is not visible in this chunk. */
__constant__ u32 rcon[] =
{
0x01000000, 0x02000000, 0x04000000, 0x08000000,
0x10000000, 0x20000000, 0x40000000, 0x80000000,
0x1B000000, 0x36000000,
/* for 128-bit blocks, Rijndael never uses more than 10 rcon values */
};
/*
* Device code file for AES
*/
/*
 * AES block encryption, one 16-byte block per thread ("bpt").
 * Each thread encrypts in place the block at
 * text + 16*(blockIdx.x*blockDim.x + threadIdx.x) using the expanded
 * key schedule rk (4*(nrounds+1) words) for nrounds rounds — the
 * branches on nrounds select the extra rounds for 192/256-bit keys
 * (nrounds = 10/12/14 for AES-128/192/256).  Rounds are fully
 * unrolled in the standard two-phase s->t / t->s pattern so the state
 * stays in registers.
 *
 * NOTE(review): there is no bounds check on the thread index — the
 * launch configuration must cover exactly the number of 16-byte
 * blocks in `text`, or excess threads read/write out of bounds.
 * Assumes a 1-D grid/block launch.
 */
__global__ void aes_encrypt_bpt(u32 *rk, int nrounds, u8* text)
{
u32 s0, s1, s2, s3, t0, t1, t2, t3;
/* this thread's 16-byte block within the contiguous text buffer */
u8 *txt = text+(16*(blockIdx.x*blockDim.x+threadIdx.x));
/* load block into the state words and add the initial round key */
s0 = GETU32(txt ) ^ rk[0];
s1 = GETU32(txt + 4) ^ rk[1];
s2 = GETU32(txt + 8) ^ rk[2];
s3 = GETU32(txt + 12) ^ rk[3];
/* round 1: */
t0 = Te0[s0 >> 24] ^ Te1[(s1 >> 16) & 0xff] ^ Te2[(s2 >> 8) & 0xff] ^ Te3[s3 & 0xff] ^ rk[ 4];
t1 = Te0[s1 >> 24] ^ Te1[(s2 >> 16) & 0xff] ^ Te2[(s3 >> 8) & 0xff] ^ Te3[s0 & 0xff] ^ rk[ 5];
t2 = Te0[s2 >> 24] ^ Te1[(s3 >> 16) & 0xff] ^ Te2[(s0 >> 8) & 0xff] ^ Te3[s1 & 0xff] ^ rk[ 6];
t3 = Te0[s3 >> 24] ^ Te1[(s0 >> 16) & 0xff] ^ Te2[(s1 >> 8) & 0xff] ^ Te3[s2 & 0xff] ^ rk[ 7];
/* round 2: */
s0 = Te0[t0 >> 24] ^ Te1[(t1 >> 16) & 0xff] ^ Te2[(t2 >> 8) & 0xff] ^ Te3[t3 & 0xff] ^ rk[ 8];
s1 = Te0[t1 >> 24] ^ Te1[(t2 >> 16) & 0xff] ^ Te2[(t3 >> 8) & 0xff] ^ Te3[t0 & 0xff] ^ rk[ 9];
s2 = Te0[t2 >> 24] ^ Te1[(t3 >> 16) & 0xff] ^ Te2[(t0 >> 8) & 0xff] ^ Te3[t1 & 0xff] ^ rk[10];
s3 = Te0[t3 >> 24] ^ Te1[(t0 >> 16) & 0xff] ^ Te2[(t1 >> 8) & 0xff] ^ Te3[t2 & 0xff] ^ rk[11];
/* round 3: */
t0 = Te0[s0 >> 24] ^ Te1[(s1 >> 16) & 0xff] ^ Te2[(s2 >> 8) & 0xff] ^ Te3[s3 & 0xff] ^ rk[12];
t1 = Te0[s1 >> 24] ^ Te1[(s2 >> 16) & 0xff] ^ Te2[(s3 >> 8) & 0xff] ^ Te3[s0 & 0xff] ^ rk[13];
t2 = Te0[s2 >> 24] ^ Te1[(s3 >> 16) & 0xff] ^ Te2[(s0 >> 8) & 0xff] ^ Te3[s1 & 0xff] ^ rk[14];
t3 = Te0[s3 >> 24] ^ Te1[(s0 >> 16) & 0xff] ^ Te2[(s1 >> 8) & 0xff] ^ Te3[s2 & 0xff] ^ rk[15];
/* round 4: */
s0 = Te0[t0 >> 24] ^ Te1[(t1 >> 16) & 0xff] ^ Te2[(t2 >> 8) & 0xff] ^ Te3[t3 & 0xff] ^ rk[16];
s1 = Te0[t1 >> 24] ^ Te1[(t2 >> 16) & 0xff] ^ Te2[(t3 >> 8) & 0xff] ^ Te3[t0 & 0xff] ^ rk[17];
s2 = Te0[t2 >> 24] ^ Te1[(t3 >> 16) & 0xff] ^ Te2[(t0 >> 8) & 0xff] ^ Te3[t1 & 0xff] ^ rk[18];
s3 = Te0[t3 >> 24] ^ Te1[(t0 >> 16) & 0xff] ^ Te2[(t1 >> 8) & 0xff] ^ Te3[t2 & 0xff] ^ rk[19];
/* round 5: */
t0 = Te0[s0 >> 24] ^ Te1[(s1 >> 16) & 0xff] ^ Te2[(s2 >> 8) & 0xff] ^ Te3[s3 & 0xff] ^ rk[20];
t1 = Te0[s1 >> 24] ^ Te1[(s2 >> 16) & 0xff] ^ Te2[(s3 >> 8) & 0xff] ^ Te3[s0 & 0xff] ^ rk[21];
t2 = Te0[s2 >> 24] ^ Te1[(s3 >> 16) & 0xff] ^ Te2[(s0 >> 8) & 0xff] ^ Te3[s1 & 0xff] ^ rk[22];
t3 = Te0[s3 >> 24] ^ Te1[(s0 >> 16) & 0xff] ^ Te2[(s1 >> 8) & 0xff] ^ Te3[s2 & 0xff] ^ rk[23];
/* round 6: */
s0 = Te0[t0 >> 24] ^ Te1[(t1 >> 16) & 0xff] ^ Te2[(t2 >> 8) & 0xff] ^ Te3[t3 & 0xff] ^ rk[24];
s1 = Te0[t1 >> 24] ^ Te1[(t2 >> 16) & 0xff] ^ Te2[(t3 >> 8) & 0xff] ^ Te3[t0 & 0xff] ^ rk[25];
s2 = Te0[t2 >> 24] ^ Te1[(t3 >> 16) & 0xff] ^ Te2[(t0 >> 8) & 0xff] ^ Te3[t1 & 0xff] ^ rk[26];
s3 = Te0[t3 >> 24] ^ Te1[(t0 >> 16) & 0xff] ^ Te2[(t1 >> 8) & 0xff] ^ Te3[t2 & 0xff] ^ rk[27];
/* round 7: */
t0 = Te0[s0 >> 24] ^ Te1[(s1 >> 16) & 0xff] ^ Te2[(s2 >> 8) & 0xff] ^ Te3[s3 & 0xff] ^ rk[28];
t1 = Te0[s1 >> 24] ^ Te1[(s2 >> 16) & 0xff] ^ Te2[(s3 >> 8) & 0xff] ^ Te3[s0 & 0xff] ^ rk[29];
t2 = Te0[s2 >> 24] ^ Te1[(s3 >> 16) & 0xff] ^ Te2[(s0 >> 8) & 0xff] ^ Te3[s1 & 0xff] ^ rk[30];
t3 = Te0[s3 >> 24] ^ Te1[(s0 >> 16) & 0xff] ^ Te2[(s1 >> 8) & 0xff] ^ Te3[s2 & 0xff] ^ rk[31];
/* round 8: */
s0 = Te0[t0 >> 24] ^ Te1[(t1 >> 16) & 0xff] ^ Te2[(t2 >> 8) & 0xff] ^ Te3[t3 & 0xff] ^ rk[32];
s1 = Te0[t1 >> 24] ^ Te1[(t2 >> 16) & 0xff] ^ Te2[(t3 >> 8) & 0xff] ^ Te3[t0 & 0xff] ^ rk[33];
s2 = Te0[t2 >> 24] ^ Te1[(t3 >> 16) & 0xff] ^ Te2[(t0 >> 8) & 0xff] ^ Te3[t1 & 0xff] ^ rk[34];
s3 = Te0[t3 >> 24] ^ Te1[(t0 >> 16) & 0xff] ^ Te2[(t1 >> 8) & 0xff] ^ Te3[t2 & 0xff] ^ rk[35];
/* round 9: */
t0 = Te0[s0 >> 24] ^ Te1[(s1 >> 16) & 0xff] ^ Te2[(s2 >> 8) & 0xff] ^ Te3[s3 & 0xff] ^ rk[36];
t1 = Te0[s1 >> 24] ^ Te1[(s2 >> 16) & 0xff] ^ Te2[(s3 >> 8) & 0xff] ^ Te3[s0 & 0xff] ^ rk[37];
t2 = Te0[s2 >> 24] ^ Te1[(s3 >> 16) & 0xff] ^ Te2[(s0 >> 8) & 0xff] ^ Te3[s1 & 0xff] ^ rk[38];
t3 = Te0[s3 >> 24] ^ Te1[(s0 >> 16) & 0xff] ^ Te2[(s1 >> 8) & 0xff] ^ Te3[s2 & 0xff] ^ rk[39];
/* extra rounds for 192/256-bit keys (nrounds = 12 or 14) */
if (nrounds > 10)
{
/* round 10: */
s0 = Te0[t0 >> 24] ^ Te1[(t1 >> 16) & 0xff] ^ Te2[(t2 >> 8) & 0xff] ^ Te3[t3 & 0xff] ^ rk[40];
s1 = Te0[t1 >> 24] ^ Te1[(t2 >> 16) & 0xff] ^ Te2[(t3 >> 8) & 0xff] ^ Te3[t0 & 0xff] ^ rk[41];
s2 = Te0[t2 >> 24] ^ Te1[(t3 >> 16) & 0xff] ^ Te2[(t0 >> 8) & 0xff] ^ Te3[t1 & 0xff] ^ rk[42];
s3 = Te0[t3 >> 24] ^ Te1[(t0 >> 16) & 0xff] ^ Te2[(t1 >> 8) & 0xff] ^ Te3[t2 & 0xff] ^ rk[43];
/* round 11: */
t0 = Te0[s0 >> 24] ^ Te1[(s1 >> 16) & 0xff] ^ Te2[(s2 >> 8) & 0xff] ^ Te3[s3 & 0xff] ^ rk[44];
t1 = Te0[s1 >> 24] ^ Te1[(s2 >> 16) & 0xff] ^ Te2[(s3 >> 8) & 0xff] ^ Te3[s0 & 0xff] ^ rk[45];
t2 = Te0[s2 >> 24] ^ Te1[(s3 >> 16) & 0xff] ^ Te2[(s0 >> 8) & 0xff] ^ Te3[s1 & 0xff] ^ rk[46];
t3 = Te0[s3 >> 24] ^ Te1[(s0 >> 16) & 0xff] ^ Te2[(s1 >> 8) & 0xff] ^ Te3[s2 & 0xff] ^ rk[47];
if (nrounds > 12)
{
/* round 12: */
s0 = Te0[t0 >> 24] ^ Te1[(t1 >> 16) & 0xff] ^ Te2[(t2 >> 8) & 0xff] ^ Te3[t3 & 0xff] ^ rk[48];
s1 = Te0[t1 >> 24] ^ Te1[(t2 >> 16) & 0xff] ^ Te2[(t3 >> 8) & 0xff] ^ Te3[t0 & 0xff] ^ rk[49];
s2 = Te0[t2 >> 24] ^ Te1[(t3 >> 16) & 0xff] ^ Te2[(t0 >> 8) & 0xff] ^ Te3[t1 & 0xff] ^ rk[50];
s3 = Te0[t3 >> 24] ^ Te1[(t0 >> 16) & 0xff] ^ Te2[(t1 >> 8) & 0xff] ^ Te3[t2 & 0xff] ^ rk[51];
/* round 13: */
t0 = Te0[s0 >> 24] ^ Te1[(s1 >> 16) & 0xff] ^ Te2[(s2 >> 8) & 0xff] ^ Te3[s3 & 0xff] ^ rk[52];
t1 = Te0[s1 >> 24] ^ Te1[(s2 >> 16) & 0xff] ^ Te2[(s3 >> 8) & 0xff] ^ Te3[s0 & 0xff] ^ rk[53];
t2 = Te0[s2 >> 24] ^ Te1[(s3 >> 16) & 0xff] ^ Te2[(s0 >> 8) & 0xff] ^ Te3[s1 & 0xff] ^ rk[54];
t3 = Te0[s3 >> 24] ^ Te1[(s0 >> 16) & 0xff] ^ Te2[(s1 >> 8) & 0xff] ^ Te3[s2 & 0xff] ^ rk[55];
}
}
/* advance to the final-round key: words 4*nrounds .. 4*nrounds+3 */
rk += nrounds << 2;
/* final round: apply Te4 (replicated S-box) lane-by-lane, add the
 * last round key, and store the ciphertext back into txt */
s0 =
(Te4[(t0 >> 24) ] & 0xff000000) ^
(Te4[(t1 >> 16) & 0xff] & 0x00ff0000) ^
(Te4[(t2 >> 8) & 0xff] & 0x0000ff00) ^
(Te4[(t3 ) & 0xff] & 0x000000ff) ^
rk[0];
PUTU32(txt , s0);
s1 =
(Te4[(t1 >> 24) ] & 0xff000000) ^
(Te4[(t2 >> 16) & 0xff] & 0x00ff0000) ^
(Te4[(t3 >> 8) & 0xff] & 0x0000ff00) ^
(Te4[(t0 ) & 0xff] & 0x000000ff) ^
rk[1];
PUTU32(txt + 4, s1);
s2 =
(Te4[(t2 >> 24) ] & 0xff000000) ^
(Te4[(t3 >> 16) & 0xff] & 0x00ff0000) ^
(Te4[(t0 >> 8) & 0xff] & 0x0000ff00) ^
(Te4[(t1 ) & 0xff] & 0x000000ff) ^
rk[2];
PUTU32(txt + 8, s2);
s3 =
(Te4[(t3 >> 24) ] & 0xff000000) ^
(Te4[(t0 >> 16) & 0xff] & 0x00ff0000) ^
(Te4[(t1 >> 8) & 0xff] & 0x0000ff00) ^
(Te4[(t2 ) & 0xff] & 0x000000ff) ^
rk[3];
PUTU32(txt + 12, s3);
}
// One-block-per-thread ("bpt") AES decryption kernel: a fully unrolled
// T-table implementation using the device lookup tables Td0..Td4.
// Each thread decrypts, in place, the 16-byte block at
//   text + 16 * (blockIdx.x*blockDim.x + threadIdx.x).
// rk points to the expanded decryption key schedule (4*(nrounds+1) words).
// Nine full rounds are executed unconditionally; the two nested if-blocks
// add rounds 10-11 when nrounds > 10 and rounds 12-13 when nrounds > 12,
// before the final Td4 substitution round.
// NOTE(review): there is no bounds check on the thread index — the launch
// configuration must supply exactly one thread per 16-byte block of text.
__global__ void aes_decrypt_bpt(u32 *rk, int nrounds, u8* text)
{
u32 s0, s1, s2, s3, t0, t1, t2, t3;
u8 *txt = text+(16*(blockIdx.x*blockDim.x+threadIdx.x));
/*
* map byte array block to cipher state
* and add initial round key:
*/
s0 = GETU32(txt ) ^ rk[0];
s1 = GETU32(txt + 4) ^ rk[1];
s2 = GETU32(txt + 8) ^ rk[2];
s3 = GETU32(txt + 12) ^ rk[3];
/* round 1: */
t0 = Td0[s0 >> 24] ^ Td1[(s3 >> 16) & 0xff] ^ Td2[(s2 >> 8) & 0xff] ^ Td3[s1 & 0xff] ^ rk[ 4];
t1 = Td0[s1 >> 24] ^ Td1[(s0 >> 16) & 0xff] ^ Td2[(s3 >> 8) & 0xff] ^ Td3[s2 & 0xff] ^ rk[ 5];
t2 = Td0[s2 >> 24] ^ Td1[(s1 >> 16) & 0xff] ^ Td2[(s0 >> 8) & 0xff] ^ Td3[s3 & 0xff] ^ rk[ 6];
t3 = Td0[s3 >> 24] ^ Td1[(s2 >> 16) & 0xff] ^ Td2[(s1 >> 8) & 0xff] ^ Td3[s0 & 0xff] ^ rk[ 7];
/* round 2: */
s0 = Td0[t0 >> 24] ^ Td1[(t3 >> 16) & 0xff] ^ Td2[(t2 >> 8) & 0xff] ^ Td3[t1 & 0xff] ^ rk[ 8];
s1 = Td0[t1 >> 24] ^ Td1[(t0 >> 16) & 0xff] ^ Td2[(t3 >> 8) & 0xff] ^ Td3[t2 & 0xff] ^ rk[ 9];
s2 = Td0[t2 >> 24] ^ Td1[(t1 >> 16) & 0xff] ^ Td2[(t0 >> 8) & 0xff] ^ Td3[t3 & 0xff] ^ rk[10];
s3 = Td0[t3 >> 24] ^ Td1[(t2 >> 16) & 0xff] ^ Td2[(t1 >> 8) & 0xff] ^ Td3[t0 & 0xff] ^ rk[11];
/* round 3: */
t0 = Td0[s0 >> 24] ^ Td1[(s3 >> 16) & 0xff] ^ Td2[(s2 >> 8) & 0xff] ^ Td3[s1 & 0xff] ^ rk[12];
t1 = Td0[s1 >> 24] ^ Td1[(s0 >> 16) & 0xff] ^ Td2[(s3 >> 8) & 0xff] ^ Td3[s2 & 0xff] ^ rk[13];
t2 = Td0[s2 >> 24] ^ Td1[(s1 >> 16) & 0xff] ^ Td2[(s0 >> 8) & 0xff] ^ Td3[s3 & 0xff] ^ rk[14];
t3 = Td0[s3 >> 24] ^ Td1[(s2 >> 16) & 0xff] ^ Td2[(s1 >> 8) & 0xff] ^ Td3[s0 & 0xff] ^ rk[15];
/* round 4: */
s0 = Td0[t0 >> 24] ^ Td1[(t3 >> 16) & 0xff] ^ Td2[(t2 >> 8) & 0xff] ^ Td3[t1 & 0xff] ^ rk[16];
s1 = Td0[t1 >> 24] ^ Td1[(t0 >> 16) & 0xff] ^ Td2[(t3 >> 8) & 0xff] ^ Td3[t2 & 0xff] ^ rk[17];
s2 = Td0[t2 >> 24] ^ Td1[(t1 >> 16) & 0xff] ^ Td2[(t0 >> 8) & 0xff] ^ Td3[t3 & 0xff] ^ rk[18];
s3 = Td0[t3 >> 24] ^ Td1[(t2 >> 16) & 0xff] ^ Td2[(t1 >> 8) & 0xff] ^ Td3[t0 & 0xff] ^ rk[19];
/* round 5: */
t0 = Td0[s0 >> 24] ^ Td1[(s3 >> 16) & 0xff] ^ Td2[(s2 >> 8) & 0xff] ^ Td3[s1 & 0xff] ^ rk[20];
t1 = Td0[s1 >> 24] ^ Td1[(s0 >> 16) & 0xff] ^ Td2[(s3 >> 8) & 0xff] ^ Td3[s2 & 0xff] ^ rk[21];
t2 = Td0[s2 >> 24] ^ Td1[(s1 >> 16) & 0xff] ^ Td2[(s0 >> 8) & 0xff] ^ Td3[s3 & 0xff] ^ rk[22];
t3 = Td0[s3 >> 24] ^ Td1[(s2 >> 16) & 0xff] ^ Td2[(s1 >> 8) & 0xff] ^ Td3[s0 & 0xff] ^ rk[23];
/* round 6: */
s0 = Td0[t0 >> 24] ^ Td1[(t3 >> 16) & 0xff] ^ Td2[(t2 >> 8) & 0xff] ^ Td3[t1 & 0xff] ^ rk[24];
s1 = Td0[t1 >> 24] ^ Td1[(t0 >> 16) & 0xff] ^ Td2[(t3 >> 8) & 0xff] ^ Td3[t2 & 0xff] ^ rk[25];
s2 = Td0[t2 >> 24] ^ Td1[(t1 >> 16) & 0xff] ^ Td2[(t0 >> 8) & 0xff] ^ Td3[t3 & 0xff] ^ rk[26];
s3 = Td0[t3 >> 24] ^ Td1[(t2 >> 16) & 0xff] ^ Td2[(t1 >> 8) & 0xff] ^ Td3[t0 & 0xff] ^ rk[27];
/* round 7: */
t0 = Td0[s0 >> 24] ^ Td1[(s3 >> 16) & 0xff] ^ Td2[(s2 >> 8) & 0xff] ^ Td3[s1 & 0xff] ^ rk[28];
t1 = Td0[s1 >> 24] ^ Td1[(s0 >> 16) & 0xff] ^ Td2[(s3 >> 8) & 0xff] ^ Td3[s2 & 0xff] ^ rk[29];
t2 = Td0[s2 >> 24] ^ Td1[(s1 >> 16) & 0xff] ^ Td2[(s0 >> 8) & 0xff] ^ Td3[s3 & 0xff] ^ rk[30];
t3 = Td0[s3 >> 24] ^ Td1[(s2 >> 16) & 0xff] ^ Td2[(s1 >> 8) & 0xff] ^ Td3[s0 & 0xff] ^ rk[31];
/* round 8: */
s0 = Td0[t0 >> 24] ^ Td1[(t3 >> 16) & 0xff] ^ Td2[(t2 >> 8) & 0xff] ^ Td3[t1 & 0xff] ^ rk[32];
s1 = Td0[t1 >> 24] ^ Td1[(t0 >> 16) & 0xff] ^ Td2[(t3 >> 8) & 0xff] ^ Td3[t2 & 0xff] ^ rk[33];
s2 = Td0[t2 >> 24] ^ Td1[(t1 >> 16) & 0xff] ^ Td2[(t0 >> 8) & 0xff] ^ Td3[t3 & 0xff] ^ rk[34];
s3 = Td0[t3 >> 24] ^ Td1[(t2 >> 16) & 0xff] ^ Td2[(t1 >> 8) & 0xff] ^ Td3[t0 & 0xff] ^ rk[35];
/* round 9: */
t0 = Td0[s0 >> 24] ^ Td1[(s3 >> 16) & 0xff] ^ Td2[(s2 >> 8) & 0xff] ^ Td3[s1 & 0xff] ^ rk[36];
t1 = Td0[s1 >> 24] ^ Td1[(s0 >> 16) & 0xff] ^ Td2[(s3 >> 8) & 0xff] ^ Td3[s2 & 0xff] ^ rk[37];
t2 = Td0[s2 >> 24] ^ Td1[(s1 >> 16) & 0xff] ^ Td2[(s0 >> 8) & 0xff] ^ Td3[s3 & 0xff] ^ rk[38];
t3 = Td0[s3 >> 24] ^ Td1[(s2 >> 16) & 0xff] ^ Td2[(s1 >> 8) & 0xff] ^ Td3[s0 & 0xff] ^ rk[39];
/* extra rounds for key sizes above 128 bits (nrounds 12 or 14) */
if (nrounds > 10)
{
/* round 10: */
s0 = Td0[t0 >> 24] ^ Td1[(t3 >> 16) & 0xff] ^ Td2[(t2 >> 8) & 0xff] ^ Td3[t1 & 0xff] ^ rk[40];
s1 = Td0[t1 >> 24] ^ Td1[(t0 >> 16) & 0xff] ^ Td2[(t3 >> 8) & 0xff] ^ Td3[t2 & 0xff] ^ rk[41];
s2 = Td0[t2 >> 24] ^ Td1[(t1 >> 16) & 0xff] ^ Td2[(t0 >> 8) & 0xff] ^ Td3[t3 & 0xff] ^ rk[42];
s3 = Td0[t3 >> 24] ^ Td1[(t2 >> 16) & 0xff] ^ Td2[(t1 >> 8) & 0xff] ^ Td3[t0 & 0xff] ^ rk[43];
/* round 11: */
t0 = Td0[s0 >> 24] ^ Td1[(s3 >> 16) & 0xff] ^ Td2[(s2 >> 8) & 0xff] ^ Td3[s1 & 0xff] ^ rk[44];
t1 = Td0[s1 >> 24] ^ Td1[(s0 >> 16) & 0xff] ^ Td2[(s3 >> 8) & 0xff] ^ Td3[s2 & 0xff] ^ rk[45];
t2 = Td0[s2 >> 24] ^ Td1[(s1 >> 16) & 0xff] ^ Td2[(s0 >> 8) & 0xff] ^ Td3[s3 & 0xff] ^ rk[46];
t3 = Td0[s3 >> 24] ^ Td1[(s2 >> 16) & 0xff] ^ Td2[(s1 >> 8) & 0xff] ^ Td3[s0 & 0xff] ^ rk[47];
if (nrounds > 12)
{
/* round 12: */
s0 = Td0[t0 >> 24] ^ Td1[(t3 >> 16) & 0xff] ^ Td2[(t2 >> 8) & 0xff] ^ Td3[t1 & 0xff] ^ rk[48];
s1 = Td0[t1 >> 24] ^ Td1[(t0 >> 16) & 0xff] ^ Td2[(t3 >> 8) & 0xff] ^ Td3[t2 & 0xff] ^ rk[49];
s2 = Td0[t2 >> 24] ^ Td1[(t1 >> 16) & 0xff] ^ Td2[(t0 >> 8) & 0xff] ^ Td3[t3 & 0xff] ^ rk[50];
s3 = Td0[t3 >> 24] ^ Td1[(t2 >> 16) & 0xff] ^ Td2[(t1 >> 8) & 0xff] ^ Td3[t0 & 0xff] ^ rk[51];
/* round 13: */
t0 = Td0[s0 >> 24] ^ Td1[(s3 >> 16) & 0xff] ^ Td2[(s2 >> 8) & 0xff] ^ Td3[s1 & 0xff] ^ rk[52];
t1 = Td0[s1 >> 24] ^ Td1[(s0 >> 16) & 0xff] ^ Td2[(s3 >> 8) & 0xff] ^ Td3[s2 & 0xff] ^ rk[53];
t2 = Td0[s2 >> 24] ^ Td1[(s1 >> 16) & 0xff] ^ Td2[(s0 >> 8) & 0xff] ^ Td3[s3 & 0xff] ^ rk[54];
t3 = Td0[s3 >> 24] ^ Td1[(s2 >> 16) & 0xff] ^ Td2[(s1 >> 8) & 0xff] ^ Td3[s0 & 0xff] ^ rk[55];
}
}
/* advance to the final round key (4 key words consumed per round) */
rk += nrounds << 2;
/* final round: Td4 substitution (no column mixing tables), then write
* the recovered plaintext state back to the same 16-byte block */
s0 =
(Td4[(t0 >> 24) ] & 0xff000000) ^
(Td4[(t3 >> 16) & 0xff] & 0x00ff0000) ^
(Td4[(t2 >> 8) & 0xff] & 0x0000ff00) ^
(Td4[(t1 ) & 0xff] & 0x000000ff) ^
rk[0];
PUTU32(txt , s0);
s1 =
(Td4[(t1 >> 24) ] & 0xff000000) ^
(Td4[(t0 >> 16) & 0xff] & 0x00ff0000) ^
(Td4[(t3 >> 8) & 0xff] & 0x0000ff00) ^
(Td4[(t2 ) & 0xff] & 0x000000ff) ^
rk[1];
PUTU32(txt + 4, s1);
s2 =
(Td4[(t2 >> 24) ] & 0xff000000) ^
(Td4[(t1 >> 16) & 0xff] & 0x00ff0000) ^
(Td4[(t0 >> 8) & 0xff] & 0x0000ff00) ^
(Td4[(t3 ) & 0xff] & 0x000000ff) ^
rk[2];
PUTU32(txt + 8, s2);
s3 =
(Td4[(t3 >> 24) ] & 0xff000000) ^
(Td4[(t2 >> 16) & 0xff] & 0x00ff0000) ^
(Td4[(t1 >> 8) & 0xff] & 0x0000ff00) ^
(Td4[(t0 ) & 0xff] & 0x000000ff) ^
rk[3];
PUTU32(txt + 12, s3);
}
|
edd95096257ad5de470ddcff2134d8acdee028bc.hip | // !!! This is a file automatically generated by hipify!!!
#include "shapes.hpp"
#include <iostream>
#include <fstream>
using namespace std;
// Construct the affine map p -> A*p + b from the four matrix entries and
// the two translation components; the determinant of A is cached because
// the equation-transform routines repeatedly divide by it.
Affine_map::Affine_map( float a11, float a12,
                        float a21, float a22,
                        float b1, float b2) {
    this->a11 = a11;
    this->a12 = a12;
    this->a21 = a21;
    this->a22 = a22;
    this->b1  = b1;
    this->b2  = b2;
    this->det = a11 * a22 - a12 * a21;
}
// Exception type thrown by Bounding_box::gen_translation when the box has
// no room left to translate inside the canvas.
Generr::Generr() {};
// Static RNG state shared by every Bounding_box: one hardware seed source,
// one Mersenne Twister engine seeded from it at program start-up.
std::random_device Bounding_box::rdev;
std::mt19937 Bounding_box::gen(rdev());
// Apply the affine map T to all four corners, folding each new position
// into the cached extrema, then recompute the slack (xl/xh/yl/yh) that the
// box has left towards the canvas edges.
void Bounding_box::transform (Affine_map T) {
    for (int i = 0; i < 4; i++) {
        const float tx = T.a11 * x[i] + T.a12 * y[i] + T.b1;
        const float ty = T.a21 * x[i] + T.a22 * y[i] + T.b2;
        x[i] = tx;
        y[i] = ty;
        xmax = ::max(xmax, tx);
        xmin = ::min(xmin, tx);
        ymax = ::max(ymax, ty);
        ymin = ::min(ymin, ty);
    }
    // Distance the box may still move in each direction before leaving
    // the canvas [0, x_bound] x [0, y_bound].
    xh = x_bound - xmax;
    xl = -xmin;
    yh = y_bound - ymax;
    yl = -ymin;
}
// Emit the four corners as "x y," pairs terminated by "!" (the format the
// downstream reader expects).
void Bounding_box::print_to_file (ofstream& to) {
    for (int i = 0; i < 4; i++)
        to << x[i] << " " << y[i] << ",";
    to << "!";
}
// Draw a random axis-aligned translation that keeps the box inside the
// canvas; throws Generr when no such translation exists.
Affine_map Bounding_box::gen_translation() {
    if ( (xl >= xh) || (yl >= yh) ) throw Generr();
    // A translation that moves the shape inside the canvas can't
    // be generated
    uniform_int_distribution<int> dx (xl, xh);
    uniform_int_distribution<int> dy (yl, yh);
    return Affine_map(1, 0, 0, 1, dx(gen), dy(gen));
}
// Build a bounding box from its four corners inside a canvas of the given
// size.
// Fix: the x_bound/y_bound parameters were previously accepted but never
// stored, and the extrema (xmin/xmax/ymin/ymax) and slack (xl/xh/yl/yh)
// members that transform() and gen_translation() read were never set here,
// so those methods operated on whatever values the members happened to
// hold.  They are now initialized from the corners, using the same
// formulas transform() applies after an update.
Bounding_box::Bounding_box (float x0, float y0,
                            float x1, float y1,
                            float x2, float y2,
                            float x3, float y3,
                            float x_bound, float y_bound) {
    x[0] = x0; y[0] = y0;
    x[1] = x1; y[1] = y1;
    x[2] = x2; y[2] = y2;
    x[3] = x3; y[3] = y3;
    this->x_bound = x_bound;
    this->y_bound = y_bound;
    // Seed the running extrema from the corners so later transform() calls
    // can keep folding new positions into them.
    xmax = xmin = x[0];
    ymax = ymin = y[0];
    for (int i = 1; i < 4; i++) {
        xmax = ::max(xmax, x[i]); xmin = ::min(xmin, x[i]);
        ymax = ::max(ymax, y[i]); ymin = ::min(ymin, y[i]);
    }
    // Initial slack towards the canvas edges (same formulas as transform()).
    xh = x_bound - xmax;
    xl = -xmin;
    yh = y_bound - ymax;
    yl = -ymin;
}
// =====================================================================
// =================== Equations ====================================
// =====================================================================
// Pull the line A*x + B*y + C = 0 back through the affine map T: the
// coefficients are replaced by those of the equation satisfied by the
// pre-images under T.  Requires T.det != 0.
void Linear_Eq::transform (Affine_map T) {
    const float inv = 1 / T.det;
    const float nA = inv * ( A * T.a22 - B * T.a21);
    const float nB = inv * (-A * T.a12 + B * T.a11);
    const float nC = inv * ( A * (-T.a22 * T.b1 + T.a12 * T.b2)
                           + B * ( T.a21 * T.b1 - T.a11 * T.b2)) + C;
    A = nA;
    B = nB;
    C = nC;
}
// True when (x, y) lies in the half-plane selected by the geq flag:
// geq picks A*x + B*y + C >= 0, otherwise the strictly negative side.
bool Linear_Eq::in_halfspace(int x, int y) {
    const bool nonneg = (A * x + B * y + C) >= 0;
    return nonneg == geq;
}
// Store the three line coefficients and the side-selection flag.
Linear_Eq::Linear_Eq (float A, float B, float C, bool geq) {
    this->A = A;
    this->B = B;
    this->C = C;
    this->geq = geq;
}
// ===================================================================
// Pull the conic A x^2 + B xy + C y^2 + D x + E y + F = 0 back through the
// affine map T: every coefficient is replaced by the corresponding one of
// the equation satisfied by pre-images under T (obtained by substituting
// the inverse map, hence the repeated 1/T.det factors).  Requires
// T.det != 0.  All six new values are computed from the old ones before
// any member is overwritten.
void Quadratic_Eq::transform (Affine_map T) {
float new_A = (1/T.det) * (1/T.det) * (A*T.a22*T.a22
- B*T.a21*T.a22
+ C*T.a21*T.a21),
new_B = (1/T.det) * (1/T.det) * (-2*A*T.a12*T.a22
+ B*(T.a11*T.a22 + T.a12*T.a21)
- 2*C*(T.a11*T.a21)),
new_C = (1/T.det) * (1/T.det) * (A*T.a12*T.a12
- B*T.a11*T.a12
+ C*T.a11*T.a11),
new_D = (1/T.det) * ( (1/T.det) * (2*A*(T.a12*T.a22*T.b2 - T.a22*T.a22*T.b1)
+ B*(2*T.a21*T.a22*T.b1 - T.a11*T.a22*T.b2 - T.a21*T.a12*T.b2)
+ 2*C*(T.a11*T.a21*T.b2 - T.a21*T.a21*T.b1))
+ D*T.a22 - E*T.a21),
new_E = (1/T.det) * ( (1/T.det) * (2*A*(T.a12*T.a22*T.b1 - T.a12*T.a12*T.b2)
+ B*(2*T.a11*T.a12*T.b2 - T.a12*T.a21*T.b1 - T.a11*T.a22*T.b1)
+ 2*C*(T.a11*T.a21*T.b1 - T.a11*T.a11*T.b2))
- D*T.a12 + E*T.a11),
new_F = (1/T.det) * ( (1/T.det) * (A*(T.a22*T.a22*T.b1*T.b1 + T.a12*T.a12*T.b2*T.b2 - 2*T.a12*T.a22*T.b1*T.b2)
+ B*((T.a11*T.a22 + T.a12*T.a21)*T.b1*T.b2 - T.a21*T.a22*T.b1*T.b1 - T.a11*T.a12*T.b2*T.b2)
+ C*(T.a21*T.a21*T.b1*T.b1 + T.a11*T.a11*T.b2*T.b2 - 2*T.a11*T.a21*T.b1*T.b2))
+ D*(-T.a22*T.b1 + T.a12*T.b2) + E*(T.a21*T.b1 - T.a11*T.b2)) + F;
A = new_A; B = new_B; C = new_C;
D = new_D; E = new_E; F = new_F;
}
// True when (x, y) lies on the side of the conic selected by the geq flag
// (geq picks the >= 0 side, otherwise the strictly negative side).
bool Quadratic_Eq::in_halfspace (int x, int y) {
bool c = ( (A*x*x + B*x*y + C*y*y + D*x + E*y + F) >= 0);
if ((geq && c) || (!geq && !c)) return true;
else return false;
}
// Store the six conic coefficients and the side-selection flag.
Quadratic_Eq::Quadratic_Eq (float A, float B, float C, float D, float E, float F, bool geq) {
this->A = A; this->B = B; this->C = C;
this->D = D; this->E = E; this->F = F;
this->geq = geq;
}
// ===================================================================
// Pull the cubic curve
//   A x^3 + B x^2 y + C x y^2 + D y^3 + E x^2 + F xy + G y^2 + H x + I y + J = 0
// back through the affine map T: each coefficient is replaced by the one of
// the equation satisfied by pre-images under T (substitution of the inverse
// map, hence the 1/T.det factors — three for cubic terms, two for quadratic,
// one for linear).  Requires T.det != 0.  All ten new values are computed
// from the old coefficients before any member is overwritten; the trailing
// comments name the monomial each new coefficient multiplies.
void Cubic_Eq::transform (Affine_map T) {
float new_A = (1/T.det) * (1/T.det) * (1/T.det) * (A*T.a22*T.a22*T.a22 // u^3
-B*T.a21*T.a22*T.a22
+C*T.a21*T.a21*T.a22
-D*T.a21*T.a21*T.a21),
new_B = (1/T.det) * (1/T.det) * (1/T.det) * ((-1)*3*A*T.a12*T.a22*T.a22 // u^2 v
+B*T.a22*(T.a11*T.a22 + 2*T.a12*T.a21)
-C*T.a21*(2*T.a11*T.a22 + T.a12*T.a21)
+3*D*T.a11*T.a21*T.a21),
new_C = (1/T.det) * (1/T.det) * (1/T.det) * (3*A*T.a12*T.a12*T.a22 // uv^2
-B*T.a12*(2*T.a11*T.a22 + T.a12*T.a21)
+C*T.a11*(T.a11*T.a22 + 2*T.a12*T.a21)
-3*D*T.a11*T.a11*T.a21),
new_D = (1/T.det) * (1/T.det) * (1/T.det) * ((-1)*A*T.a12*T.a12*T.a12 // v^3
+B*T.a11*T.a12*T.a12
+(-1)*C*T.a11*T.a11*T.a12
+D*T.a11*T.a11*T.a11),
new_E = (1/T.det) * (1/T.det) * ( (1/T.det) * (3*A*T.a22*T.a22*(T.a12*T.b2 - T.a22*T.b1) // u^2
+B*T.a22*(3*T.a21*T.a22*T.b1 - T.a11*T.a22*T.b2 - 2*T.a12*T.a21*T.b2)
+C*T.a21*(-3*T.a21*T.a22*T.b1 + 2*T.a11*T.a22*T.b2 + T.a12*T.a21*T.b2)
+3*D*T.a21*T.a21*(T.a21*T.b1 - T.a11*T.b2))
+E*T.a22*T.a22
-F*T.a21*T.a22
+G*T.a21*T.a21),
new_F = (1/T.det) * (1/T.det) * ( (1/T.det) * (6*A*T.a12*T.a22*(T.a22*T.b1 - T.a12*T.b2) // uv
+B*(4*T.a12*T.a22*(T.a11*T.b2 - T.a21*T.b1) + 2*((-1)*T.a11*T.a22*T.a22*T.b1 + T.a12*T.a12*T.a21*T.b2))
+C*( 4*T.a11*T.a21*(T.a22*T.b1 - T.a12*T.b2) + 2*(T.a12*T.a21*T.a21*T.b1 - T.a11*T.a11*T.a22*T.b2) )
+6*D*T.a11*T.a21*(T.a11*T.b2 - T.a21*T.b1) )
-2*E*T.a12*T.a22
+ F*(T.a11*T.a22 + T.a12*T.a21)
- 2*G*(T.a11*T.a21)),
new_G = (1/T.det) * (1/T.det) * ( (1/T.det) * (3*A*T.a12*T.a12*(T.a12*T.b2 - T.a22*T.b1) // v^2
+B*T.a12*(2*T.a11*T.a22*T.b1 + T.a12*T.a21*T.b1 - 3*T.a11*T.a12*T.b2)
+C*T.a11*((-1)*T.a11*T.a22*T.b1 - 2*T.a12*T.a21*T.b1 + 3*T.a11*T.a12*T.b2)
+3*D*T.a11*T.a11*(T.a21*T.b1 - T.a11*T.b2) )
+E*T.a12*T.a12
-F*T.a11*T.a12
+G*T.a11*T.a11),
new_H = (1/T.det) * ( (1/T.det) * ((1/T.det) * (A*3*T.a22*(T.a22*T.b1 - T.a12*T.b2)*(T.a22*T.b1 - T.a12*T.b2) // u
+B*(2*T.a11*T.a22*T.b2*(T.a22*T.b1 - T.a12*T.b2)
+ T.a21*(3*T.a22*T.a22*T.b1*T.b1 - T.a12*T.a12*T.b2*T.b2 + 4*T.a12*T.a22*T.b1*T.b2))
+C*(2*T.a12*T.a21*T.b2*(T.a11*T.b2 - T.a21*T.b1)
+ T.a22*(3*T.a21*T.a21*T.b1*T.b1 + T.a11*T.a11*T.b2*T.b2 - 4*T.a11*T.a21*T.b1*T.b2))
-D*3*T.a21*(T.a21*T.b1 - T.a11*T.b2)*(T.a21*T.b1 - T.a11*T.b2))
+ 2*E*(T.a12*T.a22*T.b2 - T.a22*T.a22*T.b1)
+ F*(2*T.a21*T.a22*T.b1 - T.a11*T.a22*T.b2 - T.a21*T.a12*T.b2)
+ 2*G*(T.a11*T.a21*T.b2 - T.a21*T.a21*T.b1))
+ H*T.a22 - I*T.a21),
new_I = (1/T.det) * ( (1/T.det) * ((1/T.det) * ((-1)*A*3*T.a12*(T.a22*T.b1 - T.a12*T.b2)*(T.a22*T.b1 - T.a12*T.b2) // v
+B*(2*T.a12*T.a21*T.b1*(T.a22*T.b1 - T.a12*T.b2)
+ T.a11*(T.a22*T.a22*T.b1*T.b1 + 3*T.a12*T.a12*T.b2*T.b2 - 4*T.a12*T.a22*T.b1*T.b2))
+C*(2*T.a11*T.a22*T.b1*(T.a11*T.b2 - T.a21*T.b1)
+ T.a12*(T.a21*T.a21*T.b1*T.b1 - 3*T.a11*T.a11*T.b2*T.b2 + 4*T.a11*T.a21*T.b1*T.b2))
+D*3*T.a11*(T.a11*T.b2 - T.a21*T.b1)*(T.a11*T.b2 - T.a21*T.b1))
+ 2*E*(T.a12*T.a22*T.b1 - T.a12*T.a12*T.b2)
+ F*(2*T.a11*T.a12*T.b2 - T.a12*T.a21*T.b1 - T.a11*T.a22*T.b1)
+ 2*G*(T.a11*T.a21*T.b1 - T.a11*T.a11*T.b2))
- H*T.a12 + I*T.a11),
new_J = (1/T.det) * (
(1/T.det) * (
(1/T.det) * (A*(T.a12*T.b2 - T.a22*T.b1)*(T.a12*T.b2 - T.a22*T.b1)*(T.a12*T.b2 - T.a22*T.b1) // 1
+B*(T.a22*T.b1 - T.a12*T.b2)*(T.a22*T.b1 - T.a12*T.b2)*(T.a21*T.b1 - T.a11*T.b2)
+C*(T.a11*T.b2 - T.a21*T.b1)*(T.a11*T.b2 - T.a21*T.b1)*(T.a12*T.b2 - T.a22*T.b1)
+D*(T.a21*T.b1 - T.a11*T.b2)*(T.a21*T.b1 - T.a11*T.b2)*(T.a21*T.b1 - T.a11*T.b2)
)
+ E*(T.a22*T.b1 - T.a12*T.b2)*(T.a22*T.b1 - T.a12*T.b2)
+ F*((T.a11*T.a22 + T.a12*T.a21)*T.b1*T.b2 - T.a21*T.a22*T.b1*T.b1 - T.a11*T.a12*T.b2*T.b2)
+ G*(T.a21*T.b1 - T.a11*T.b2)*(T.a21*T.b1 - T.a11*T.b2)
)
+ H*(-T.a22*T.b1 + T.a12*T.b2) + I*(T.a21*T.b1 - T.a11*T.b2)
) + J;
A = new_A; B = new_B; C = new_C; D = new_D; E = new_E;
F = new_F; G = new_G; H = new_H; I = new_I; J = new_J;
}
// True when (x, y) lies on the side of the cubic selected by the geq flag
// (geq picks the >= 0 side, otherwise the strictly negative side).
bool Cubic_Eq::in_halfspace(int x, int y) {
bool c = ((A*x*x*x + B*x*x*y + C*x*y*y + D*y*y*y + E*x*x + F*x*y + G*y*y + H*x + I*y + J) >= 0);
if ((geq && c) || (!geq && !c)) return true;
else return false;
}
// Store the ten cubic coefficients and the side-selection flag.
Cubic_Eq::Cubic_Eq (float A, float B, float C, float D, float E,
float F, float G, float H, float I, float J, bool geq) {
this->A = A; this->B = B; this->C = C; this->D = D; this->E = E;
this->F = F; this->G = G; this->H = H; this->I = I; this->J = J;
this->geq = geq;
}
// =====================================================================
// =================== Shapes =======================================
// =====================================================================
// Remember the canvas dimensions and cache its midpoint.
Shape::Shape (int xmax, int ymax) {
    this->xmax = xmax;
    this->ymax = ymax;
    this->xmid = xmax / 2;   // integer division: midpoint rounds down
    this->ymid = ymax / 2;
}
// =================== CPU Shape =======================================
Affine_map CPU_Shape::gen_translation() {
return box->gen_translation();
}
void CPU_Shape::rounden (int r) {
int i, j, a, b, c;
std::vector<coords> coord_stack;
for (i = r; i < INPUT_DIM-r; i++) {
for (j = r; j < INPUT_DIM-r; j++) {
c = i + j*INPUT_DIM;
if (bitmap[c] == 1)
coord_stack.push_back( std::make_tuple(i,j) );
}
}
for (std::vector<coords>::iterator it = coord_stack.begin(); it != coord_stack.end(); it++) {
i = std::get<0>(*it);
j = std::get<1>(*it);
for (a = -r; a <= r; a++) {
for (b = -r; b <= r; b++) {
if (a*a + b*b <= r*r) {
c = (i+a) + (j+b)*INPUT_DIM;
bitmap[c] = 1;
}
}
}
}
}
void CPU_Shape::print_to_file(ofstream& to, bool bbox) {
for (int i = 0; i < xmax*ymax; i++) {
to << bitmap[i];
} to << ",";
if (bbox) box->print_to_file(to);
}
float CPU_Shape::find_L2_distance_from (Shape* s2_in) {
CPU_Shape* s2 = static_cast<CPU_Shape*>(s2_in);
float dist = 0.0;
if ( (xmax != s2->xmax) || (ymax != s2->ymax) ) {
cout << "The shapes have different dimensions" << endl;
}
else {
for (int i = 0; i < xmax * ymax; i++) {
dist += (float) ( (bitmap[i] - s2->bitmap[i])*(bitmap[i] - s2->bitmap[i]) );
}
}
return dist;
}
void CPU_Shape::gen_bitmap() {
if (!bitmap_generated) {
bitmap = new float[xmax * ymax];
bitmap_generated = true;
}
}
CPU_Shape::~CPU_Shape() {
if (bitmap_generated) delete[] bitmap;
delete box;
}
// =================== GPU Shape =======================================
// Host-side stubs: translation generation, distance computation and
// printing are not implemented for GPU-resident shapes; they return a
// default-constructed map / 0.0 / nothing.
Affine_map GPU_Shape::gen_translation () { Affine_map T; return T; }
float GPU_Shape::find_L2_distance_from (Shape* s2) { return 0.0; }
void GPU_Shape::print_to_file (ofstream& to, bool bbox) {}
// Lazily allocate the pixel buffer in device memory.
// NOTE(review): the hipMalloc return code is ignored — a failed allocation
// silently leaves `bitmap` unusable; consider checking it.
void GPU_Shape::gen_bitmap() {
if (!bitmap_generated) {
hipMalloc( (void**) &bitmap, xmax * ymax * sizeof(float));
bitmap_generated = true;
}
}
// For GPU shapes the bounding box itself lives in device memory.
GPU_Shape::GPU_Shape () {
hipMalloc ((void**) &box, sizeof(Bounding_box));
}
// NOTE(review): `bitmap` is hipFree'd even when gen_bitmap() was never
// called; this is only safe if the member is null-initialized in
// shapes.hpp — verify.  Return codes are ignored.
GPU_Shape::~GPU_Shape () {
hipFree(bitmap); hipFree(box);
}
// ========================= Triangle =============================
// Map the bounding box and all three edge equations through T.
void Triangle::transform (Affine_map T) {
    box->transform(T);
    L1.transform(T);
    L2.transform(T);
    L3.transform(T);
}
// Rasterize: a pixel is set when it satisfies all three half-plane
// constraints.
void Triangle::gen_bitmap() {
    CPU_Shape::gen_bitmap();
    for (int i = 0; i < xmax*ymax; i++) {
        const int px = i % xmax;   // column
        const int py = i / xmax;   // row
        const bool inside = L1.in_halfspace(px, py)
                         && L2.in_halfspace(px, py)
                         && L3.in_halfspace(px, py);
        bitmap[i] = inside ? 1 : 0;
    }
}
// Copy the geometry (edges + box) from another Triangle.
void Triangle::copy (Shape* s) {
    Triangle* src = dynamic_cast<Triangle*>(s);
    L1 = src->L1;
    L2 = src->L2;
    L3 = src->L3;
    *box = *src->box;
}
// Default triangle centered on the canvas; the three edges are half-plane
// constraints (0.577 ~ tan 30 degrees) and the box wraps the corners.
Triangle::Triangle() {
    L1 = Linear_Eq(1, 0.577 * xmid / ymid, -1.289 * xmid, true);
    L2 = Linear_Eq(1, -0.577 * xmid / ymid, -0.711 * xmid, false);
    L3 = Linear_Eq(0, 1, -3*ymid/2, false);
    box = new Bounding_box( xmid - 0.577*ymid-1, ymid/2-1,
                            xmid + 0.577*ymid+1, ymid/2-1,
                            xmid + 0.577*ymid-1, 3*ymid/2+1,
                            xmid - 0.577*ymid+1, 3*ymid/2+1);
}
// NOTE(review): L1/L2/L3/box hold pointers that the constructor below fills
// via hipMemcpyHostToDevice, i.e. device memory — yet they are dereferenced
// here on the host.  That cannot work unless they are actually
// host-accessible (e.g. managed memory set up in shapes.hpp); verify.
void GPU_Triangle::transform (Affine_map T) {
L1->transform(T); L2->transform(T); L3->transform(T);
box->transform(T);
}
// Rasterization stub — not implemented for the GPU triangle.
void GPU_Triangle::gen_bitmap () {}
// Copy stub — not implemented for the GPU triangle.
void GPU_Triangle::copy (Shape* s) {}
// Build the three edge equations and the bounding box on the host, then
// copy them into the device-side objects.
// NOTE(review): no hipMalloc for L1/L2/L3 is visible before these copies
// (the base GPU_Shape constructor only allocates `box`) — unless shapes.hpp
// allocates them, the hipMemcpy calls write through unallocated device
// pointers.  Return codes are unchecked, and L1/L2/L3 are never hipFree'd
// in any visible destructor.
GPU_Triangle::GPU_Triangle() {
Linear_Eq *L1_host = new Linear_Eq(1, 0.577 * xmid / ymid, -1.289 * xmid, true),
*L2_host = new Linear_Eq(1, -0.577 * xmid / ymid, -0.711 * xmid, false),
*L3_host = new Linear_Eq(0, 1, -3*ymid/2, false);
Bounding_box* box_host = new Bounding_box( xmid - 0.577*ymid-1, ymid/2-1,
xmid + 0.577*ymid+1, ymid/2-1,
xmid + 0.577*ymid-1, 3*ymid/2+1,
xmid - 0.577*ymid+1, 3*ymid/2+1);
hipMemcpy (L1, L1_host, sizeof(Linear_Eq), hipMemcpyHostToDevice);
hipMemcpy (L2, L2_host, sizeof(Linear_Eq), hipMemcpyHostToDevice);
hipMemcpy (L3, L3_host, sizeof(Linear_Eq), hipMemcpyHostToDevice);
hipMemcpy (box, box_host, sizeof(Bounding_box), hipMemcpyHostToDevice);
delete L1_host; delete L2_host; delete L3_host; delete box_host;
}
// ========================= Square =============================
// Map the bounding box and all four edge equations through T.
void Square::transform (Affine_map T) {
    box->transform(T);
    L1.transform(T);
    L2.transform(T);
    L3.transform(T);
    L4.transform(T);
}
// Rasterize: a pixel is set when it satisfies all four half-plane
// constraints.
void Square::gen_bitmap() {
    CPU_Shape::gen_bitmap();
    for (int i = 0; i < xmax*ymax; i++) {
        const int px = i % xmax;   // column
        const int py = i / xmax;   // row
        const bool inside = L1.in_halfspace(px, py)
                         && L2.in_halfspace(px, py)
                         && L3.in_halfspace(px, py)
                         && L4.in_halfspace(px, py);
        bitmap[i] = inside ? 1 : 0;
    }
}
// Copy the geometry (edges + box) from another Square.
void Square::copy (Shape* s) {
    Square* src = dynamic_cast<Square*>(s);
    L1 = src->L1;
    L2 = src->L2;
    L3 = src->L3;
    L4 = src->L4;
    *box = *src->box;
}
// Axis-aligned square spanning the middle half of the canvas.
Square::Square() {
    L1 = Linear_Eq(1, 0, -xmid/2, true);     // left edge
    L2 = Linear_Eq(1, 0, -3*xmid/2, false);  // right edge
    L3 = Linear_Eq(0, 1, -ymid/2, true);     // top edge
    L4 = Linear_Eq(0, 1, -3*ymid/2, false);  // bottom edge
    box = new Bounding_box( xmid/2-1, ymid/2-1,
                            3*xmid/2+1, ymid/2-1,
                            3*xmid/2+1, 3*ymid/2+1,
                            xmid/2-1, 3*ymid/2+1);
}
// ========================= Pentagon =============================
// Map the bounding box and all five edge equations through T.
void Pentagon::transform (Affine_map T) {
    box->transform(T);
    L1.transform(T);
    L2.transform(T);
    L3.transform(T);
    L4.transform(T);
    L5.transform(T);
}
// Rasterize: a pixel is set when it satisfies all five half-plane
// constraints.
void Pentagon::gen_bitmap() {
    CPU_Shape::gen_bitmap();
    for (int i = 0; i < xmax*ymax; i++) {
        const int px = i % xmax;   // column
        const int py = i / xmax;   // row
        const bool inside = L1.in_halfspace(px, py)
                         && L2.in_halfspace(px, py)
                         && L3.in_halfspace(px, py)
                         && L4.in_halfspace(px, py)
                         && L5.in_halfspace(px, py);
        bitmap[i] = inside ? 1 : 0;
    }
}
// Copy the geometry (edges + box) from another Pentagon.
void Pentagon::copy (Shape* s) {
    Pentagon* src = dynamic_cast<Pentagon*>(s);
    L1 = src->L1;
    L2 = src->L2;
    L3 = src->L3;
    L4 = src->L4;
    L5 = src->L5;
    *box = *src->box;
}
// Default pentagon around the canvas center, then scaled up by 1.3 and
// shifted by (-20, -20) because the base constants come out slightly small.
Pentagon::Pentagon() {
    L1 = Linear_Eq(1.376, -1, -2.065 * xmid + ymid, false);
    L2 = Linear_Eq(-0.325, -1, ymid - 0.1 * xmid, false);
    L3 = Linear_Eq(1, 0, -0.595*xmid, true );
    L4 = Linear_Eq(0.325, -1, ymid + 0.1 * xmid, true);
    L5 = Linear_Eq(-1.376, -1, 2.065 * xmid + ymid, true);
    box = new Bounding_box( 0.595*xmid, ymid/2,
                            3*xmid/2, ymid/2,
                            3*xmid/2, 3*ymid/2,
                            0.595*xmid, 3*ymid/2);
    // The default pentagon defined as above is slightly too small.
    Affine_map T = Affine_map(1.3, 0, 0, 1.3, -20, -20);
    transform(T);
}
// ========================= Hexagon =============================
// Map the bounding box and all six edge equations through T.
void Hexagon::transform (Affine_map T) {
    box->transform(T);
    L1.transform(T);
    L2.transform(T);
    L3.transform(T);
    L4.transform(T);
    L5.transform(T);
    L6.transform(T);
}
// Rasterize: a pixel is set when it satisfies all six half-plane
// constraints.
void Hexagon::gen_bitmap() {
    CPU_Shape::gen_bitmap();
    for (int i = 0; i < xmax*ymax; i++) {
        const int px = i % xmax;   // column
        const int py = i / xmax;   // row
        const bool inside = L1.in_halfspace(px, py)
                         && L2.in_halfspace(px, py)
                         && L3.in_halfspace(px, py)
                         && L4.in_halfspace(px, py)
                         && L5.in_halfspace(px, py)
                         && L6.in_halfspace(px, py);
        bitmap[i] = inside ? 1 : 0;
    }
}
// Copy the geometry (edges + box) from another Hexagon.
void Hexagon::copy (Shape* s) {
    Hexagon* src = dynamic_cast<Hexagon*>(s);
    L1 = src->L1;
    L2 = src->L2;
    L3 = src->L3;
    L4 = src->L4;
    L5 = src->L5;
    L6 = src->L6;
    *box = *src->box;
}
// Regular-ish hexagon centered on the canvas (0.577 ~ tan 30 degrees).
Hexagon::Hexagon() {
    L1 = Linear_Eq(1, 0, -1.577*xmid, false);
    L2 = Linear_Eq(0.577*ymid/xmid, -1, -0.244*ymid, false);
    L3 = Linear_Eq(-0.577*ymid/xmid, -1, 0.911 * ymid, false);
    L4 = Linear_Eq(1, 0, -0.423*xmid, true);
    L5 = Linear_Eq(0.577*ymid/xmid, -1, 1.089 * ymid, true);
    L6 = Linear_Eq(-0.577*ymid/xmid, -1, 2.244 * ymid, true);
    box = new Bounding_box( 0.423*xmid, ymid/3,
                            1.577*xmid+1, ymid/3,
                            1.577*xmid+1, 5*ymid/3+1,
                            0.423*xmid, 5*ymid/3+1);
}
// ========================= Circle =============================
// Map the bounding box and the circle's quadratic equation through T.
void Circle::transform (Affine_map T) {
    box->transform(T);
    Q1.transform(T);
}
// Rasterize: a pixel is set when it lies inside the disc.
void Circle::gen_bitmap() {
    CPU_Shape::gen_bitmap();
    for (int i = 0; i < xmax*ymax; i++) {
        const int px = i % xmax;   // column
        const int py = i / xmax;   // row
        bitmap[i] = Q1.in_halfspace(px, py) ? 1 : 0;
    }
}
// Copy the geometry (conic + box) from another Circle.
void Circle::copy (Shape* s) {
    Circle* src = dynamic_cast<Circle*>(s);
    Q1 = src->Q1;
    *box = *src->box;
}
// Disc centered at (xmid, ymid).
Circle::Circle () {
    Q1 = Quadratic_Eq(1, 0, 1, -2*xmid, -2*ymid, xmid*xmid + 0.75*ymid*ymid, false);
    box = new Bounding_box( 0.5*xmid, 0.5*ymid,
                            3*xmid/2+1, 0.5*ymid,
                            3*xmid/2+1, 3*ymid/2+1,
                            0.5*xmid, 3*ymid/2+1);
}
// ========================= Circle_II =============================
// Map the bounding box and both conics through T.
void Circle_II::transform (Affine_map T) {
    box->transform(T);
    Q1.transform(T);
    Q2.transform(T);
}
// Rasterize an annulus: inside the outer disc but outside the inner one.
void Circle_II::gen_bitmap() {
    CPU_Shape::gen_bitmap();
    for (int i = 0; i < xmax*ymax; i++) {
        const int px = i % xmax;   // column
        const int py = i / xmax;   // row
        const bool inside = Q1.in_halfspace(px, py)
                         && !Q2.in_halfspace(px, py);
        bitmap[i] = inside ? 1 : 0;
    }
}
// Copy the geometry (conics + box) from another Circle_II.
void Circle_II::copy (Shape* s) {
    Circle_II* src = dynamic_cast<Circle_II*>(s);
    Q1 = src->Q1;
    Q2 = src->Q2;
    *box = *src->box;
}
// Two concentric discs around (xmid, ymid); the difference is a ring.
Circle_II::Circle_II () {
    Q1 = Quadratic_Eq(1, 0, 1, -2*xmid, -2*ymid, xmid*xmid + 0.75*ymid*ymid, false);
    Q2 = Quadratic_Eq(1, 0, 1, -2*xmid, -2*ymid, xmid*xmid + 0.9*ymid*ymid, false);
    box = new Bounding_box( 0.5*xmid, 0.5*ymid,
                            3*xmid/2+1, 0.5*ymid,
                            3*xmid/2+1, 3*ymid/2+1,
                            0.5*xmid, 3*ymid/2+1);
}
// ========================= Circle_III =============================
// Map the bounding box and all three conics through T.
void Circle_III::transform (Affine_map T) {
    box->transform(T);
    Q1.transform(T);
    Q2.transform(T);
    Q3.transform(T);
}
// Rasterize: outer ring (Q1 minus Q2) plus the innermost disc Q3.
void Circle_III::gen_bitmap() {
    CPU_Shape::gen_bitmap();
    for (int i = 0; i < xmax*ymax; i++) {
        const int px = i % xmax;   // column
        const int py = i / xmax;   // row
        const bool inside = Q1.in_halfspace(px, py)
                         && (!Q2.in_halfspace(px, py) || Q3.in_halfspace(px, py));
        bitmap[i] = inside ? 1 : 0;
    }
}
// Copy the geometry (conics + box) from another Circle_III.
void Circle_III::copy (Shape* s) {
    Circle_III* src = dynamic_cast<Circle_III*>(s);
    Q1 = src->Q1;
    Q2 = src->Q2;
    Q3 = src->Q3;
    *box = *src->box;
}
// Three concentric discs around (xmid, ymid) of decreasing radius.
Circle_III::Circle_III () {
    Q1 = Quadratic_Eq(1, 0, 1, -2*xmid, -2*ymid, xmid*xmid + 0.75*ymid*ymid, false);
    Q2 = Quadratic_Eq(1, 0, 1, -2*xmid, -2*ymid, xmid*xmid + 0.9*ymid*ymid, false);
    Q3 = Quadratic_Eq(1, 0, 1, -2*xmid, -2*ymid, xmid*xmid + 0.95*ymid*ymid, false);
    box = new Bounding_box( 0.5*xmid, 0.5*ymid,
                            3*xmid/2+1, 0.5*ymid,
                            3*xmid/2+1, 3*ymid/2+1,
                            0.5*xmid, 3*ymid/2+1);
}
// ========================= Circle_IV =============================
// Map the bounding box and all four conics through T.
void Circle_IV::transform (Affine_map T) {
    box->transform(T);
    Q1.transform(T);
    Q2.transform(T);
    Q3.transform(T);
    Q4.transform(T);
}
// Rasterize: outer ring (Q1 minus Q2) plus the inner ring (Q3 minus Q4).
void Circle_IV::gen_bitmap() {
    CPU_Shape::gen_bitmap();
    for (int i = 0; i < xmax*ymax; i++) {
        const int px = i % xmax;   // column
        const int py = i / xmax;   // row
        const bool inside = Q1.in_halfspace(px, py)
                         && (!Q2.in_halfspace(px, py)
                             || (Q3.in_halfspace(px, py) && !Q4.in_halfspace(px, py)));
        bitmap[i] = inside ? 1 : 0;
    }
}
// Copy the geometry (conics + box) from another Circle_IV.
void Circle_IV::copy (Shape* s) {
    Circle_IV* src = dynamic_cast<Circle_IV*>(s);
    Q1 = src->Q1;
    Q2 = src->Q2;
    Q3 = src->Q3;
    Q4 = src->Q4;
    *box = *src->box;
}
// Four concentric discs around (xmid, ymid) of decreasing radius.
Circle_IV::Circle_IV () {
    Q1 = Quadratic_Eq(1, 0, 1, -2*xmid, -2*ymid, xmid*xmid + 0.75*ymid*ymid, false);
    Q2 = Quadratic_Eq(1, 0, 1, -2*xmid, -2*ymid, xmid*xmid + 0.9*ymid*ymid, false);
    Q3 = Quadratic_Eq(1, 0, 1, -2*xmid, -2*ymid, xmid*xmid + 0.95*ymid*ymid, false);
    Q4 = Quadratic_Eq(1, 0, 1, -2*xmid, -2*ymid, xmid*xmid + 0.995*ymid*ymid, false);
    box = new Bounding_box( 0.5*xmid, 0.5*ymid,
                            3*xmid/2+1, 0.5*ymid,
                            3*xmid/2+1, 3*ymid/2+1,
                            0.5*xmid, 3*ymid/2+1);
}
// ========================= Rhombus =============================
// Map the bounding box and all eight edge equations through T.
void Rhombus::transform (Affine_map T) {
    box->transform(T);
    L1.transform(T); L2.transform(T); L3.transform(T); L4.transform(T);
    L5.transform(T); L6.transform(T); L7.transform(T); L8.transform(T);
}
// Rasterize a hollow rhombus: inside the outer diamond (L1..L4) but not
// inside the inner diamond (L5..L8).
void Rhombus::gen_bitmap() {
    CPU_Shape::gen_bitmap();
    for (int i = 0; i < xmax*ymax; i++) {
        const int px = i % xmax;   // column
        const int py = i / xmax;   // row
        const bool outer = L1.in_halfspace(px, py)
                        && L2.in_halfspace(px, py)
                        && L3.in_halfspace(px, py)
                        && L4.in_halfspace(px, py);
        const bool inner = L5.in_halfspace(px, py)
                        && L6.in_halfspace(px, py)
                        && L7.in_halfspace(px, py)
                        && L8.in_halfspace(px, py);
        bitmap[i] = (outer && !inner) ? 1 : 0;
    }
}
// Copy the geometry (edges + box) from another Rhombus.
void Rhombus::copy (Shape* s) {
    Rhombus* src = dynamic_cast<Rhombus*>(s);
    L1 = src->L1; L2 = src->L2; L3 = src->L3; L4 = src->L4;
    L5 = src->L5; L6 = src->L6; L7 = src->L7; L8 = src->L8;
    *box = *src->box;
}
// Outer diamond L1..L4 and a concentric inner diamond L5..L8 (the hole).
Rhombus::Rhombus () {
    L1 = Linear_Eq(1, 1, -xmid - ymid/3, true);
    L2 = Linear_Eq(1, -1, -xmid+ymid/3, false);
    L3 = Linear_Eq(1, 1, -xmid-5*ymid/3, false);
    L4 = Linear_Eq(1,-1,-xmid+5*ymid/3, true);
    L5 = Linear_Eq(1, 1, -xmid - 2*ymid/3, true);
    L6 = Linear_Eq(1, -1, -xmid+2*ymid/3 , false);
    L7 = Linear_Eq(1, 1, -xmid-4*ymid/3, false);
    L8 = Linear_Eq(1, -1, -xmid + 4*ymid/3, true);
    box = new Bounding_box( xmid/3, ymid/3,
                            5*xmid/3+1, ymid/3,
                            5*xmid/3+1, 5*ymid/3+1,
                            xmid/3, 5*ymid/3+1);
}
// ========================= Rhombus_II =============================
// Map the bounding box and all seven edge equations through T.
void Rhombus_II::transform (Affine_map T) {
    box->transform(T);
    L1.transform(T); L2.transform(T); L3.transform(T); L4.transform(T);
    L5.transform(T); L6.transform(T); L7.transform(T);
}
// Rasterize: inside the outer diamond (L1..L4) but not inside the
// triangular cut-out (L5..L7).
void Rhombus_II::gen_bitmap() {
    CPU_Shape::gen_bitmap();
    for (int i = 0; i < xmax*ymax; i++) {
        const int px = i % xmax;   // column
        const int py = i / xmax;   // row
        const bool outer = L1.in_halfspace(px, py)
                        && L2.in_halfspace(px, py)
                        && L3.in_halfspace(px, py)
                        && L4.in_halfspace(px, py);
        const bool cut = L5.in_halfspace(px, py)
                      && L6.in_halfspace(px, py)
                      && L7.in_halfspace(px, py);
        bitmap[i] = (outer && !cut) ? 1 : 0;
    }
}
// Copy the geometry (edges + box) from another Rhombus_II.
void Rhombus_II::copy (Shape* s) {
    Rhombus_II* src = dynamic_cast<Rhombus_II*>(s);
    L1 = src->L1; L2 = src->L2; L3 = src->L3; L4 = src->L4;
    L5 = src->L5; L6 = src->L6; L7 = src->L7;
    *box = *src->box;
}
// Outer diamond L1..L4 with a triangular notch bounded by L5..L7.
Rhombus_II::Rhombus_II () {
    L1 = Linear_Eq(1, 1, -xmid - ymid/3, true);
    L2 = Linear_Eq(1, -1, -xmid+ymid/3, false);
    L3 = Linear_Eq(1, 1, -xmid-5*ymid/3, false);
    L4 = Linear_Eq(1,-1,-xmid+5*ymid/3, true);
    L5 = Linear_Eq(1, 1, -xmid - ymid/2, true);
    L6 = Linear_Eq(1, -1, -xmid + 3*ymid/2, true);
    L7 = Linear_Eq(1, 0, -xmid, false);
    box = new Bounding_box( xmid/3, ymid/3,
                            5*xmid/3+1, ymid/3,
                            5*xmid/3+1, 5*ymid/3+1,
                            xmid/3, 5*ymid/3+1);
}
// ========================= Rhombus_III =============================
// Map the bounding box and all twelve edge equations through T.
void Rhombus_III::transform (Affine_map T) {
    box->transform(T);
    L1.transform(T);  L2.transform(T);  L3.transform(T);
    L4.transform(T);  L5.transform(T);  L6.transform(T);
    L7.transform(T);  L8.transform(T);  L9.transform(T);
    L10.transform(T); L11.transform(T); L12.transform(T);
}
// Rasterize: inside the outer diamond (L1..L4) minus four corner wedges,
// each wedge being the intersection of three half-planes.
void Rhombus_III::gen_bitmap() {
    CPU_Shape::gen_bitmap();
    for (int i = 0; i < xmax*ymax; i++) {
        const int px = i % xmax;   // column
        const int py = i / xmax;   // row
        const bool outer = L1.in_halfspace(px, py) && L2.in_halfspace(px, py)
                        && L3.in_halfspace(px, py) && L4.in_halfspace(px, py);
        const bool w1 = L5.in_halfspace(px, py) && L6.in_halfspace(px, py) && L9.in_halfspace(px, py);
        const bool w2 = L5.in_halfspace(px, py) && L8.in_halfspace(px, py) && L10.in_halfspace(px, py);
        const bool w3 = L7.in_halfspace(px, py) && L6.in_halfspace(px, py) && L11.in_halfspace(px, py);
        const bool w4 = L7.in_halfspace(px, py) && L8.in_halfspace(px, py) && L12.in_halfspace(px, py);
        bitmap[i] = (outer && !w1 && !w2 && !w3 && !w4) ? 1 : 0;
    }
}
// Copy the geometry (edges + box) from another Rhombus_III.
void Rhombus_III::copy (Shape* s) {
    Rhombus_III* src = dynamic_cast<Rhombus_III*>(s);
    L1 = src->L1;  L2 = src->L2;   L3 = src->L3;   L4 = src->L4;
    L5 = src->L5;  L6 = src->L6;   L7 = src->L7;   L8 = src->L8;
    L9 = src->L9;  L10 = src->L10; L11 = src->L11; L12 = src->L12;
    *box = *src->box;
}
// Outer diamond L1..L4; L5..L8 split the plane around the center and
// L9..L12 bound the four wedge cut-outs.
Rhombus_III::Rhombus_III () {
    L1 = Linear_Eq(1, 1, -xmid - ymid/3, true);
    L2 = Linear_Eq(1, -1, -xmid+ymid/3, false);
    L3 = Linear_Eq(1, 1, -xmid-5*ymid/3, false);
    L4 = Linear_Eq(1,-1,-xmid+5*ymid/3, true);
    L5 = Linear_Eq(1, 0, -15*xmid/16, false);
    L6 = Linear_Eq(0, 1, -15*ymid/16, false);
    L7 = Linear_Eq(1, 0, -17*xmid/16, true);
    L8 = Linear_Eq(0, 1, -17*ymid/16, true);
    L9 = Linear_Eq(1, 1, -xmid - ymid/2, true);
    L10 = Linear_Eq(1, -1, -xmid + 3*ymid/2, true);
    L11 = Linear_Eq(1, -1, -xmid+ymid/2 , false);
    L12 = Linear_Eq(1, 1, -xmid-3*ymid/2, false);
    box = new Bounding_box( xmid/3, ymid/3,
                            5*xmid/3+1, ymid/3,
                            5*xmid/3+1, 5*ymid/3+1,
                            xmid/3, 5*ymid/3+1);
}
// ========================= Rhombus_IV =============================
// Map the bounding box and all six edge equations through T.
void Rhombus_IV::transform (Affine_map T) {
    box->transform(T);
    L1.transform(T); L2.transform(T); L3.transform(T);
    L4.transform(T); L5.transform(T); L6.transform(T);
}
// Rasterize the union of two triangles: (L1, L2, L3) and (L4, L5, L6).
void Rhombus_IV::gen_bitmap() {
    CPU_Shape::gen_bitmap();
    for (int i = 0; i < xmax*ymax; i++) {
        const int px = i % xmax;   // column
        const int py = i / xmax;   // row
        const bool triA = L1.in_halfspace(px, py)
                       && L2.in_halfspace(px, py)
                       && L3.in_halfspace(px, py);
        const bool triB = L4.in_halfspace(px, py)
                       && L5.in_halfspace(px, py)
                       && L6.in_halfspace(px, py);
        bitmap[i] = (triA || triB) ? 1 : 0;
    }
}
// Copy the geometry (edges + box) from another Rhombus_IV.
void Rhombus_IV::copy (Shape* s) {
    Rhombus_IV* src = dynamic_cast<Rhombus_IV*>(s);
    L1 = src->L1; L2 = src->L2; L3 = src->L3;
    L4 = src->L4; L5 = src->L5; L6 = src->L6;
    *box = *src->box;
}
// Two triangles sharing the diagonals of the central square.
Rhombus_IV::Rhombus_IV () {
    L1 = Linear_Eq(1, -1, ymid - xmid, true);
    L2 = Linear_Eq(1, 1, -ymid - xmid, false);
    L3 = Linear_Eq(0, 1, -ymid/2, true);
    L4 = Linear_Eq(1, -1, ymid-xmid, false);
    L5 = Linear_Eq(1, 1, -ymid - xmid, true);
    L6 = Linear_Eq(0, 1, -3*ymid/2, false);
    box = new Bounding_box( xmid/2, ymid/2,
                            3*xmid/2+1, ymid/2,
                            3*xmid/2+1, 3*ymid/2+1,
                            xmid/2, 3*ymid/2+1);
}
// ========================= Diamond =============================
// Map the bounding box, the four parabolic sides and the four clipping
// lines through T.
void Diamond::transform (Affine_map T) {
    box->transform(T);
    Q1.transform(T); Q2.transform(T); Q3.transform(T); Q4.transform(T);
    L1.transform(T); L2.transform(T); L3.transform(T); L4.transform(T);
}
// Rasterize: a pixel is set when it satisfies every parabolic and linear
// constraint at once.
void Diamond::gen_bitmap() {
    CPU_Shape::gen_bitmap();
    for (int i = 0; i < xmax*ymax; i++) {
        const int px = i % xmax;   // column
        const int py = i / xmax;   // row
        const bool inside = Q1.in_halfspace(px, py) && Q2.in_halfspace(px, py)
                         && Q3.in_halfspace(px, py) && Q4.in_halfspace(px, py)
                         && L1.in_halfspace(px, py) && L2.in_halfspace(px, py)
                         && L3.in_halfspace(px, py) && L4.in_halfspace(px, py);
        bitmap[i] = inside ? 1 : 0;
    }
}
// Copy the geometry (parabolas, lines, box) from another Diamond.
void Diamond::copy (Shape* s) {
    Diamond* src = dynamic_cast<Diamond*>(s);
    Q1 = src->Q1; Q2 = src->Q2; Q3 = src->Q3; Q4 = src->Q4;
    L1 = src->L1; L2 = src->L2; L3 = src->L3; L4 = src->L4;
    *box = *src->box;
}
// Four parabolic arcs (vertex offsets x1..x4, curvatures A1..A4) clipped
// by an axis-aligned rectangle.
Diamond::Diamond () {
    float x1 = xmid/4, A1 = -8*ymid/(3*xmid*(3*xmid - 4*x1)), B1 = ymid/3 - A1 * (xmid-x1)*(xmid-x1),
          x2 = 7*xmid/4, A2 = 8*ymid/(3*xmid*(5*xmid - 4*x2)), B2 = ymid/3 - A2 * (xmid-x2)*(xmid-x2),
          x3 = x1, A3 = -A1, B3 = 5*ymid/3 - A3 * (xmid-x3)*(xmid-x3),
          x4 = x2, A4 = -A2, B4 = 5*ymid/3 - A4 * (xmid-x4)*(xmid-x4);
    Q1 = Quadratic_Eq(A1, 0, 0, -2*A1*x1, -1, A1*x1*x1 + B1, false);
    Q2 = Quadratic_Eq(A2, 0, 0, -2*A2*x2, -1, A2*x2*x2 + B2, false);
    Q3 = Quadratic_Eq(A3, 0, 0, -2*A3*x3, -1, A3*x3*x3 + B3, true);
    Q4 = Quadratic_Eq(A4, 0, 0, -2*A4*x4, -1, A4*x4*x4 + B4, true);
    L1 = Linear_Eq(1, 0, -xmid/2, true);
    L2 = Linear_Eq(1, 0, -3*xmid/2, false);
    L3 = Linear_Eq(0, 1, -ymid/3, true);
    L4 = Linear_Eq(0, 1, -5*ymid/3, false);
    box = new Bounding_box( xmid/2, ymid/3,
                            3*xmid/2+1, ymid/3,
                            3*xmid/2+1, 5*ymid/3+1,
                            xmid/2, 5*ymid/3+1);
}
// ========================= Club =============================
// Applies the affine map T to all bounding curves, clip lines and the box.
void Club::transform (Affine_map T) {
    Q1.transform(T); Q2.transform(T); Q3.transform(T); Q4.transform(T); Q5.transform(T);
    L1.transform(T); L2.transform(T); L3.transform(T); L4.transform(T);
    L5.transform(T); L6.transform(T); L7.transform(T);
    box->transform(T);
}
// Rasterizes the club: union of three circles (Q1..Q3) and a parabolic
// stem (Q4 & Q5 clipped by L5..L7), all intersected with the outer
// clipping rectangle L1..L4.
void Club::gen_bitmap() {
    CPU_Shape::gen_bitmap();  // lazily allocate the bitmap buffer
    int x, y;
    for (int i = 0; i < xmax*ymax; i++) {
        x = (i % xmax) * 1 ;
        y = (i / xmax) * 1 ;
        if ( (Q1.in_halfspace(x,y) || Q2.in_halfspace(x,y) || Q3.in_halfspace(x,y) ||
              (Q4.in_halfspace(x,y) && Q5.in_halfspace(x,y) && L5.in_halfspace(x,y) && L6.in_halfspace(x,y) && L7.in_halfspace(x,y))) &&
             L1.in_halfspace(x,y) && L2.in_halfspace(x,y) &&
             L3.in_halfspace(x,y) && L4.in_halfspace(x,y) )
            bitmap[i] = 1;
        else bitmap[i] = 0;
    }
}
// Copies the analytic description (curves + box) from another Club.
void Club::copy (Shape* s) {
    Club* c = dynamic_cast<Club*>(s);
    this->Q1 = c->Q1; this->Q2 = c->Q2; this->Q3 = c->Q3; this->Q4 = c->Q4; this->Q5 = c->Q5;
    this->L1 = c->L1; this->L2 = c->L2; this->L3 = c->L3; this->L4 = c->L4;
    this->L5 = c->L5; this->L6 = c->L6; this->L7 = c->L7;
    *this->box = *c->box;
}
// Builds the default club: three leaf circles, a parabola-sided stem
// (coefficient A, apex B) and the bounding rectangle L1..L4 / box.
Club::Club () {
    float A = - 9*ymid/(2*xmid*xmid), B = 7*ymid/4, x0 = 2*xmid/3, x1 = 4*xmid/3;
    // Three circles
    Q1 = Quadratic_Eq(1, 0, 1, -2*xmid, -5*ymid/4, xmid*xmid + 161*ymid*ymid/576, false);
    Q2 = Quadratic_Eq(1, 0, 1, -3*xmid/2, -9*ymid/4, 9*xmid*xmid/16 + 665*ymid*ymid/576, false);
    Q3 = Quadratic_Eq(1, 0, 1, -5*xmid/2, -9*ymid/4, 25*xmid*xmid/16 + 665*ymid*ymid/576, false);
    // The stem
    Q4 = Quadratic_Eq(A, 0, 0, -2*A*x0, -1, A*x0*x0 + B, false);
    Q5 = Quadratic_Eq(A, 0, 0, -2*A*x1, -1, A*x1*x1 + B, false);
    L5 = Linear_Eq(1, 0, -2*xmid/3, true);
    L6 = Linear_Eq(1, 0, -4*xmid/3, false);
    L7 = Linear_Eq(0, 1, -5*ymid/3, false);
    // Boundaries
    L1 = Linear_Eq(1, 0, -5*xmid/12, true);
    L2 = Linear_Eq(1, 0, -19*xmid/12, false);
    L3 = Linear_Eq(0, 1, -7*ymid/24, true);
    L4 = Linear_Eq(0, 1, -5*ymid/3, false);
    box = new Bounding_box( 5*xmid/12,    7*ymid/24,
                            19*xmid/12+1, 7*ymid/24,
                            19*xmid/12+1, 5*ymid/3+1,
                            5*xmid/12,    5*ymid/3+1);
}
// ========================= Heart =============================
// Applies the affine map T to all bounding curves and the bounding box.
void Heart::transform (Affine_map T) {
    Q1.transform(T); Q2.transform(T); C3.transform(T); C4.transform(T);
    L11.transform(T); L12.transform(T); L21.transform(T); L22.transform(T);
    L31.transform(T); L32.transform(T); L41.transform(T); L42.transform(T);
    box->transform(T);
}
// Rasterizes the heart as the union of four clipped regions: two ellipse
// lobes (Q1/Q2, upper half) and two cubic-curve cheeks (C3/C4, lower half),
// each restricted to its quadrant by a pair of lines.
void Heart::gen_bitmap() {
    CPU_Shape::gen_bitmap();  // lazily allocate the bitmap buffer
    int x, y;
    for (int i = 0; i < xmax*ymax; i++) {
        x = (i % xmax) * 1 ;
        y = (i / xmax) * 1 ;
        if ( (Q1.in_halfspace(x,y) && L11.in_halfspace(x,y) && L12.in_halfspace(x,y)) ||
             (Q2.in_halfspace(x,y) && L21.in_halfspace(x,y) && L22.in_halfspace(x,y)) ||
             (C3.in_halfspace(x,y) && L31.in_halfspace(x,y) && L32.in_halfspace(x,y)) ||
             (C4.in_halfspace(x,y) && L41.in_halfspace(x,y) && L42.in_halfspace(x,y)) )
            bitmap[i] = 1;
        else bitmap[i] = 0;
    }
}
// Copies the analytic description (curves + box) from another Heart.
void Heart::copy (Shape* s) {
    Heart* h = dynamic_cast<Heart*>(s);
    this->Q1 = h->Q1; this->Q2 = h->Q2; this->C3 = h->C3; this->C4 = h->C4;
    this->L11 = h->L11; this->L12 = h->L12; this->L21 = h->L21; this->L22 = h->L22;
    this->L31 = h->L31; this->L32 = h->L32; this->L41 = h->L41; this->L42 = h->L42;
    *this->box = *h->box;
}
// Builds the default heart: A/B are the lobe ellipse semi-axes, (x0,y0)
// and (x1,y1) their centers; C/D scale and shift the cubic cheeks.
// NOTE(review): C = (5*ymid) / (4*xmid*(...)) — if xmid/ymid are integral
// types this divides in integer arithmetic and may truncate to 0 before
// the assignment to float; confirm member types in shapes.hpp.
Heart::Heart () {
    float A = xmid/3 + xmid/25,
          B = ymid/2,
          C = (5*ymid) / (4*xmid*((2*xmid/5)*(2*xmid/5) + 999)),
          D = 11*ymid/8,
          x0 = 2*xmid/3 + xmid/25, y0 = 7*ymid/8,
          x1 = 4*xmid/3 - xmid/25, y1 = 7*ymid/8;
    Q1 = Quadratic_Eq(B*B, 0, A*A, -B*B*2*x0, -A*A*2*y0, B*B*x0*x0 + A*A*y0*y0 - A*A*B*B, false);
    Q2 = Quadratic_Eq(B*B, 0, A*A, -B*B*2*x1, -A*A*2*y1, B*B*x1*x1 + A*A*y1*y1 - A*A*B*B, false);
    // C3, non-expanded: ( ( C * (x - 2*xm/3) * 0.001 * ( (x-2*xm/3)*(x-2*xm/3) + 999) + D - y ) >= 0 );
    C3 = Cubic_Eq(C, 0, 0, 0, -2*C*xmid, 0, 0, C*(1.333*xmid*xmid+999), -1, D - C*(8*xmid*xmid*xmid/27 + 666*xmid), true);
    //            x^3           x^2  xy  y^2  x                         y   1
    C4 = Cubic_Eq(-C, 0, 0, 0, 4*C*xmid, 0, 0, -C*(16*xmid*xmid/3 + 999), -1, D + C*(64*xmid*xmid*xmid/27 + 1332*xmid), true);
    L11 = Linear_Eq(0, 1, -ymid, false);
    L12 = Linear_Eq(1, 0, -xmid, false);
    L21 = L11;
    L22 = Linear_Eq(1, 0, -xmid, true);
    L31 = Linear_Eq(0, 1, -ymid, true);
    L32 = L12;
    L41 = L31;
    L42 = L22;
    float ybot = C*xmid/3*( xmid*xmid/9 + 999 ) + D;  // y of the bottom tip
    box = new Bounding_box( x0-A,   y0-B,
                            x1+A+1, y0-B,
                            x1+A+1, ybot+1,
                            x0-A,   ybot+1);
}
// ========================= Spade =============================
// Applies the affine map T to all bounding curves and the bounding box.
void Spade::transform (Affine_map T) {
    Q1.transform(T); Q2.transform(T); C3.transform(T); C4.transform(T);
    L11.transform(T); L12.transform(T); L21.transform(T); L22.transform(T);
    L31.transform(T); L32.transform(T); L41.transform(T); L42.transform(T);
    Q5.transform(T); Q6.transform(T);
    L1.transform(T); L2.transform(T); L3.transform(T);
    box->transform(T);
}
// Rasterizes the spade: an upside-down heart (four clipped regions, as in
// Heart::gen_bitmap) plus a parabola-sided stem (Q5 & Q6 clipped by L1..L3).
void Spade::gen_bitmap() {
    CPU_Shape::gen_bitmap();  // lazily allocate the bitmap buffer
    int x, y;
    for (int i = 0; i < xmax*ymax; i++) {
        x = (i % xmax) * 1 ;
        y = (i / xmax) * 1 ;
        if ( (Q1.in_halfspace(x,y) && L11.in_halfspace(x,y) && L12.in_halfspace(x,y)) ||
             (Q2.in_halfspace(x,y) && L21.in_halfspace(x,y) && L22.in_halfspace(x,y)) ||
             (C3.in_halfspace(x,y) && L31.in_halfspace(x,y) && L32.in_halfspace(x,y)) ||
             (C4.in_halfspace(x,y) && L41.in_halfspace(x,y) && L42.in_halfspace(x,y)) ||
             (Q5.in_halfspace(x,y) && Q6.in_halfspace(x,y) && L1.in_halfspace(x,y) && L2.in_halfspace(x,y) && L3.in_halfspace(x,y)) )
            bitmap[i] = 1;
        else bitmap[i] = 0;
    }
}
// Copies the analytic description (curves + box) from another Spade.
void Spade::copy (Shape* s) {
    Spade* h = dynamic_cast<Spade*>(s);
    this->Q1 = h->Q1; this->Q2 = h->Q2; this->C3 = h->C3; this->C4 = h->C4;
    this->Q5 = h->Q5; this->Q6 = h->Q6;
    this->L11 = h->L11; this->L12 = h->L12; this->L21 = h->L21; this->L22 = h->L22;
    this->L31 = h->L31; this->L32 = h->L32; this->L41 = h->L41; this->L42 = h->L42;
    this->L1 = h->L1; this->L2 = h->L2; this->L3 = h->L3;
    *this->box = *h->box;
}
// Builds the default spade: a vertically flipped heart (compare
// Heart::Heart — same A/B/C constants, shifted D, and the Cubic_Eq /
// L11..L42 orientations mirrored) plus a stem identical in form to the
// one used by Club::Club.
// NOTE(review): as in Heart::Heart, C may truncate to 0 under integer
// division if xmid/ymid are integral — confirm member types.
Spade::Spade () {
    // The upside-down heart
    float A = xmid/3 + xmid/25,
          B = 2*ymid/5,
          C = (5*ymid) / (4*xmid*((2*xmid/5)*(2*xmid/5) + 999)),
          D = 11*ymid/8 - 2*ymid,
          x0 = 2*xmid/3 + xmid/25, y0 = 17*ymid/16, // Centers of the two ellipses
          x1 = 4*xmid/3 - xmid/25, y1 = 17*ymid/16;
    Q1 = Quadratic_Eq(B*B, 0, A*A, -B*B*2*x0, -A*A*2*y0, B*B*x0*x0 + A*A*y0*y0 - A*A*B*B, false);
    Q2 = Quadratic_Eq(B*B, 0, A*A, -B*B*2*x1, -A*A*2*y1, B*B*x1*x1 + A*A*y1*y1 - A*A*B*B, false);
    C3 = Cubic_Eq(C, 0, 0, 0, -2*C*xmid, 0, 0, C*(1.333*xmid*xmid+999), 1, D - C*(8*xmid*xmid*xmid/27 + 666*xmid), true);
    //            x^3           x^2  xy  y^2  x                         y   1
    C4 = Cubic_Eq(-C, 0, 0, 0, 4*C*xmid, 0, 0, -C*(16*xmid*xmid/3 + 999), 1, D + C*(64*xmid*xmid*xmid/27 + 1332*xmid), true);
    L11 = Linear_Eq(0, 1, -ymid, true);
    L12 = Linear_Eq(1, 0, -xmid, false);
    L21 = L11;
    L22 = Linear_Eq(1, 0, -xmid, true);
    L31 = Linear_Eq(0, 1, -ymid, false);
    L32 = L12;
    L41 = L31;
    L42 = L22;
    // The stem
    float E = - 9*ymid/(2*xmid*xmid), F = 7*ymid/4, x2 = 2*xmid/3, x3 = 4*xmid/3;
    Q5 = Quadratic_Eq(E, 0, 0, -2*E*x2, -1, E*x2*x2 + F, false);
    Q6 = Quadratic_Eq(E, 0, 0, -2*E*x3, -1, E*x3*x3 + F, false);
    L1 = Linear_Eq(1, 0, -2*xmid/3, true);
    L2 = Linear_Eq(1, 0, -4*xmid/3, false);
    L3 = Linear_Eq(0, 1, -5*ymid/3, false);
    float ytop = -C*xmid/3*( xmid*xmid/9 + 999 ) - D;  // y of the top tip
    box = new Bounding_box( x0-A,   ytop,
                            x1+A+1, ytop,
                            x1+A+1, 5*ymid/3+1,
                            x0-A,   5*ymid/3+1);
}
// =====================================================================
// =====================================================================
// =====================================================================
// Factory: heap-allocates the CPU_Shape subclass matching `type`.
// The caller owns (and must delete) the returned object.
// Returns a null pointer for an unrecognized enum value; the explicit
// default branch makes that contract visible and keeps this switch
// consistent with name_by_shape_type / file_name_by_shape_type, which
// both handle the default case.
CPU_Shape* allocate_mem_by_shape_type(Shape_type type) {
    CPU_Shape* shape_link = 0;
    switch (type) {
    case TRIANGLE: shape_link = new Triangle();
        break;
    case SQUARE: shape_link = new Square();
        break;
    case PENTAGON: shape_link = new Pentagon();
        break;
    case HEXAGON: shape_link = new Hexagon();
        break;
    case CIRCLE: shape_link = new Circle();
        break;
    case CIRCLE_II: shape_link = new Circle_II();
        break;
    case CIRCLE_III: shape_link = new Circle_III();
        break;
    case CIRCLE_IV: shape_link = new Circle_IV();
        break;
    case RHOMBUS: shape_link = new Rhombus();
        break;
    case RHOMBUS_II: shape_link = new Rhombus_II();
        break;
    case RHOMBUS_III: shape_link = new Rhombus_III();
        break;
    case RHOMBUS_IV: shape_link = new Rhombus_IV();
        break;
    case HEART: shape_link = new Heart();
        break;
    case DIAMOND: shape_link = new Diamond();
        break;
    case CLUB: shape_link = new Club();
        break;
    case SPADE: shape_link = new Spade();
        break;
    default: // Unknown shape type: leave shape_link null; caller must check.
        break;
    }
    return shape_link;
}
// Maps a Shape_type to its human-readable display name.
// Unknown values yield a diagnostic string rather than an empty one.
std::string name_by_shape_type(Shape_type type) {
    if (type == TRIANGLE)    return "Triangle";
    if (type == SQUARE)      return "Square";
    if (type == PENTAGON)    return "Pentagon";
    if (type == HEXAGON)     return "Hexagon";
    if (type == CIRCLE)      return "Circle";
    if (type == CIRCLE_II)   return "Circle II";
    if (type == CIRCLE_III)  return "Circle III";
    if (type == CIRCLE_IV)   return "Circle IV";
    if (type == RHOMBUS)     return "Rhombus";
    if (type == RHOMBUS_II)  return "Rhombus II";
    if (type == RHOMBUS_III) return "Rhombus III";
    if (type == RHOMBUS_IV)  return "Rhombus IV";
    if (type == HEART)       return "Heart";
    if (type == DIAMOND)     return "Diamond";
    if (type == CLUB)        return "Club";
    if (type == SPADE)       return "Spade";
    return "Unrecognized shape in name_by_shape_type"; // Error
}
// Maps a Shape_type to the lowercase token used to build file names.
// Unknown values yield a diagnostic string rather than an empty one.
std::string file_name_by_shape_type(Shape_type type) {
    if (type == TRIANGLE)    return "triangle";
    if (type == SQUARE)      return "square";
    if (type == PENTAGON)    return "pentagon";
    if (type == HEXAGON)     return "hexagon";
    if (type == CIRCLE)      return "circle";
    if (type == CIRCLE_II)   return "circleii";
    if (type == CIRCLE_III)  return "circleiii";
    if (type == CIRCLE_IV)   return "circleiv";
    if (type == RHOMBUS)     return "rhombus";
    if (type == RHOMBUS_II)  return "rhombusii";
    if (type == RHOMBUS_III) return "rhombusiii";
    if (type == RHOMBUS_IV)  return "rhombusiv";
    if (type == HEART)       return "heart";
    if (type == DIAMOND)     return "diamond";
    if (type == CLUB)        return "club";
    if (type == SPADE)       return "spade";
    return "Unrecognized shape in file_name_by_shape_type"; // Error
}
| edd95096257ad5de470ddcff2134d8acdee028bc.cu | #include "shapes.hpp"
#include <iostream>
#include <fstream>
using namespace std;
// 2D affine map p -> A*p + b with A = [[a11 a12],[a21 a22]], b = (b1, b2).
// det(A) is cached once here; the Eq::transform routines divide by it to
// substitute the inverse map.
Affine_map::Affine_map( float a11, float a12,
                        float a21, float a22,
                        float b1, float b2)
    : a11(a11), a12(a12),
      a21(a21), a22(a22),
      b1(b1), b2(b2),
      det(a11 * a22 - a12 * a21) {}
// Generating error (comes up in the gen_translation function of Bounding_box)
// Thrown when no in-canvas translation exists for the current bounding box.
Generr::Generr() {};
// Initializing the static members of Bounding_box
// A single Mersenne Twister shared by every Bounding_box, seeded once
// from the OS entropy source.
std::random_device Bounding_box::rdev;
std::mt19937 Bounding_box::gen(rdev());
// Maps the four corners through T and refreshes the extreme coordinates
// plus the translation slack (xh/xl/yh/yl) used by gen_translation().
// NOTE(review): xmax/xmin/ymax/ymin are folded in with std::max/min but
// never reset first, so they accumulate extremes across successive
// transform() calls (and start from whatever value the members held) —
// confirm against the initializers in shapes.hpp that this is intended.
void Bounding_box::transform (Affine_map T) {
    float new_x, new_y;
    for (int i = 0; i < 4; i++) {
        new_x = T.a11*x[i] + T.a12*y[i] + T.b1;
        new_y = T.a21*x[i] + T.a22*y[i] + T.b2;
        x[i] = new_x;
        y[i] = new_y;
        xmax = std::max(xmax, new_x); xmin = std::min(xmin, new_x);
        ymax = std::max(ymax, new_y); ymin = std::min(ymin, new_y);
    }
    // Remaining slack between the box extremes and the canvas edges.
    xh = x_bound - xmax;
    xl = -xmin;
    yh = y_bound - ymax;
    yl = -ymin;
}
// Writes the four corners as "x y," pairs, terminated by '!'.
void Bounding_box::print_to_file (ofstream& to) {
    for (int i = 0; i < 4; i++)
        to << x[i] << " " << y[i] << ",";
    to << "!";
}
// Picks a uniformly random translation that keeps the box inside the
// canvas; throws Generr when the slack interval is empty.
Affine_map Bounding_box::gen_translation() {
    if ( (xl >= xh) || (yl >= yh) ) throw Generr();
    // A translation that moves the shape inside the canvas can't
    // be generated
    uniform_int_distribution<int> xdistr (xl, xh);
    uniform_int_distribution<int> ydistr (yl, yh);
    Affine_map T = Affine_map(1, 0, 0, 1, xdistr(gen), ydistr(gen));
    return T;
}
// Stores the four corner coordinates.
// NOTE(review): the x_bound / y_bound parameters are accepted but never
// stored here, and the extrema/slack members read by transform() are not
// initialized — presumably both have in-class initializers or defaults in
// shapes.hpp; verify.
Bounding_box::Bounding_box (float x0, float y0,
                            float x1, float y1,
                            float x2, float y2,
                            float x3, float y3,
                            float x_bound, float y_bound) {
    x[0] = x0; y[0] = y0;
    x[1] = x1; y[1] = y1;
    x[2] = x2; y[2] = y2;
    x[3] = x3; y[3] = y3;
}
// =====================================================================
// =================== Equations ====================================
// =====================================================================
// Updates the line coefficients so the half-space test tracks the image of
// the region under T: the new coefficients are those of A*x + B*y + C
// composed with T^{-1} (hence the adjugate-of-T terms divided by det).
void Linear_Eq::transform (Affine_map T) {
    float new_A = (1/T.det) * ( A * T.a22 - B * T.a21),
          new_B = (1/T.det) * ( -A * T.a12 + B * T.a11),
          new_C = (1/T.det) * ( A*(-T.a22*T.b1 + T.a12*T.b2) + B*(T.a21*T.b1 - T.a11*T.b2)) + C;
    A = new_A; B = new_B; C = new_C;
}
// True when (x,y) is on the side of the line selected by `geq`:
// the sign test (A*x + B*y + C >= 0) must agree with the orientation flag.
bool Linear_Eq::in_halfspace(int x, int y) {
    const bool nonneg = (A*x + B*y + C) >= 0;
    return nonneg == geq;
}
// Stores the coefficients of A*x + B*y + C and which side counts as inside.
Linear_Eq::Linear_Eq (float A, float B, float C, bool geq) {
    this->geq = geq;
    this->A = A;
    this->B = B;
    this->C = C;
}
// ===================================================================
// Updates the conic coefficients so the half-space test tracks the image of
// the region under T: each term substitutes T^{-1} (adjugate over det) into
// A*x^2 + B*x*y + C*y^2 + D*x + E*y + F and regroups by monomial.
void Quadratic_Eq::transform (Affine_map T) {
    float new_A = (1/T.det) * (1/T.det) * (A*T.a22*T.a22
                                           - B*T.a21*T.a22
                                           + C*T.a21*T.a21),
          new_B = (1/T.det) * (1/T.det) * (-2*A*T.a12*T.a22
                                           + B*(T.a11*T.a22 + T.a12*T.a21)
                                           - 2*C*(T.a11*T.a21)),
          new_C = (1/T.det) * (1/T.det) * (A*T.a12*T.a12
                                           - B*T.a11*T.a12
                                           + C*T.a11*T.a11),
          new_D = (1/T.det) * ( (1/T.det) * (2*A*(T.a12*T.a22*T.b2 - T.a22*T.a22*T.b1)
                                             + B*(2*T.a21*T.a22*T.b1 - T.a11*T.a22*T.b2 - T.a21*T.a12*T.b2)
                                             + 2*C*(T.a11*T.a21*T.b2 - T.a21*T.a21*T.b1))
                                + D*T.a22 - E*T.a21),
          new_E = (1/T.det) * ( (1/T.det) * (2*A*(T.a12*T.a22*T.b1 - T.a12*T.a12*T.b2)
                                             + B*(2*T.a11*T.a12*T.b2 - T.a12*T.a21*T.b1 - T.a11*T.a22*T.b1)
                                             + 2*C*(T.a11*T.a21*T.b1 - T.a11*T.a11*T.b2))
                                - D*T.a12 + E*T.a11),
          new_F = (1/T.det) * ( (1/T.det) * (A*(T.a22*T.a22*T.b1*T.b1 + T.a12*T.a12*T.b2*T.b2 - 2*T.a12*T.a22*T.b1*T.b2)
                                             + B*((T.a11*T.a22 + T.a12*T.a21)*T.b1*T.b2 - T.a21*T.a22*T.b1*T.b1 - T.a11*T.a12*T.b2*T.b2)
                                             + C*(T.a21*T.a21*T.b1*T.b1 + T.a11*T.a11*T.b2*T.b2 - 2*T.a11*T.a21*T.b1*T.b2))
                                + D*(-T.a22*T.b1 + T.a12*T.b2) + E*(T.a21*T.b1 - T.a11*T.b2)) + F;
    A = new_A; B = new_B; C = new_C;
    D = new_D; E = new_E; F = new_F;
}
// True when (x,y) satisfies the conic inequality with the stored
// orientation: sign of the quadratic form must agree with `geq`.
bool Quadratic_Eq::in_halfspace (int x, int y) {
    bool c = ( (A*x*x + B*x*y + C*y*y + D*x + E*y + F) >= 0);
    if ((geq && c) || (!geq && !c)) return true;
    else return false;
}
// Stores coefficients of A*x^2 + B*xy + C*y^2 + D*x + E*y + F and the
// inside orientation.
Quadratic_Eq::Quadratic_Eq (float A, float B, float C, float D, float E, float F, bool geq) {
    this->A = A; this->B = B; this->C = C;
    this->D = D; this->E = E; this->F = F;
    this->geq = geq;
}
// ===================================================================
// Updates the cubic-curve coefficients so the half-space test tracks the
// image of the region under T. Each new coefficient is the result of
// substituting T^{-1} (adjugate over det, hence the (1/T.det) factors —
// one per polynomial degree) into
//   A*x^3 + B*x^2*y + C*x*y^2 + D*y^3 + E*x^2 + F*x*y + G*y^2 + H*x + I*y + J
// and collecting terms by monomial (marked u^3, u^2 v, ... in the comments,
// where (u,v) are the pre-image coordinates).
void Cubic_Eq::transform (Affine_map T) {
    float new_A = (1/T.det) * (1/T.det) * (1/T.det) * (A*T.a22*T.a22*T.a22 // u^3
                               -B*T.a21*T.a22*T.a22
                               +C*T.a21*T.a21*T.a22
                               -D*T.a21*T.a21*T.a21),
          new_B = (1/T.det) * (1/T.det) * (1/T.det) * ((-1)*3*A*T.a12*T.a22*T.a22 // u^2 v
                               +B*T.a22*(T.a11*T.a22 + 2*T.a12*T.a21)
                               -C*T.a21*(2*T.a11*T.a22 + T.a12*T.a21)
                               +3*D*T.a11*T.a21*T.a21),
          new_C = (1/T.det) * (1/T.det) * (1/T.det) * (3*A*T.a12*T.a12*T.a22 // uv^2
                               -B*T.a12*(2*T.a11*T.a22 + T.a12*T.a21)
                               +C*T.a11*(T.a11*T.a22 + 2*T.a12*T.a21)
                               -3*D*T.a11*T.a11*T.a21),
          new_D = (1/T.det) * (1/T.det) * (1/T.det) * ((-1)*A*T.a12*T.a12*T.a12 // v^3
                               +B*T.a11*T.a12*T.a12
                               +(-1)*C*T.a11*T.a11*T.a12
                               +D*T.a11*T.a11*T.a11),
          new_E = (1/T.det) * (1/T.det) * ( (1/T.det) * (3*A*T.a22*T.a22*(T.a12*T.b2 - T.a22*T.b1) // u^2
                               +B*T.a22*(3*T.a21*T.a22*T.b1 - T.a11*T.a22*T.b2 - 2*T.a12*T.a21*T.b2)
                               +C*T.a21*(-3*T.a21*T.a22*T.b1 + 2*T.a11*T.a22*T.b2 + T.a12*T.a21*T.b2)
                               +3*D*T.a21*T.a21*(T.a21*T.b1 - T.a11*T.b2))
                               +E*T.a22*T.a22
                               -F*T.a21*T.a22
                               +G*T.a21*T.a21),
          new_F = (1/T.det) * (1/T.det) * ( (1/T.det) * (6*A*T.a12*T.a22*(T.a22*T.b1 - T.a12*T.b2) // uv
                               +B*(4*T.a12*T.a22*(T.a11*T.b2 - T.a21*T.b1) + 2*((-1)*T.a11*T.a22*T.a22*T.b1 + T.a12*T.a12*T.a21*T.b2))
                               +C*( 4*T.a11*T.a21*(T.a22*T.b1 - T.a12*T.b2) + 2*(T.a12*T.a21*T.a21*T.b1 - T.a11*T.a11*T.a22*T.b2) )
                               +6*D*T.a11*T.a21*(T.a11*T.b2 - T.a21*T.b1) )
                               -2*E*T.a12*T.a22
                               + F*(T.a11*T.a22 + T.a12*T.a21)
                               - 2*G*(T.a11*T.a21)),
          new_G = (1/T.det) * (1/T.det) * ( (1/T.det) * (3*A*T.a12*T.a12*(T.a12*T.b2 - T.a22*T.b1) // v^2
                               +B*T.a12*(2*T.a11*T.a22*T.b1 + T.a12*T.a21*T.b1 - 3*T.a11*T.a12*T.b2)
                               +C*T.a11*((-1)*T.a11*T.a22*T.b1 - 2*T.a12*T.a21*T.b1 + 3*T.a11*T.a12*T.b2)
                               +3*D*T.a11*T.a11*(T.a21*T.b1 - T.a11*T.b2) )
                               +E*T.a12*T.a12
                               -F*T.a11*T.a12
                               +G*T.a11*T.a11),
          new_H = (1/T.det) * ( (1/T.det) * ((1/T.det) * (A*3*T.a22*(T.a22*T.b1 - T.a12*T.b2)*(T.a22*T.b1 - T.a12*T.b2) // u
                               +B*(2*T.a11*T.a22*T.b2*(T.a22*T.b1 - T.a12*T.b2)
                                   + T.a21*(3*T.a22*T.a22*T.b1*T.b1 - T.a12*T.a12*T.b2*T.b2 + 4*T.a12*T.a22*T.b1*T.b2))
                               +C*(2*T.a12*T.a21*T.b2*(T.a11*T.b2 - T.a21*T.b1)
                                   + T.a22*(3*T.a21*T.a21*T.b1*T.b1 + T.a11*T.a11*T.b2*T.b2 - 4*T.a11*T.a21*T.b1*T.b2))
                               -D*3*T.a21*(T.a21*T.b1 - T.a11*T.b2)*(T.a21*T.b1 - T.a11*T.b2))
                               + 2*E*(T.a12*T.a22*T.b2 - T.a22*T.a22*T.b1)
                               + F*(2*T.a21*T.a22*T.b1 - T.a11*T.a22*T.b2 - T.a21*T.a12*T.b2)
                               + 2*G*(T.a11*T.a21*T.b2 - T.a21*T.a21*T.b1))
                               + H*T.a22 - I*T.a21),
          new_I = (1/T.det) * ( (1/T.det) * ((1/T.det) * ((-1)*A*3*T.a12*(T.a22*T.b1 - T.a12*T.b2)*(T.a22*T.b1 - T.a12*T.b2) // v
                               +B*(2*T.a12*T.a21*T.b1*(T.a22*T.b1 - T.a12*T.b2)
                                   + T.a11*(T.a22*T.a22*T.b1*T.b1 + 3*T.a12*T.a12*T.b2*T.b2 - 4*T.a12*T.a22*T.b1*T.b2))
                               +C*(2*T.a11*T.a22*T.b1*(T.a11*T.b2 - T.a21*T.b1)
                                   + T.a12*(T.a21*T.a21*T.b1*T.b1 - 3*T.a11*T.a11*T.b2*T.b2 + 4*T.a11*T.a21*T.b1*T.b2))
                               +D*3*T.a11*(T.a11*T.b2 - T.a21*T.b1)*(T.a11*T.b2 - T.a21*T.b1))
                               + 2*E*(T.a12*T.a22*T.b1 - T.a12*T.a12*T.b2)
                               + F*(2*T.a11*T.a12*T.b2 - T.a12*T.a21*T.b1 - T.a11*T.a22*T.b1)
                               + 2*G*(T.a11*T.a21*T.b1 - T.a11*T.a11*T.b2))
                               - H*T.a12 + I*T.a11),
          new_J = (1/T.det) * (
                      (1/T.det) * (
                          (1/T.det) * (A*(T.a12*T.b2 - T.a22*T.b1)*(T.a12*T.b2 - T.a22*T.b1)*(T.a12*T.b2 - T.a22*T.b1) // 1
                               +B*(T.a22*T.b1 - T.a12*T.b2)*(T.a22*T.b1 - T.a12*T.b2)*(T.a21*T.b1 - T.a11*T.b2)
                               +C*(T.a11*T.b2 - T.a21*T.b1)*(T.a11*T.b2 - T.a21*T.b1)*(T.a12*T.b2 - T.a22*T.b1)
                               +D*(T.a21*T.b1 - T.a11*T.b2)*(T.a21*T.b1 - T.a11*T.b2)*(T.a21*T.b1 - T.a11*T.b2)
                          )
                          + E*(T.a22*T.b1 - T.a12*T.b2)*(T.a22*T.b1 - T.a12*T.b2)
                          + F*((T.a11*T.a22 + T.a12*T.a21)*T.b1*T.b2 - T.a21*T.a22*T.b1*T.b1 - T.a11*T.a12*T.b2*T.b2)
                          + G*(T.a21*T.b1 - T.a11*T.b2)*(T.a21*T.b1 - T.a11*T.b2)
                      )
                      + H*(-T.a22*T.b1 + T.a12*T.b2) + I*(T.a21*T.b1 - T.a11*T.b2)
                  ) + J;
    A = new_A; B = new_B; C = new_C; D = new_D; E = new_E;
    F = new_F; G = new_G; H = new_H; I = new_I; J = new_J;
}
// True when (x,y) satisfies the cubic inequality with the stored
// orientation: sign of the polynomial must agree with `geq`.
bool Cubic_Eq::in_halfspace(int x, int y) {
    bool c = ((A*x*x*x + B*x*x*y + C*x*y*y + D*y*y*y + E*x*x + F*x*y + G*y*y + H*x + I*y + J) >= 0);
    if ((geq && c) || (!geq && !c)) return true;
    else return false;
}
// Stores the ten cubic coefficients and the inside orientation.
Cubic_Eq::Cubic_Eq (float A, float B, float C, float D, float E,
                    float F, float G, float H, float I, float J, bool geq) {
    this->A = A; this->B = B; this->C = C; this->D = D; this->E = E;
    this->F = F; this->G = G; this->H = H; this->I = I; this->J = J;
    this->geq = geq;
}
// =====================================================================
// =================== Shapes =======================================
// =====================================================================
// Records the canvas dimensions and caches the canvas midpoint, which the
// concrete shape constructors use as their geometric center.
Shape::Shape (int xmax, int ymax) {
    this->xmax = xmax;
    this->ymax = ymax;
    this->xmid = xmax / 2;
    this->ymid = ymax / 2;
}
// =================== CPU Shape =======================================
// Delegates to the bounding box: a random in-canvas translation
// (may throw Generr, see Bounding_box::gen_translation).
Affine_map CPU_Shape::gen_translation() {
    return box->gen_translation();
}
// Morphological dilation of the bitmap with a disc of radius r: every set
// pixel also sets all pixels within Euclidean distance r of it.
// NOTE(review): loops and indexing use INPUT_DIM, not this->xmax/ymax —
// presumably they are always equal; confirm, otherwise pixels outside the
// r-inset of the INPUT_DIM grid are mis-addressed.
void CPU_Shape::rounden (int r) {
    int i, j, a, b, c;
    std::vector<coords> coord_stack;
    // First pass: collect the set pixels (away from the border so the
    // disc stamp below stays in bounds).
    for (i = r; i < INPUT_DIM-r; i++) {
        for (j = r; j < INPUT_DIM-r; j++) {
            c = i + j*INPUT_DIM;
            if (bitmap[c] == 1)
                coord_stack.push_back( std::make_tuple(i,j) );
        }
    }
    // Second pass: stamp a filled disc of radius r around each one.
    for (std::vector<coords>::iterator it = coord_stack.begin(); it != coord_stack.end(); it++) {
        i = std::get<0>(*it);
        j = std::get<1>(*it);
        for (a = -r; a <= r; a++) {
            for (b = -r; b <= r; b++) {
                if (a*a + b*b <= r*r) {
                    c = (i+a) + (j+b)*INPUT_DIM;
                    bitmap[c] = 1;
                }
            }
        }
    }
}
// Writes the raw bitmap digits followed by ",", then optionally the
// bounding box (see Bounding_box::print_to_file).
void CPU_Shape::print_to_file(ofstream& to, bool bbox) {
    for (int i = 0; i < xmax*ymax; i++) {
        to << bitmap[i];
    } to << ",";
    if (bbox) box->print_to_file(to);
}
// Pixel-wise difference between two bitmaps. Note: despite the name this
// returns the SQUARED L2 distance (no square root is taken), and it
// returns 0.0 after printing a warning when the dimensions differ.
float CPU_Shape::find_L2_distance_from (Shape* s2_in) {
    CPU_Shape* s2 = static_cast<CPU_Shape*>(s2_in);
    float dist = 0.0;
    if ( (xmax != s2->xmax) || (ymax != s2->ymax) ) {
        cout << "The shapes have different dimensions" << endl;
    }
    else {
        for (int i = 0; i < xmax * ymax; i++) {
            dist += (float) ( (bitmap[i] - s2->bitmap[i])*(bitmap[i] - s2->bitmap[i]) );
        }
    }
    return dist;
}
// Lazily allocates the bitmap buffer; contents are left uninitialized —
// every derived gen_bitmap() writes all xmax*ymax entries.
void CPU_Shape::gen_bitmap() {
    if (!bitmap_generated) {
        bitmap = new float[xmax * ymax];
        bitmap_generated = true;
    }
}
// Frees the bitmap (if it was ever generated) and the bounding box that
// the derived-class constructor allocated with new.
CPU_Shape::~CPU_Shape() {
    if (bitmap_generated) delete[] bitmap;
    delete box;
}
// =================== GPU Shape =======================================
// Host-side stubs: translation generation, distance and printing are not
// implemented for the GPU representation (no-ops / default values).
Affine_map GPU_Shape::gen_translation () { Affine_map T; return T; }
float GPU_Shape::find_L2_distance_from (Shape* s2) { return 0.0; }
void GPU_Shape::print_to_file (ofstream& to, bool bbox) {}
// Lazily allocates the device-side bitmap buffer.
// NOTE(review): the cudaMalloc return code is not checked — on failure
// `bitmap` is unusable but `bitmap_generated` is still set.
void GPU_Shape::gen_bitmap() {
    if (!bitmap_generated) {
        cudaMalloc( (void**) &bitmap, xmax * ymax * sizeof(float));
        bitmap_generated = true;
    }
}
// Allocates the device-side bounding box (error code unchecked).
GPU_Shape::GPU_Shape () {
    cudaMalloc ((void**) &box, sizeof(Bounding_box));
}
// Releases device memory.
// NOTE(review): bitmap is freed even if gen_bitmap() never ran —
// presumably the member is null-initialized in shapes.hpp (cudaFree(0)
// is a no-op); verify.
GPU_Shape::~GPU_Shape () {
    cudaFree(bitmap); cudaFree(box);
}
// ========================= Triangle =============================
// Applies the affine map T to the three edge half-planes and the box.
void Triangle::transform (Affine_map T) {
    L1.transform(T); L2.transform(T); L3.transform(T);
    box->transform(T);
}
// Rasterizes the triangle as the intersection of three half-planes.
void Triangle::gen_bitmap() {
    CPU_Shape::gen_bitmap();  // lazily allocate the bitmap buffer
    int x, y;
    for (int i = 0; i < xmax*ymax; i++) {
        x = (i % xmax) * 1 ;
        y = (i / xmax) * 1 ;
        if ( L1.in_halfspace(x,y) &&
             L2.in_halfspace(x,y) &&
             L3.in_halfspace(x,y) )
            bitmap[i] = 1;
        else bitmap[i] = 0;
    }
}
// Copies the analytic description (edges + box) from another Triangle.
void Triangle::copy (Shape* s) {
    Triangle* t = dynamic_cast<Triangle*>(s);
    this->L1 = t->L1; this->L2 = t->L2; this->L3 = t->L3;
    *this->box = *t->box;
}
// Default triangle around (xmid, ymid); 0.577 ~ tan(30 deg), giving
// slanted left/right edges and a flat bottom at y = 3*ymid/2.
Triangle::Triangle() {
    L1 = Linear_Eq(1, 0.577 * xmid / ymid, -1.289 * xmid, true);
    L2 = Linear_Eq(1, -0.577 * xmid / ymid, -0.711 * xmid, false);
    L3 = Linear_Eq(0, 1, -3*ymid/2, false);
    box = new Bounding_box( xmid - 0.577*ymid-1, ymid/2-1,
                            xmid + 0.577*ymid+1, ymid/2-1,
                            xmid + 0.577*ymid-1, 3*ymid/2+1,
                            xmid - 0.577*ymid+1, 3*ymid/2+1);
}
// Transforms the (device-resident) edge equations and box in place.
// NOTE(review): L1/L2/L3 and box are device pointers, yet transform() is
// invoked through them from host code here — presumably this method is
// only called on a host-side copy, or Linear_Eq::transform is
// host/device-compatible; verify call sites.
void GPU_Triangle::transform (Affine_map T) {
    L1->transform(T); L2->transform(T); L3->transform(T);
    box->transform(T);
}
// Not implemented for the GPU path (rasterization happens in a kernel
// elsewhere); intentional no-ops.
void GPU_Triangle::gen_bitmap () {}
void GPU_Triangle::copy (Shape* s) {}
// Builds the default triangle's edge equations and bounding box on the
// host (same constants as Triangle::Triangle), then uploads them into the
// device-side objects L1/L2/L3/box.
// Improvement: the host temporaries were needlessly heap-allocated with
// new/delete; automatic (stack) objects are simpler and cannot leak.
// NOTE(review): cudaMemcpy return codes are not checked, matching the
// rest of the file's CUDA calls.
GPU_Triangle::GPU_Triangle() {
    Linear_Eq L1_host(1, 0.577 * xmid / ymid, -1.289 * xmid, true);
    Linear_Eq L2_host(1, -0.577 * xmid / ymid, -0.711 * xmid, false);
    Linear_Eq L3_host(0, 1, -3*ymid/2, false);
    Bounding_box box_host( xmid - 0.577*ymid-1, ymid/2-1,
                           xmid + 0.577*ymid+1, ymid/2-1,
                           xmid + 0.577*ymid-1, 3*ymid/2+1,
                           xmid - 0.577*ymid+1, 3*ymid/2+1);
    cudaMemcpy (L1, &L1_host, sizeof(Linear_Eq), cudaMemcpyHostToDevice);
    cudaMemcpy (L2, &L2_host, sizeof(Linear_Eq), cudaMemcpyHostToDevice);
    cudaMemcpy (L3, &L3_host, sizeof(Linear_Eq), cudaMemcpyHostToDevice);
    cudaMemcpy (box, &box_host, sizeof(Bounding_box), cudaMemcpyHostToDevice);
}
// ========================= Square =============================
// Applies the affine map T to the four edge half-planes and the box.
void Square::transform (Affine_map T) {
    L1.transform(T);
    L2.transform(T);
    L3.transform(T);
    L4.transform(T);
    box->transform(T);
}
// Rasterizes the square as the intersection of four half-planes.
void Square::gen_bitmap() {
    CPU_Shape::gen_bitmap();  // lazily allocate the bitmap buffer
    for (int i = 0; i < xmax*ymax; i++) {
        const int x = i % xmax;
        const int y = i / xmax;
        const bool inside = L1.in_halfspace(x,y) && L2.in_halfspace(x,y) &&
                            L3.in_halfspace(x,y) && L4.in_halfspace(x,y);
        bitmap[i] = inside ? 1 : 0;
    }
}
// Copies the analytic description (edges + box) from another Square.
void Square::copy (Shape* s) {
    Square* sq = dynamic_cast<Square*>(s);
    this->L1 = sq->L1;
    this->L2 = sq->L2;
    this->L3 = sq->L3;
    this->L4 = sq->L4;
    *this->box = *sq->box;
}
// Axis-aligned square spanning [xmid/2, 3*xmid/2] x [ymid/2, 3*ymid/2].
Square::Square() {
    L1 = Linear_Eq(1, 0, -xmid/2, true);
    L2 = Linear_Eq(1, 0, -3*xmid/2, false);
    L3 = Linear_Eq(0, 1, -ymid/2, true);
    L4 = Linear_Eq(0, 1, -3*ymid/2, false);
    box = new Bounding_box( xmid/2-1,   ymid/2-1,
                            3*xmid/2+1, ymid/2-1,
                            3*xmid/2+1, 3*ymid/2+1,
                            xmid/2-1,   3*ymid/2+1);
}
// ========================= Pentagon =============================
// Applies the affine map T to the five edge half-planes and the box.
void Pentagon::transform (Affine_map T) {
    box->transform(T);
    L1.transform(T); L2.transform(T); L3.transform(T);
    L4.transform(T); L5.transform(T);
}
// Rasterizes the pentagon as the intersection of five half-planes.
void Pentagon::gen_bitmap() {
    CPU_Shape::gen_bitmap();  // lazily allocate the bitmap buffer
    int x, y;
    for (int i = 0; i < xmax*ymax; i++) {
        x = (i % xmax) * 1 ;
        y = (i / xmax) * 1 ;
        if ( L1.in_halfspace(x,y) &&
             L2.in_halfspace(x,y) &&
             L3.in_halfspace(x,y) &&
             L4.in_halfspace(x,y) &&
             L5.in_halfspace(x,y) )
            bitmap[i] = 1;
        else bitmap[i] = 0;
    }
}
// Copies the analytic description (edges + box) from another Pentagon.
void Pentagon::copy (Shape* s) {
    Pentagon* p = dynamic_cast<Pentagon*>(s);
    this->L1 = p->L1; this->L2 = p->L2; this->L3 = p->L3; this->L4 = p->L4;
    this->L5 = p->L5;
    *this->box = *p->box;
}
// Default pentagon from five hand-tuned edge lines, then enlarged by a
// fixed scale-and-shift because the base construction comes out small.
Pentagon::Pentagon() {
    L1 = Linear_Eq(1.376, -1, -2.065 * xmid + ymid, false);
    L2 = Linear_Eq(-0.325, -1, ymid - 0.1 * xmid, false);
    L3 = Linear_Eq(1, 0, -0.595*xmid, true );
    L4 = Linear_Eq(0.325, -1, ymid + 0.1 * xmid, true);
    L5 = Linear_Eq(-1.376, -1, 2.065 * xmid + ymid, true);
    box = new Bounding_box( 0.595*xmid, ymid/2,
                            3*xmid/2,   ymid/2,
                            3*xmid/2,   3*ymid/2,
                            0.595*xmid, 3*ymid/2);
    // The default pentagon defined as above is slightly too small
    Affine_map T = Affine_map(1.3, 0, 0, 1.3, -20, -20);
    transform(T);
}
// ========================= Hexagon =============================
// Applies the affine map T to the six edge half-planes and the box.
void Hexagon::transform (Affine_map T) {
    L1.transform(T); L2.transform(T); L3.transform(T);
    L4.transform(T); L5.transform(T); L6.transform(T);
    box->transform(T);
}
// Rasterizes the hexagon as the intersection of six half-planes.
void Hexagon::gen_bitmap() {
    CPU_Shape::gen_bitmap();  // lazily allocate the bitmap buffer
    int x, y;
    for (int i = 0; i < xmax*ymax; i++) {
        x = (i % xmax) * 1 ;
        y = (i / xmax) * 1 ;
        if ( L1.in_halfspace(x,y) &&
             L2.in_halfspace(x,y) &&
             L3.in_halfspace(x,y) &&
             L4.in_halfspace(x,y) &&
             L5.in_halfspace(x,y) &&
             L6.in_halfspace(x,y) )
            bitmap[i] = 1;
        else bitmap[i] = 0;
    }
}
// Copies the analytic description (edges + box) from another Hexagon.
void Hexagon::copy (Shape* s) {
    Hexagon* h = dynamic_cast<Hexagon*>(s);
    this->L1 = h->L1; this->L2 = h->L2; this->L3 = h->L3; this->L4 = h->L4;
    this->L5 = h->L5; this->L6 = h->L6;
    *this->box = *h->box;
}
// Default hexagon around (xmid, ymid); 0.577 ~ tan(30 deg) sets the
// slanted edge slopes, with flat left/right sides at 0.423/1.577*xmid.
Hexagon::Hexagon() {
    L1 = Linear_Eq(1, 0, -1.577*xmid, false);
    L2 = Linear_Eq(0.577*ymid/xmid, -1, -0.244*ymid, false);
    L3 = Linear_Eq(-0.577*ymid/xmid, -1, 0.911 * ymid, false);
    L4 = Linear_Eq(1, 0, -0.423*xmid, true);
    L5 = Linear_Eq(0.577*ymid/xmid, -1, 1.089 * ymid, true);
    L6 = Linear_Eq(-0.577*ymid/xmid, -1, 2.244 * ymid, true);
    box = new Bounding_box( 0.423*xmid,   ymid/3,
                            1.577*xmid+1, ymid/3,
                            1.577*xmid+1, 5*ymid/3+1,
                            0.423*xmid,   5*ymid/3+1);
}
// ========================= Circle =============================
// Applies the affine map T to the circle equation and the box.
void Circle::transform (Affine_map T) {
    Q1.transform(T);
    box->transform(T);
}
// Rasterizes the circle: a pixel is set when it satisfies the single
// quadratic (disc) constraint.
void Circle::gen_bitmap() {
    CPU_Shape::gen_bitmap();  // lazily allocate the bitmap buffer
    for (int i = 0; i < xmax*ymax; i++) {
        const int x = i % xmax;
        const int y = i / xmax;
        bitmap[i] = Q1.in_halfspace(x,y) ? 1 : 0;
    }
}
// Copies the analytic description (disc + box) from another Circle.
void Circle::copy (Shape* s) {
    Circle* c = dynamic_cast<Circle*>(s);
    this->Q1 = c->Q1;
    *this->box = *c->box;
}
// Disc centered at (xmid, ymid) with radius ymid/2 (the 0.75*ymid^2
// constant makes x^2+y^2-2*xmid*x-2*ymid*y+... <= (ymid/2)^2).
Circle::Circle () {
    Q1 = Quadratic_Eq(1, 0, 1, -2*xmid, -2*ymid, xmid*xmid + 0.75*ymid*ymid, false);
    box = new Bounding_box( 0.5*xmid,   0.5*ymid,
                            3*xmid/2+1, 0.5*ymid,
                            3*xmid/2+1, 3*ymid/2+1,
                            0.5*xmid,   3*ymid/2+1);
}
// ========================= Circle_II =============================
// Applies the affine map T to both circle equations and the box.
void Circle_II::transform (Affine_map T) {
    Q1.transform(T); Q2.transform(T);
    box->transform(T);
}
// Rasterizes an annulus: inside the outer disc Q1 but outside the
// concentric inner disc Q2.
void Circle_II::gen_bitmap() {
    CPU_Shape::gen_bitmap();  // lazily allocate the bitmap buffer
    int x, y;
    for (int i = 0; i < xmax*ymax; i++) {
        x = (i % xmax) * 1 ;
        y = (i / xmax) * 1 ;
        if ( Q1.in_halfspace(x,y) &&
             !Q2.in_halfspace(x,y) )
            bitmap[i] = 1;
        else bitmap[i] = 0;
    }
}
// Copies the analytic description (discs + box) from another Circle_II.
void Circle_II::copy (Shape* s) {
    Circle_II* c = dynamic_cast<Circle_II*>(s);
    this->Q1 = c->Q1; this->Q2 = c->Q2;
    *this->box = *c->box;
}
// Two concentric discs at (xmid, ymid); the larger constant term (0.9 vs
// 0.75) makes Q2 the smaller disc.
Circle_II::Circle_II () {
    Q1 = Quadratic_Eq(1, 0, 1, -2*xmid, -2*ymid, xmid*xmid + 0.75*ymid*ymid, false);
    Q2 = Quadratic_Eq(1, 0, 1, -2*xmid, -2*ymid, xmid*xmid + 0.9*ymid*ymid, false);
    box = new Bounding_box( 0.5*xmid,   0.5*ymid,
                            3*xmid/2+1, 0.5*ymid,
                            3*xmid/2+1, 3*ymid/2+1,
                            0.5*xmid,   3*ymid/2+1);
}
// ========================= Circle_III =============================
// Applies the affine map T to all three circle equations and the box.
void Circle_III::transform (Affine_map T) {
    Q1.transform(T); Q2.transform(T); Q3.transform(T);
    box->transform(T);
}
// Rasterizes a ring plus an inner disc: inside Q1 and either outside Q2
// or inside the innermost disc Q3.
void Circle_III::gen_bitmap() {
    CPU_Shape::gen_bitmap();  // lazily allocate the bitmap buffer
    int x, y;
    for (int i = 0; i < xmax*ymax; i++) {
        x = (i % xmax) * 1 ;
        y = (i / xmax) * 1 ;
        if ( Q1.in_halfspace(x,y) &&
             (!Q2.in_halfspace(x,y) || Q3.in_halfspace(x,y)) )
            bitmap[i] = 1;
        else bitmap[i] = 0;
    }
}
// Copies the analytic description (discs + box) from another Circle_III.
void Circle_III::copy (Shape* s) {
    Circle_III* c = dynamic_cast<Circle_III*>(s);
    this->Q1 = c->Q1; this->Q2 = c->Q2; this->Q3 = c->Q3;
    *this->box = *c->box;
}
// Three concentric discs at (xmid, ymid), shrinking as the constant term
// grows (0.75 < 0.9 < 0.95).
Circle_III::Circle_III () {
    Q1 = Quadratic_Eq(1, 0, 1, -2*xmid, -2*ymid, xmid*xmid + 0.75*ymid*ymid, false);
    Q2 = Quadratic_Eq(1, 0, 1, -2*xmid, -2*ymid, xmid*xmid + 0.9*ymid*ymid, false);
    Q3 = Quadratic_Eq(1, 0, 1, -2*xmid, -2*ymid, xmid*xmid + 0.95*ymid*ymid, false);
    box = new Bounding_box( 0.5*xmid,   0.5*ymid,
                            3*xmid/2+1, 0.5*ymid,
                            3*xmid/2+1, 3*ymid/2+1,
                            0.5*xmid,   3*ymid/2+1);
}
// ========================= Circle_IV =============================
// Applies the affine map T to all four circle equations and the box.
void Circle_IV::transform (Affine_map T) {
    box->transform(T);
    Q1.transform(T); Q2.transform(T); Q3.transform(T); Q4.transform(T);
}
// Rasterizes two nested rings: inside Q1 and (outside Q2, or inside Q3
// but outside the innermost disc Q4).
void Circle_IV::gen_bitmap() {
    CPU_Shape::gen_bitmap();  // lazily allocate the bitmap buffer
    int x, y;
    for (int i = 0; i < xmax*ymax; i++) {
        x = (i % xmax) * 1 ;
        y = (i / xmax) * 1 ;
        if ( Q1.in_halfspace(x,y) &&
             (!Q2.in_halfspace(x,y) || ( Q3.in_halfspace(x,y) && !Q4.in_halfspace(x,y) )) )
            bitmap[i] = 1;
        else bitmap[i] = 0;
    }
}
// Copies the analytic description (discs + box) from another Circle_IV.
void Circle_IV::copy (Shape* s) {
    Circle_IV* c = dynamic_cast<Circle_IV*>(s);
    this->Q1 = c->Q1; this->Q2 = c->Q2; this->Q3 = c->Q3; this->Q4 = c->Q4;
    *this->box = *c->box;
}
// Four concentric discs at (xmid, ymid), shrinking as the constant term
// grows (0.75 < 0.9 < 0.95 < 0.995).
Circle_IV::Circle_IV () {
    Q1 = Quadratic_Eq(1, 0, 1, -2*xmid, -2*ymid, xmid*xmid + 0.75*ymid*ymid, false);
    Q2 = Quadratic_Eq(1, 0, 1, -2*xmid, -2*ymid, xmid*xmid + 0.9*ymid*ymid, false);
    Q3 = Quadratic_Eq(1, 0, 1, -2*xmid, -2*ymid, xmid*xmid + 0.95*ymid*ymid, false);
    Q4 = Quadratic_Eq(1, 0, 1, -2*xmid, -2*ymid, xmid*xmid + 0.995*ymid*ymid, false);
    box = new Bounding_box( 0.5*xmid,   0.5*ymid,
                            3*xmid/2+1, 0.5*ymid,
                            3*xmid/2+1, 3*ymid/2+1,
                            0.5*xmid,   3*ymid/2+1);
}
// ========================= Rhombus =============================
// Applies the affine map T to all eight edge half-planes and the box.
void Rhombus::transform (Affine_map T) {
    L1.transform(T); L2.transform(T); L3.transform(T); L4.transform(T);
    L5.transform(T); L6.transform(T); L7.transform(T); L8.transform(T);
    box->transform(T);
}
// Rasterizes a rhombus outline: inside the outer rhombus (L1..L4) but
// not inside the concentric inner rhombus (L5..L8).
void Rhombus::gen_bitmap() {
    CPU_Shape::gen_bitmap();  // lazily allocate the bitmap buffer
    int x, y;
    for (int i = 0; i < xmax*ymax; i++) {
        x = (i % xmax) * 1 ;
        y = (i / xmax) * 1 ;
        if ( L1.in_halfspace(x,y) &&
             L2.in_halfspace(x,y) &&
             L3.in_halfspace(x,y) &&
             L4.in_halfspace(x,y) &&
             !(L5.in_halfspace(x,y) &&
               L6.in_halfspace(x,y) &&
               L7.in_halfspace(x,y) &&
               L8.in_halfspace(x,y)) )
            bitmap[i] = 1;
        else bitmap[i] = 0;
    }
}
// Copies the analytic description (edges + box) from another Rhombus.
void Rhombus::copy (Shape* s) {
    Rhombus* r = dynamic_cast<Rhombus*>(s);
    this->L1 = r->L1; this->L2 = r->L2; this->L3 = r->L3; this->L4 = r->L4;
    this->L5 = r->L5; this->L6 = r->L6; this->L7 = r->L7; this->L8 = r->L8;
    *this->box = *r->box;
}
// Two concentric 45-degree rhombi centered at (xmid, ymid): outer
// half-diagonal 5*ymid/3-ish (L1..L4) and inner (L5..L8).
Rhombus::Rhombus () {
    L1 = Linear_Eq(1, 1, -xmid - ymid/3, true);
    L2 = Linear_Eq(1, -1, -xmid+ymid/3, false);
    L3 = Linear_Eq(1, 1, -xmid-5*ymid/3, false);
    L4 = Linear_Eq(1,-1,-xmid+5*ymid/3, true);
    L5 = Linear_Eq(1, 1, -xmid - 2*ymid/3, true);
    L6 = Linear_Eq(1, -1, -xmid+2*ymid/3 , false);
    L7 = Linear_Eq(1, 1, -xmid-4*ymid/3, false);
    L8 = Linear_Eq(1, -1, -xmid + 4*ymid/3, true);
    box = new Bounding_box( xmid/3,     ymid/3,
                            5*xmid/3+1, ymid/3,
                            5*xmid/3+1, 5*ymid/3+1,
                            xmid/3,     5*ymid/3+1);
}
// ========================= Rhombus_II =============================
// Applies the affine map T to all seven edge half-planes and the box.
void Rhombus_II::transform (Affine_map T) {
    L1.transform(T); L2.transform(T); L3.transform(T); L4.transform(T);
    L5.transform(T); L6.transform(T); L7.transform(T);
    box->transform(T);
}
// Rasterizes the outer rhombus (L1..L4) minus a triangular notch
// (intersection of L5..L7).
void Rhombus_II::gen_bitmap() {
    CPU_Shape::gen_bitmap();  // lazily allocate the bitmap buffer
    int x, y;
    for (int i = 0; i < xmax*ymax; i++) {
        x = (i % xmax) * 1 ;
        y = (i / xmax) * 1 ;
        if ( L1.in_halfspace(x,y) &&
             L2.in_halfspace(x,y) &&
             L3.in_halfspace(x,y) &&
             L4.in_halfspace(x,y) &&
             !(L5.in_halfspace(x,y) &&
               L6.in_halfspace(x,y) &&
               L7.in_halfspace(x,y)) )
            bitmap[i] = 1;
        else bitmap[i] = 0;
    }
}
// Copies the analytic description (edges + box) from another Rhombus_II.
void Rhombus_II::copy (Shape* s) {
    Rhombus_II* r = dynamic_cast<Rhombus_II*>(s);
    this->L1 = r->L1; this->L2 = r->L2; this->L3 = r->L3; this->L4 = r->L4;
    this->L5 = r->L5; this->L6 = r->L6; this->L7 = r->L7;
    *this->box = *r->box;
}
// Same outer rhombus as Rhombus; L5..L7 carve out a triangle on the
// left half (x <= xmid).
Rhombus_II::Rhombus_II () {
    L1 = Linear_Eq(1, 1, -xmid - ymid/3, true);
    L2 = Linear_Eq(1, -1, -xmid+ymid/3, false);
    L3 = Linear_Eq(1, 1, -xmid-5*ymid/3, false);
    L4 = Linear_Eq(1,-1,-xmid+5*ymid/3, true);
    L5 = Linear_Eq(1, 1, -xmid - ymid/2, true);
    L6 = Linear_Eq(1, -1, -xmid + 3*ymid/2, true);
    L7 = Linear_Eq(1, 0, -xmid, false);
    box = new Bounding_box( xmid/3,     ymid/3,
                            5*xmid/3+1, ymid/3,
                            5*xmid/3+1, 5*ymid/3+1,
                            xmid/3,     5*ymid/3+1);
}
// ========================= Rhombus_III =============================
void Rhombus_III::transform (Affine_map T) {
  // Apply the affine map T to every boundary equation and the bounding box.
  L1.transform(T); L2.transform(T); L3.transform(T);
  L4.transform(T); L5.transform(T); L6.transform(T);
  L7.transform(T); L8.transform(T); L9.transform(T);
  L10.transform(T); L11.transform(T); L12.transform(T);
  box->transform(T);
}
void Rhombus_III::gen_bitmap() {
  // Rasterize: the outer rhombus (L1-L4) minus four cut-outs, each the
  // intersection of three half-spaces.
  CPU_Shape::gen_bitmap();
  int i = 0;
  for (int y = 0; y < ymax; ++y) {
    for (int x = 0; x < xmax; ++x, ++i) {
      bool inside = L1.in_halfspace(x, y) && L2.in_halfspace(x, y) &&
                    L3.in_halfspace(x, y) && L4.in_halfspace(x, y);
      if (inside) {
        inside =
          !(L5.in_halfspace(x, y) && L6.in_halfspace(x, y) && L9.in_halfspace(x, y)) &&
          !(L5.in_halfspace(x, y) && L8.in_halfspace(x, y) && L10.in_halfspace(x, y)) &&
          !(L7.in_halfspace(x, y) && L6.in_halfspace(x, y) && L11.in_halfspace(x, y)) &&
          !(L7.in_halfspace(x, y) && L8.in_halfspace(x, y) && L12.in_halfspace(x, y));
      }
      bitmap[i] = inside ? 1 : 0;
    }
  }
}
void Rhombus_III::copy (Shape* s) {
  // Copy all twelve boundary equations and the bounding box from another
  // Rhombus_III.  NOTE(review): dynamic_cast result is not null-checked.
  Rhombus_III* r = dynamic_cast<Rhombus_III*>(s);
  this->L1 = r->L1; this->L2 = r->L2; this->L3 = r->L3; this->L4 = r->L4;
  this->L5 = r->L5; this->L6 = r->L6; this->L7 = r->L7; this->L8 = r->L8;
  this->L9 = r->L9; this->L10 = r->L10; this->L11 = r->L11; this->L12 = r->L12;
  *this->box = *r->box;
}
Rhombus_III::Rhombus_III () {
  // Outer rhombus L1-L4; L5-L8 are axis-aligned lines near the center and
  // L9-L12 diagonal lines; gen_bitmap combines them into four cut-outs.
  L1 = Linear_Eq(1, 1, -xmid - ymid/3, true);
  L2 = Linear_Eq(1, -1, -xmid+ymid/3, false);
  L3 = Linear_Eq(1, 1, -xmid-5*ymid/3, false);
  L4 = Linear_Eq(1,-1,-xmid+5*ymid/3, true);
  L5 = Linear_Eq(1, 0, -15*xmid/16, false);
  L6 = Linear_Eq(0, 1, -15*ymid/16, false);
  L7 = Linear_Eq(1, 0, -17*xmid/16, true);
  L8 = Linear_Eq(0, 1, -17*ymid/16, true);
  L9 = Linear_Eq(1, 1, -xmid - ymid/2, true);
  L10 = Linear_Eq(1, -1, -xmid + 3*ymid/2, true);
  L11 = Linear_Eq(1, -1, -xmid+ymid/2 , false);
  L12 = Linear_Eq(1, 1, -xmid-3*ymid/2, false);
  box = new Bounding_box( xmid/3, ymid/3,
                          5*xmid/3+1, ymid/3,
                          5*xmid/3+1, 5*ymid/3+1,
                          xmid/3, 5*ymid/3+1);
}
// ========================= Rhombus_IV =============================
void Rhombus_IV::transform (Affine_map T) {
  // Apply the affine map T to every boundary equation and the bounding box.
  L1.transform(T); L2.transform(T); L3.transform(T);
  L4.transform(T); L5.transform(T); L6.transform(T);
  box->transform(T);
}
void Rhombus_IV::gen_bitmap() {
  // Rasterize: the union of two triangular regions, each the intersection
  // of three half-spaces (L1-L3 and L4-L6).
  CPU_Shape::gen_bitmap();
  int i = 0;
  for (int y = 0; y < ymax; ++y) {
    for (int x = 0; x < xmax; ++x, ++i) {
      const bool upper = L1.in_halfspace(x, y) && L2.in_halfspace(x, y) &&
                         L3.in_halfspace(x, y);
      const bool lower = L4.in_halfspace(x, y) && L5.in_halfspace(x, y) &&
                         L6.in_halfspace(x, y);
      bitmap[i] = (upper || lower) ? 1 : 0;
    }
  }
}
void Rhombus_IV::copy (Shape* s) {
  // Copy all boundary equations and the bounding box from another
  // Rhombus_IV.  NOTE(review): dynamic_cast result is not null-checked.
  Rhombus_IV* r = dynamic_cast<Rhombus_IV*>(s);
  this->L1 = r->L1; this->L2 = r->L2; this->L3 = r->L3; this->L4 = r->L4;
  this->L5 = r->L5; this->L6 = r->L6;
  *this->box = *r->box;
}
Rhombus_IV::Rhombus_IV () {
  // Two triangles (L1-L3 and L4-L6) whose union gen_bitmap rasterizes;
  // both are cut from the diagonals through (xmid, ymid).
  L1 = Linear_Eq(1, -1, ymid - xmid, true);
  L2 = Linear_Eq(1, 1, -ymid - xmid, false);
  L3 = Linear_Eq(0, 1, -ymid/2, true);
  L4 = Linear_Eq(1, -1, ymid-xmid, false);
  L5 = Linear_Eq(1, 1, -ymid - xmid, true);
  L6 = Linear_Eq(0, 1, -3*ymid/2, false);
  box = new Bounding_box( xmid/2, ymid/2,
                          3*xmid/2+1, ymid/2,
                          3*xmid/2+1, 3*ymid/2+1,
                          xmid/2, 3*ymid/2+1);
}
// ========================= Diamond =============================
void Diamond::transform (Affine_map T) {
  // Apply the affine map T to the four quadratic curves, the four linear
  // boundaries, and the bounding box.
  Q1.transform(T); Q2.transform(T); Q3.transform(T); Q4.transform(T);
  L1.transform(T); L2.transform(T); L3.transform(T); L4.transform(T);
  box->transform(T);
}
void Diamond::gen_bitmap() {
  // Rasterize: a pixel is on only when it satisfies all four quadratic
  // constraints (Q1-Q4) and all four linear bounds (L1-L4).
  CPU_Shape::gen_bitmap();
  int i = 0;
  for (int y = 0; y < ymax; ++y) {
    for (int x = 0; x < xmax; ++x, ++i) {
      const bool curves = Q1.in_halfspace(x, y) && Q2.in_halfspace(x, y) &&
                          Q3.in_halfspace(x, y) && Q4.in_halfspace(x, y);
      const bool bounds = L1.in_halfspace(x, y) && L2.in_halfspace(x, y) &&
                          L3.in_halfspace(x, y) && L4.in_halfspace(x, y);
      bitmap[i] = (curves && bounds) ? 1 : 0;
    }
  }
}
void Diamond::copy (Shape* s) {
  // Copy all curve and line equations plus the bounding box from another
  // Diamond.  NOTE(review): dynamic_cast result is not null-checked.
  Diamond* d = dynamic_cast<Diamond*>(s);
  this->Q1 = d->Q1; this->Q2 = d->Q2; this->Q3 = d->Q3; this->Q4 = d->Q4;
  this->L1 = d->L1; this->L2 = d->L2; this->L3 = d->L3; this->L4 = d->L4;
  *this->box = *d->box;
}
Diamond::Diamond () {
  // Curved-sided diamond: four parabolic arcs Q1-Q4 (vertices x1..x4 with
  // coefficients A*, offsets B*) clipped by the rectangle L1-L4.
  float x1 = xmid/4, A1 = -8*ymid/(3*xmid*(3*xmid - 4*x1)), B1 = ymid/3 - A1 * (xmid-x1)*(xmid-x1),
        x2 = 7*xmid/4, A2 = 8*ymid/(3*xmid*(5*xmid - 4*x2)), B2 = ymid/3 - A2 * (xmid-x2)*(xmid-x2),
        x3 = x1, A3 = -A1, B3 = 5*ymid/3 - A3 * (xmid-x3)*(xmid-x3),
        x4 = x2, A4 = -A2, B4 = 5*ymid/3 - A4 * (xmid-x4)*(xmid-x4);
  // Parabolas written in general conic form: A*x^2 - 2*A*x0*x - y + (A*x0^2 + B).
  Q1 = Quadratic_Eq(A1, 0, 0, -2*A1*x1, -1, A1*x1*x1 + B1, false);
  Q2 = Quadratic_Eq(A2, 0, 0, -2*A2*x2, -1, A2*x2*x2 + B2, false);
  Q3 = Quadratic_Eq(A3, 0, 0, -2*A3*x3, -1, A3*x3*x3 + B3, true);
  Q4 = Quadratic_Eq(A4, 0, 0, -2*A4*x4, -1, A4*x4*x4 + B4, true);
  L1 = Linear_Eq(1, 0, -xmid/2, true);
  L2 = Linear_Eq(1, 0, -3*xmid/2, false);
  L3 = Linear_Eq(0, 1, -ymid/3, true);
  L4 = Linear_Eq(0, 1, -5*ymid/3, false);
  box = new Bounding_box( xmid/2, ymid/3,
                          3*xmid/2+1, ymid/3,
                          3*xmid/2+1, 5*ymid/3+1,
                          xmid/2, 5*ymid/3+1);
}
// ========================= Club =============================
void Club::transform (Affine_map T) {
  // Apply the affine map T to all curves, all linear boundaries, and the
  // bounding box.
  Q1.transform(T); Q2.transform(T); Q3.transform(T); Q4.transform(T); Q5.transform(T);
  L1.transform(T); L2.transform(T); L3.transform(T); L4.transform(T);
  L5.transform(T); L6.transform(T); L7.transform(T);
  box->transform(T);
}
void Club::gen_bitmap() {
  // Rasterize: the union of three circles (Q1-Q3) and the stem region
  // (Q4, Q5, L5-L7), everything clipped by the rectangle L1-L4.
  CPU_Shape::gen_bitmap();
  int i = 0;
  for (int y = 0; y < ymax; ++y) {
    for (int x = 0; x < xmax; ++x, ++i) {
      const bool stem = Q4.in_halfspace(x, y) && Q5.in_halfspace(x, y) &&
                        L5.in_halfspace(x, y) && L6.in_halfspace(x, y) &&
                        L7.in_halfspace(x, y);
      const bool body = Q1.in_halfspace(x, y) || Q2.in_halfspace(x, y) ||
                        Q3.in_halfspace(x, y) || stem;
      const bool clip = L1.in_halfspace(x, y) && L2.in_halfspace(x, y) &&
                        L3.in_halfspace(x, y) && L4.in_halfspace(x, y);
      bitmap[i] = (body && clip) ? 1 : 0;
    }
  }
}
void Club::copy (Shape* s) {
  // Copy all curve and line equations plus the bounding box from another
  // Club.  NOTE(review): dynamic_cast result is not null-checked.
  Club* c = dynamic_cast<Club*>(s);
  this->Q1 = c->Q1; this->Q2 = c->Q2; this->Q3 = c->Q3; this->Q4 = c->Q4; this->Q5 = c->Q5;
  this->L1 = c->L1; this->L2 = c->L2; this->L3 = c->L3; this->L4 = c->L4;
  this->L5 = c->L5; this->L6 = c->L6; this->L7 = c->L7;
  *this->box = *c->box;
}
Club::Club () {
  // Club suit symbol: three circular lobes plus a parabolic stem, all
  // clipped to the rectangle L1-L4.
  float A = - 9*ymid/(2*xmid*xmid), B = 7*ymid/4, x0 = 2*xmid/3, x1 = 4*xmid/3;
  // Three circles
  Q1 = Quadratic_Eq(1, 0, 1, -2*xmid, -5*ymid/4, xmid*xmid + 161*ymid*ymid/576, false);
  Q2 = Quadratic_Eq(1, 0, 1, -3*xmid/2, -9*ymid/4, 9*xmid*xmid/16 + 665*ymid*ymid/576, false);
  Q3 = Quadratic_Eq(1, 0, 1, -5*xmid/2, -9*ymid/4, 25*xmid*xmid/16 + 665*ymid*ymid/576, false);
  // The stem
  Q4 = Quadratic_Eq(A, 0, 0, -2*A*x0, -1, A*x0*x0 + B, false);
  Q5 = Quadratic_Eq(A, 0, 0, -2*A*x1, -1, A*x1*x1 + B, false);
  L5 = Linear_Eq(1, 0, -2*xmid/3, true);
  L6 = Linear_Eq(1, 0, -4*xmid/3, false);
  L7 = Linear_Eq(0, 1, -5*ymid/3, false);
  // Boundaries
  L1 = Linear_Eq(1, 0, -5*xmid/12, true);
  L2 = Linear_Eq(1, 0, -19*xmid/12, false);
  L3 = Linear_Eq(0, 1, -7*ymid/24, true);
  L4 = Linear_Eq(0, 1, -5*ymid/3, false);
  box = new Bounding_box( 5*xmid/12, 7*ymid/24,
                          19*xmid/12+1, 7*ymid/24,
                          19*xmid/12+1, 5*ymid/3+1,
                          5*xmid/12, 5*ymid/3+1);
}
// ========================= Heart =============================
void Heart::transform (Affine_map T) {
  // Apply the affine map T to the two ellipses, two cubics, the eight
  // clipping lines, and the bounding box.
  Q1.transform(T); Q2.transform(T); C3.transform(T); C4.transform(T);
  L11.transform(T); L12.transform(T); L21.transform(T); L22.transform(T);
  L31.transform(T); L32.transform(T); L41.transform(T); L42.transform(T);
  box->transform(T);
}
void Heart::gen_bitmap() {
  // Rasterize: the union of four sectors, each a curve (Q1, Q2, C3, C4)
  // clipped by its own pair of half-planes.
  CPU_Shape::gen_bitmap();
  int i = 0;
  for (int y = 0; y < ymax; ++y) {
    for (int x = 0; x < xmax; ++x, ++i) {
      bool on = (Q1.in_halfspace(x, y) && L11.in_halfspace(x, y) && L12.in_halfspace(x, y)) ||
                (Q2.in_halfspace(x, y) && L21.in_halfspace(x, y) && L22.in_halfspace(x, y)) ||
                (C3.in_halfspace(x, y) && L31.in_halfspace(x, y) && L32.in_halfspace(x, y)) ||
                (C4.in_halfspace(x, y) && L41.in_halfspace(x, y) && L42.in_halfspace(x, y));
      bitmap[i] = on ? 1 : 0;
    }
  }
}
void Heart::copy (Shape* s) {
  // Copy all curve and line equations plus the bounding box from another
  // Heart.  NOTE(review): dynamic_cast result is not null-checked.
  Heart* h = dynamic_cast<Heart*>(s);
  this->Q1 = h->Q1; this->Q2 = h->Q2; this->C3 = h->C3; this->C4 = h->C4;
  this->L11 = h->L11; this->L12 = h->L12; this->L21 = h->L21; this->L22 = h->L22;
  this->L31 = h->L31; this->L32 = h->L32; this->L41 = h->L41; this->L42 = h->L42;
  *this->box = *h->box;
}
Heart::Heart () {
  // Heart suit symbol: two ellipses (Q1, Q2) form the lobes and two cubic
  // curves (C3, C4) form the lower point; each curve is restricted to its
  // own quadrant by the L** half-planes.
  float A = xmid/3 + xmid/25,          // ellipse semi-axis (x)
        B = ymid/2,                    // ellipse semi-axis (y)
        C = (5*ymid) / (4*xmid*((2*xmid/5)*(2*xmid/5) + 999)),
        D = 11*ymid/8,
        x0 = 2*xmid/3 + xmid/25, y0 = 7*ymid/8,   // left lobe center
        x1 = 4*xmid/3 - xmid/25, y1 = 7*ymid/8;   // right lobe center
  Q1 = Quadratic_Eq(B*B, 0, A*A, -B*B*2*x0, -A*A*2*y0, B*B*x0*x0 + A*A*y0*y0 - A*A*B*B, false);
  Q2 = Quadratic_Eq(B*B, 0, A*A, -B*B*2*x1, -A*A*2*y1, B*B*x1*x1 + A*A*y1*y1 - A*A*B*B, false);
  // C3, non-expanded: ( ( C * (x - 2*xm/3) * 0.001 * ( (x-2*xm/3)*(x-2*xm/3) + 999) + D - y ) >= 0 );
  C3 = Cubic_Eq(C, 0, 0, 0, -2*C*xmid, 0, 0, C*(1.333*xmid*xmid+999), -1, D - C*(8*xmid*xmid*xmid/27 + 666*xmid), true);
  //            x^3           x^2          xy y^2  x                   y   1
  C4 = Cubic_Eq(-C, 0, 0, 0, 4*C*xmid, 0, 0, -C*(16*xmid*xmid/3 + 999), -1, D + C*(64*xmid*xmid*xmid/27 + 1332*xmid), true);
  L11 = Linear_Eq(0, 1, -ymid, false);
  L12 = Linear_Eq(1, 0, -xmid, false);
  L21 = L11;
  L22 = Linear_Eq(1, 0, -xmid, true);
  L31 = Linear_Eq(0, 1, -ymid, true);
  L32 = L12;
  L41 = L31;
  L42 = L22;
  // Lowest y reached by the cubic point; used for the box's bottom edge.
  float ybot = C*xmid/3*( xmid*xmid/9 + 999 ) + D;
  box = new Bounding_box( x0-A, y0-B,
                          x1+A+1, y0-B,
                          x1+A+1, ybot+1,
                          x0-A, ybot+1);
}
// ========================= Spade =============================
void Spade::transform (Affine_map T) {
  // Apply the affine map T to all curves (heart part plus stem), all
  // clipping lines, and the bounding box.
  Q1.transform(T); Q2.transform(T); C3.transform(T); C4.transform(T);
  L11.transform(T); L12.transform(T); L21.transform(T); L22.transform(T);
  L31.transform(T); L32.transform(T); L41.transform(T); L42.transform(T);
  Q5.transform(T); Q6.transform(T);
  L1.transform(T); L2.transform(T); L3.transform(T);
  box->transform(T);
}
void Spade::gen_bitmap() {
  // Rasterize: the four sectors of the upside-down heart (Q1, Q2, C3, C4,
  // each with its clip pair) unioned with the stem (Q5, Q6, L1-L3).
  CPU_Shape::gen_bitmap();
  int i = 0;
  for (int y = 0; y < ymax; ++y) {
    for (int x = 0; x < xmax; ++x, ++i) {
      const bool stem = Q5.in_halfspace(x, y) && Q6.in_halfspace(x, y) &&
                        L1.in_halfspace(x, y) && L2.in_halfspace(x, y) &&
                        L3.in_halfspace(x, y);
      bool on = (Q1.in_halfspace(x, y) && L11.in_halfspace(x, y) && L12.in_halfspace(x, y)) ||
                (Q2.in_halfspace(x, y) && L21.in_halfspace(x, y) && L22.in_halfspace(x, y)) ||
                (C3.in_halfspace(x, y) && L31.in_halfspace(x, y) && L32.in_halfspace(x, y)) ||
                (C4.in_halfspace(x, y) && L41.in_halfspace(x, y) && L42.in_halfspace(x, y)) ||
                stem;
      bitmap[i] = on ? 1 : 0;
    }
  }
}
void Spade::copy (Shape* s) {
  // Copy all curve and line equations plus the bounding box from another
  // Spade.  NOTE(review): dynamic_cast result is not null-checked.
  Spade* h = dynamic_cast<Spade*>(s);
  this->Q1 = h->Q1; this->Q2 = h->Q2; this->C3 = h->C3; this->C4 = h->C4;
  this->Q5 = h->Q5; this->Q6 = h->Q6;
  this->L11 = h->L11; this->L12 = h->L12; this->L21 = h->L21; this->L22 = h->L22;
  this->L31 = h->L31; this->L32 = h->L32; this->L41 = h->L41; this->L42 = h->L42;
  this->L1 = h->L1; this->L2 = h->L2; this->L3 = h->L3;
  *this->box = *h->box;
}
Spade::Spade () {
  // Spade suit symbol: an upside-down heart (Q1, Q2, C3, C4 with inverted
  // clip lines relative to Heart) plus the same parabolic stem as Club.
  // The upside-down heart
  float A = xmid/3 + xmid/25,
        B = 2*ymid/5,
        C = (5*ymid) / (4*xmid*((2*xmid/5)*(2*xmid/5) + 999)),
        D = 11*ymid/8 - 2*ymid,
        x0 = 2*xmid/3 + xmid/25, y0 = 17*ymid/16, // Centers of the two ellipses
        x1 = 4*xmid/3 - xmid/25, y1 = 17*ymid/16;
  Q1 = Quadratic_Eq(B*B, 0, A*A, -B*B*2*x0, -A*A*2*y0, B*B*x0*x0 + A*A*y0*y0 - A*A*B*B, false);
  Q2 = Quadratic_Eq(B*B, 0, A*A, -B*B*2*x1, -A*A*2*y1, B*B*x1*x1 + A*A*y1*y1 - A*A*B*B, false);
  C3 = Cubic_Eq(C, 0, 0, 0, -2*C*xmid, 0, 0, C*(1.333*xmid*xmid+999), 1, D - C*(8*xmid*xmid*xmid/27 + 666*xmid), true);
  //            x^3           x^2          xy y^2  x                  y  1
  C4 = Cubic_Eq(-C, 0, 0, 0, 4*C*xmid, 0, 0, -C*(16*xmid*xmid/3 + 999), 1, D + C*(64*xmid*xmid*xmid/27 + 1332*xmid), true);
  L11 = Linear_Eq(0, 1, -ymid, true);
  L12 = Linear_Eq(1, 0, -xmid, false);
  L21 = L11;
  L22 = Linear_Eq(1, 0, -xmid, true);
  L31 = Linear_Eq(0, 1, -ymid, false);
  L32 = L12;
  L41 = L31;
  L42 = L22;
  // The stem
  float E = - 9*ymid/(2*xmid*xmid), F = 7*ymid/4, x2 = 2*xmid/3, x3 = 4*xmid/3;
  Q5 = Quadratic_Eq(E, 0, 0, -2*E*x2, -1, E*x2*x2 + F, false);
  Q6 = Quadratic_Eq(E, 0, 0, -2*E*x3, -1, E*x3*x3 + F, false);
  L1 = Linear_Eq(1, 0, -2*xmid/3, true);
  L2 = Linear_Eq(1, 0, -4*xmid/3, false);
  L3 = Linear_Eq(0, 1, -5*ymid/3, false);
  // Highest y reached by the cubic point; used for the box's top edge.
  float ytop = -C*xmid/3*( xmid*xmid/9 + 999 ) - D;
  box = new Bounding_box( x0-A, ytop,
                          x1+A+1, ytop,
                          x1+A+1, 5*ymid/3+1,
                          x0-A, 5*ymid/3+1);
}
// =====================================================================
// =====================================================================
// =====================================================================
CPU_Shape* allocate_mem_by_shape_type(Shape_type type) {
  // Factory: heap-allocate the concrete CPU_Shape matching the tag.
  // Returns a null pointer when the tag is unrecognized.
  switch (type) {
    case TRIANGLE:    return new Triangle();
    case SQUARE:      return new Square();
    case PENTAGON:    return new Pentagon();
    case HEXAGON:     return new Hexagon();
    case CIRCLE:      return new Circle();
    case CIRCLE_II:   return new Circle_II();
    case CIRCLE_III:  return new Circle_III();
    case CIRCLE_IV:   return new Circle_IV();
    case RHOMBUS:     return new Rhombus();
    case RHOMBUS_II:  return new Rhombus_II();
    case RHOMBUS_III: return new Rhombus_III();
    case RHOMBUS_IV:  return new Rhombus_IV();
    case HEART:       return new Heart();
    case DIAMOND:     return new Diamond();
    case CLUB:        return new Club();
    case SPADE:       return new Spade();
  }
  return 0;
}
std::string name_by_shape_type(Shape_type type) {
  // Human-readable display name for each shape tag.
  const char* name = "Unrecognized shape in name_by_shape_type"; // Error
  switch (type) {
    case TRIANGLE:    name = "Triangle";    break;
    case SQUARE:      name = "Square";      break;
    case PENTAGON:    name = "Pentagon";    break;
    case HEXAGON:     name = "Hexagon";     break;
    case CIRCLE:      name = "Circle";      break;
    case CIRCLE_II:   name = "Circle II";   break;
    case CIRCLE_III:  name = "Circle III";  break;
    case CIRCLE_IV:   name = "Circle IV";   break;
    case RHOMBUS:     name = "Rhombus";     break;
    case RHOMBUS_II:  name = "Rhombus II";  break;
    case RHOMBUS_III: name = "Rhombus III"; break;
    case RHOMBUS_IV:  name = "Rhombus IV";  break;
    case HEART:       name = "Heart";       break;
    case DIAMOND:     name = "Diamond";     break;
    case CLUB:        name = "Club";        break;
    case SPADE:       name = "Spade";       break;
    default:                                break;
  }
  return name;
}
std::string file_name_by_shape_type(Shape_type type) {
  // Lowercase, space-free file-name stem for each shape tag.
  const char* name = "Unrecognized shape in file_name_by_shape_type"; // Error
  switch (type) {
    case TRIANGLE:    name = "triangle";   break;
    case SQUARE:      name = "square";     break;
    case PENTAGON:    name = "pentagon";   break;
    case HEXAGON:     name = "hexagon";    break;
    case CIRCLE:      name = "circle";     break;
    case CIRCLE_II:   name = "circleii";   break;
    case CIRCLE_III:  name = "circleiii";  break;
    case CIRCLE_IV:   name = "circleiv";   break;
    case RHOMBUS:     name = "rhombus";    break;
    case RHOMBUS_II:  name = "rhombusii";  break;
    case RHOMBUS_III: name = "rhombusiii"; break;
    case RHOMBUS_IV:  name = "rhombusiv";  break;
    case HEART:       name = "heart";      break;
    case DIAMOND:     name = "diamond";    break;
    case CLUB:        name = "club";       break;
    case SPADE:       name = "spade";      break;
    default:                               break;
  }
  return name;
}
|
9bc399cf108953ef94fff84443640d1bd3607bd4.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
#include <iostream>
#include <stdio.h>
/**
* Given a device array of integers, compute the index of first nonzero
* entry in the array, from left to right.
*
* For example, opearting on the array
*
* 0 1 2 3 4 5 6
* [0, 0, 0, -1, 0, 0, 2]
*
* gets the index 3. The result is stored into deg_ptr (initial value is n).
*
*/
template <int N_THD>
__global__ void degree_ker(const int *X, int n, int* deg_ptr) {
  // One thread per element.  Every thread that owns a nonzero entry races
  // to record the smallest such index via atomicMin; *deg_ptr must be
  // initialized to n (or larger) by the caller.
  const int idx = blockIdx.x * N_THD + threadIdx.x;
  if (idx >= n) return;        // grid-tail guard
  if (X[idx] == 0) return;     // only nonzero entries compete
  atomicMin(deg_ptr, idx);
}
using namespace std;
int main(int argc, char** argv) {
int n = 30;
if (argc > 1) n = atoi(argv[1]);
int *X = new int[n+1]();
srand(time(NULL));
int r = rand() % n + 1;
for (int i = 0; i < n; ++i) { X[i] = i / r; }
X[n] = n;
//for (int i = 0; i <= n; ++i) printf("%2d ", i);
//printf("\n");
//for (int i = 0; i <= n; ++i) printf("%2d ", X[i]);
//printf("\n");
int *X_d;
hipMalloc((void **)&X_d, sizeof(int)*(n+1));
hipMemcpy(X_d, X, sizeof(int)*(n+1), hipMemcpyHostToDevice);
const int nthd = 16;
int nb = (n / nthd) + ((n % nthd) ? 1 : 0);
int *deg_dev = X_d + n;
hipLaunchKernelGGL(( degree_ker<nthd>), dim3(nb), dim3(nthd), 0, 0, X_d, n, deg_dev);
int deg;
hipMemcpy(°, deg_dev, sizeof(int), hipMemcpyDeviceToHost);
printf("r = %d, index = %d\n", r, deg);
delete [] X;
hipFree(X_d);
return 0;
}
| 9bc399cf108953ef94fff84443640d1bd3607bd4.cu | #include <iostream>
#include <stdio.h>
/**
* Given a device array of integers, compute the index of first nonzero
* entry in the array, from left to right.
*
* For example, opearting on the array
*
* 0 1 2 3 4 5 6
* [0, 0, 0, -1, 0, 0, 2]
*
* gets the index 3. The result is stored into deg_ptr (initial value is n).
*
*/
template <int N_THD>
__global__ void degree_ker(const int *X, int n, int* deg_ptr) {
  // One thread per element.  Every thread that owns a nonzero entry races
  // to record the smallest such index via atomicMin; *deg_ptr must be
  // initialized to n (or larger) by the caller.
  const int idx = blockIdx.x * N_THD + threadIdx.x;
  if (idx >= n) return;        // grid-tail guard
  if (X[idx] == 0) return;     // only nonzero entries compete
  atomicMin(deg_ptr, idx);
}
using namespace std;
int main(int argc, char** argv) {
int n = 30;
if (argc > 1) n = atoi(argv[1]);
int *X = new int[n+1]();
srand(time(NULL));
int r = rand() % n + 1;
for (int i = 0; i < n; ++i) { X[i] = i / r; }
X[n] = n;
//for (int i = 0; i <= n; ++i) printf("%2d ", i);
//printf("\n");
//for (int i = 0; i <= n; ++i) printf("%2d ", X[i]);
//printf("\n");
int *X_d;
cudaMalloc((void **)&X_d, sizeof(int)*(n+1));
cudaMemcpy(X_d, X, sizeof(int)*(n+1), cudaMemcpyHostToDevice);
const int nthd = 16;
int nb = (n / nthd) + ((n % nthd) ? 1 : 0);
int *deg_dev = X_d + n;
degree_ker<nthd><<<nb, nthd>>>(X_d, n, deg_dev);
int deg;
cudaMemcpy(°, deg_dev, sizeof(int), cudaMemcpyDeviceToHost);
printf("r = %d, index = %d\n", r, deg);
delete [] X;
cudaFree(X_d);
return 0;
}
|
886c2b1231d43450f74c321d9697369d57449537.hip | // !!! This is a file automatically generated by hipify!!!
#include <hip/hip_runtime.h>
#include <device_launch_parameters.h>
#include <vector_functions.h>
#include <hip/hip_texture_types.h>
#include <hiprand/hiprand.h>
#include <hiprand/hiprand_kernel.h>
#include <texture_fetch_functions.h>
#include <opencv2/core/cuda_types.hpp>
#include <opencv2/core/cuda.hpp>
#include <opencv2/cudawarping.hpp>
#include <opencv2/cudafilters.hpp>
#include <opencv2/cudaimgproc.hpp>
#include <thrust/device_vector.h>
#include <thrust/host_vector.h>
#include <thrust/extrema.h>
#include <thrust/transform.h>
#include <thrust/iterator/counting_iterator.h>
#include <thrust/random.h>
#include <thrust/inner_product.h>
#include <thrust/execution_policy.h>
#include <iostream>
#include <cstdlib>
#include <ctime>
#include <cmath>
#include <mex.h>
#include "coarseToFinePoseEsti.h"
#include "apeCommon.h"
namespace ape {
static const int BLOCK_W = 8;
static const int BLOCK_H = 8;
static const int BLOCK_SIZE_2D = BLOCK_W*BLOCK_H;
static const int BLOCK_SIZE = 256;
static const int ORI_SAMPLE_NUM = 448;
// constant memory
__constant__ float2 const_tmp_coors[ORI_SAMPLE_NUM];
__constant__ float3 const_tmp_vals[ORI_SAMPLE_NUM];
// texture memory
texture<float4, hipTextureType2D, hipReadModeElementType> tex_img;
// Coarse-to-fine pose search.  For each pyramid level (coarsest first) it
// builds/refines an epsilon-covering set of 6-DoF poses, scores every pose
// against randomly sampled template pixels (distances computed on the GPU
// via the image texture), keeps the best poses, and shrinks/expands the net
// until the reprojection movement falls below one pixel.  The winning pose
// is written to ex_mat as a 4x4 extrinsic matrix.
void coarseToFinePoseEstimation(const cv::cuda::GpuMat &tmp,
                                const cv::cuda::GpuMat &img,
                                int prm_lvls,
                                bool photo_inva,
                                bool verbose,
                                ApeParams *ape_params,
                                double *ex_mat) {
  // bind texture memory (border addressing + bilinear filtering, unnormalized coords)
  tex_img.addressMode[0] = hipAddressModeBorder;
  tex_img.addressMode[1] = hipAddressModeBorder;
  tex_img.filterMode = hipFilterModeLinear;
  tex_img.normalized = false;
  hipChannelFormatDesc cuda_channel_format_desc = hipCreateChannelDesc<float4>();
  // poses
  size_t num_poses;
  thrust::device_vector<Pose> poses;
  thrust::device_vector<float> dists;
  thrust::host_vector<float> min_dists;   // best distance per refinement level (for plateau detection)
  // parameters
  const float2 tmp_real = make_float2(ape_params->tmp_real_w, ape_params->tmp_real_h);
  const float factor = 1 / 1.511f;        // net-shrink factor applied between levels
  // looser distance constraints when photometric invariance is requested
  const float2 constraint = (photo_inva) ? make_float2(0.075f, 0.15f) : make_float2(0.05f, 0.1f);
  int level = 0;
  int level_p = 0;                        // level at which sub-pixel precision was reached
  float begin_scale = 1.f/powf(4.f, prm_lvls-1.f);
  for (float scale = begin_scale; scale <= 1; scale *= 4) {
    if (verbose) {
      mexPrintf("pyramid: %f\n", scale);
      mexEvalString("drawnow;");
    }
    int sample_num = ORI_SAMPLE_NUM;
    // rescale image
    cv::cuda::GpuMat small_img, small_tmp;
    ApeParams small_ape_params;
    rescale(tmp, img, *ape_params, scale, &small_tmp, &small_img, &small_ape_params);
    hipBindTexture2D(0, &tex_img, small_img.data, &cuda_channel_format_desc,
                     small_ape_params.iw, small_ape_params.ih, small_img.step);
    // allocate sample memory
    thrust::device_vector<float2> tmp_coors(sample_num, make_float2(0, 0));
    thrust::device_vector<float3> tmp_vals(sample_num, make_float3(0, 0, 0));
    int2 tmp_dim = make_int2(small_ape_params.tw, small_ape_params.th);
    while (true) {
      // initialize the net
      if (level == 0)
        createSet(small_ape_params, &poses, &num_poses);
      ++level;
      randSample(small_tmp, tmp_dim, tmp_real, sample_num, &tmp_coors, &tmp_vals);
      dists.resize(num_poses);
      size_t ori_num_poses = num_poses;
      calDist(poses,
              make_float4(small_ape_params.fx, small_ape_params.fy, small_ape_params.cx, small_ape_params.cy),
              tmp_real,
              make_int2(small_ape_params.iw, small_ape_params.ih),
              photo_inva,
              num_poses,
              sample_num,
              &dists);
      hipDeviceSynchronize();
      auto dists_iter = thrust::min_element(dists.begin(), dists.end());
      float min_dist = *dists_iter;
      if (verbose) {
        mexPrintf(" -- level %d -- epsilon %.3f, Number of Poses %d, Minimum Dist. %f\n",
                  level, small_ape_params.epsilon, num_poses, min_dist);
        mexEvalString("drawnow;");
      }
      min_dists.push_back(min_dist);
      // early termination: small enough distance, or (at full scale) a
      // plateau — the distance stopped improving vs. the recent mean
      if ((min_dist < 0.005) || ((scale == 1) && (min_dist < 0.015)) ||
          ((level_p > 0) && (level_p != level) && (scale == 1) && (min_dist > calLastThreeTermsMean(min_dists, level-level_p)*0.97))) {
        auto idx = dists_iter - dists.begin();
        getExMat(poses[idx], ex_mat);
        hipUnbindTexture(&tex_img);
        return;
      }
      // get poses by distance
      bool too_high_percentage = getPosesByDistance(dists,
                                                    min_dist,
                                                    small_ape_params.epsilon,
                                                    &poses,
                                                    &num_poses);
      // expand the pose set for next round
      // if the initial pose set is not decent enough, recreate another new epsilon-covering set with smaller epsilon
      if ((level == 1)
          && ((too_high_percentage && (min_dist > constraint.x) && (ori_num_poses < 7500000))
              || ((min_dist > constraint.y) && (ori_num_poses < 5000000)))) {
        small_ape_params.ShrinkNet(0.9f);
        level = 0;                // restart this pyramid level with a denser net
        min_dists.clear();
      }
      else {
        expandPoses(factor, &poses, &small_ape_params, &num_poses);
        thrust::device_vector<float> tzs(num_poses, 0.0);
        const size_t BLOCK_NUM = ((num_poses) - 1) / BLOCK_SIZE + 1;
        hipLaunchKernelGGL(( fetchTzKernel), dim3(BLOCK_NUM), dim3(BLOCK_SIZE), 0, 0, thrust::raw_pointer_cast(poses.data()),
                           num_poses,
                           thrust::raw_pointer_cast(tzs.data()));
        // approximate worst-case image-space motion induced by one epsilon
        // step, using the mean tz over the surviving poses
        float pixelMaxMovement = small_ape_params.epsilon
                                 * ::max(small_ape_params.fx, small_ape_params.fy)
                                 * ::max(small_ape_params.tmp_real_w, small_ape_params.tmp_real_h)
                                 * 2
                                 / (thrust::reduce(tzs.begin(), tzs.end()) / num_poses);
        if (pixelMaxMovement < 1) {
          // sub-pixel precision reached: move on to the next pyramid level
          level_p = level + 1;
          ape_params->UpdateNet(small_ape_params);
          break;
        }
      }
    }
    hipUnbindTexture(&tex_img);
  }
  // final scoring at full resolution; report the overall best pose
  calDist(poses,
          make_float4(ape_params->fx, ape_params->fy, ape_params->cx, ape_params->cy),
          tmp_real,
          make_int2(ape_params->iw, ape_params->ih),
          photo_inva,
          num_poses,
          ORI_SAMPLE_NUM,
          &dists);
  hipDeviceSynchronize();
  auto dists_iter = thrust::min_element(dists.begin(), dists.end());
  auto idx = dists_iter - dists.begin();
  getExMat(poses[idx], ex_mat);
}
// Downscale image and template by `scale`, adjust the intrinsics to match,
// and low-pass filter both until the template's total variation drops below
// an empirical threshold (prevents aliasing in the coarse levels).
void rescale(const cv::cuda::GpuMat &tmp,
             const cv::cuda::GpuMat &img,
             const ApeParams &ape_params,
             const float scale,
             cv::cuda::GpuMat *small_tmp,
             cv::cuda::GpuMat *small_img,
             ApeParams *small_ape_params) {
  // NOTE(review): cv::cuda::resize historically does not support
  // INTER_AREA — confirm the installed OpenCV build honors this flag.
  cv::cuda::resize(img, *small_img, cv::Size(), scale, scale, CV_INTER_AREA);
  cv::cuda::resize(tmp, *small_tmp, cv::Size(), scale, scale, CV_INTER_AREA);
  *small_ape_params = ape_params;
  small_ape_params->iw = small_img->cols;
  small_ape_params->ih = small_img->rows;
  small_ape_params->tw = small_tmp->cols;
  small_ape_params->th = small_tmp->rows;
  // modify intrinsic papameters (principal point uses the half-pixel offset)
  const float offset = -0.5f;
  small_ape_params->fx = ape_params.fx * scale;
  small_ape_params->fy = ape_params.fy * scale;
  small_ape_params->cx = (ape_params.cx - offset) * scale + offset;
  small_ape_params->cy = (ape_params.cy - offset) * scale + offset;
  // expected projected template area at the geometric-mean depth
  float tz_square = ape_params.max_tz * ape_params.min_tz;
  float area = (2 * small_ape_params->fx * ape_params.tmp_real_w)
               * (2 * small_ape_params->fy * ape_params.tmp_real_h) / tz_square;
  float length = sqrt(area);
  float total_variation = getTotalVariation(*small_tmp, small_ape_params->tw, small_ape_params->th, area);
  cv::Ptr<cv::cuda::Filter> gaussian_filter_tmp = cv::cuda::createGaussianFilter(CV_32FC3, CV_32FC3, cv::Size(5, 5), 1);
  cv::Ptr<cv::cuda::Filter> gaussian_filter_img = cv::cuda::createGaussianFilter(CV_32FC4, CV_32FC4, cv::Size(5, 5), 1);
  // repeatedly blur until the template is smooth enough for this scale
  while (total_variation > 8.42 * length) { // 8.42 is obtained emperically
    gaussian_filter_tmp->apply(*small_tmp, *small_tmp);
    gaussian_filter_img->apply(*small_img, *small_img);
    total_variation = getTotalVariation(*small_tmp, small_ape_params->tw, small_ape_params->th, area);
  }
}
// Mean per-pixel local variation of the template (first channel only, see
// variationKernel), scaled by the expected projected area.
float getTotalVariation(const cv::cuda::GpuMat &tmp,
                        int tw,
                        int th,
                        float area) {
  // allocate one variation value per pixel
  const int pxl_num = tw * th;
  thrust::device_vector<float> variation(pxl_num, 0.0);
  // kernel parameter for TV: 2-D grid covering the template
  dim3 b_dim(BLOCK_W, BLOCK_H);
  dim3 g_dim((tw - 1) / BLOCK_W + 1, (th - 1) / BLOCK_H + 1);
  hipLaunchKernelGGL(( variationKernel), dim3(g_dim), dim3(b_dim), 0, 0, tmp, make_int2(tw, th), thrust::raw_pointer_cast(variation.data()));
  float total_variation = thrust::reduce(variation.begin(), variation.end()) / pxl_num * area;
  return total_variation;
}
// Per-pixel local variation: for every pixel, the maximum absolute
// difference between its first-channel value and its 8 in-bounds neighbors.
// Each block stages a (BLOCK_W+2) x (BLOCK_H+2) halo tile of the template's
// first channel in shared memory; the sentinel value 2 marks out-of-range
// cells (valid pixel values never reach 2, so they are skipped below).
__global__
void variationKernel(const cv::cuda::PtrStepSz<float3> tmp,
                     int2 dim,
                     float* variation) {
  const int tidx = threadIdx.x;
  const int tidy = threadIdx.y;
  const int tid = tidy * blockDim.x + tidx;
  const int x = blockIdx.x * blockDim.x + tidx;
  const int y = blockIdx.y * blockDim.y + tidy;
  // store pixels in specific window in shared memory
  // the window is expanded by on pixel from block
  const int ww = BLOCK_W + 2;
  const int wh = BLOCK_H + 2;
  const int ws = ww * wh;
  __shared__ float window[ws];
  // move data to the shared memory (cooperative strided fill of the tile)
  int x_begin = blockIdx.x * blockDim.x - 1;
  int y_begin = blockIdx.y * blockDim.y - 1;
  for (int i = tid; i < ws; i += BLOCK_SIZE_2D) {
    int wx = (i % ww) + x_begin;
    int wy = (i / ww) + y_begin;
    if (wx < 0 || wx >= dim.x || wy < 0 || wy >= dim.y)
      window[i] = 2;          // out-of-range sentinel
    else
      window[i] = tmp(wy, wx).x;
  }
  __syncthreads();
  // out of range
  if (x >= dim.x || y >= dim.y)
    return;
  // find max difference between center pixel and surroundings
  float max_diff = 0;
  float value = window[(tidy + 1)*ww + (tidx + 1)];
  // wi walks a 3x3 neighborhood: 3 columns then +(BLOCK_W-1) = +(ww-3) to
  // reach the next row of the ww-wide tile
  for (int idy = 0, wi = tidy*ww + tidx; idy < 3; ++idy, wi += (BLOCK_W - 1)) {
    for (int idx = 0; idx < 3; ++idx, ++wi) {
      float surr = window[wi];
      if (surr != 2) {
        float diff = std::abs(value - surr);
        if (diff > max_diff)
          max_diff = diff;
      }
    }
  }
  variation[y*dim.x + x] = max_diff;
}
// Draw `sample_num` random template pixels: their colors and real-world
// coordinates are written both to the provided device vectors and to the
// constant-memory copies used by the distance kernels.
void randSample(const cv::cuda::GpuMat &tmp,
                int2 tmp_dim,
                float2 tmp_real,
                int sample_num,
                thrust::device_vector<float2> *tmp_coors,
                thrust::device_vector<float3> *tmp_vals) {
  // rand pixel
  // NOTE(review): re-seeding with time(NULL) on every call makes repeated
  // calls within the same second draw identical samples — confirm intended.
  srand(time(NULL));
  thrust::device_vector<int2> rand_coor(sample_num, make_int2(0, 0));
  thrust::counting_iterator<int> i0(rand()/2);
  thrust::transform(i0, i0 + sample_num, rand_coor.begin(), CoorRngFunctor(tmp_dim.x, tmp_dim.y));
  // get pixel value and position
  const int BLOCK_NUM = (sample_num - 1) / BLOCK_SIZE + 1;
  hipLaunchKernelGGL(( randSampleKernel), dim3(BLOCK_NUM), dim3(BLOCK_SIZE), 0, 0, thrust::raw_pointer_cast(rand_coor.data()),
                     tmp,
                     tmp_dim,
                     tmp_real,
                     sample_num,
                     thrust::raw_pointer_cast(tmp_coors->data()),
                     thrust::raw_pointer_cast(tmp_vals->data()));
  // bind to const mem
  hipMemcpyToSymbol(const_tmp_coors, thrust::raw_pointer_cast(tmp_coors->data()), sizeof(float2)* sample_num, 0, hipMemcpyDeviceToDevice);
  hipMemcpyToSymbol(const_tmp_vals, thrust::raw_pointer_cast(tmp_vals->data()), sizeof(float3)* sample_num, 0, hipMemcpyDeviceToDevice);
}
// For each sampled pixel index: fetch its color from the template and map
// its pixel coordinate to centered real-world coordinates in
// [-tmp_real, +tmp_real] (y axis flipped so +y points up).
__global__
void randSampleKernel(const int2 *rand_coor,
                      const cv::cuda::PtrStepSz<float3> tmp,
                      int2 tmp_dim,
                      float2 tmp_real,
                      int sample_num,
                      float2 *tmp_coors,
                      float3 *tmp_vals) {
  const int idx = blockIdx.x * blockDim.x + threadIdx.x;
  if (idx >= sample_num)
    return;
  int x = rand_coor[idx].x;
  int y = rand_coor[idx].y;
  tmp_vals[idx] = tmp(y, x);
  // pixel center -> normalized [-1, 1] -> real-world units
  float2 coor;
  coor.x = (2 * float(x) + 1 - tmp_dim.x) / tmp_dim.x * tmp_real.x;
  coor.y = -(2 * float(y) + 1 - tmp_dim.y) / tmp_dim.y * tmp_real.y;
  tmp_coors[idx] = coor;
}
// Build the initial epsilon-covering pose set.  Two passes over the same
// (tz, rx) grid: the first counts poses per cell (rz0/rz1/tx/ty counts) so
// the device vector can be sized once; the second launches a kernel per
// cell to fill poses in and mark which survive the visibility test.
// Invalid poses are then compacted away.
void createSet(const ApeParams &ape_params,
               thrust::device_vector<Pose> *poses,
               size_t *num_poses) {
  // count
  int count_total = 0;
  thrust::host_vector<int4> count; // rz0 rz1 tx ty
  // paramters
  const float length = sqrt(ape_params.tmp_real_w * ape_params.tmp_real_w + ape_params.tmp_real_h * ape_params.tmp_real_h);
  const int NUM_RZ0 = int((ape_params.max_rz - ape_params.min_rz) / ape_params.step.rz0) + 1;
  const int NUM_RZ1 = int((ape_params.max_rz - ape_params.min_rz) / ape_params.step.rz1) + 1;
  // counting pass
  for (float tz = ape_params.min_tz; tz <= ape_params.max_tz; ) {
    // NOTE(review): both bounds are derived from tmp_real_h; bound_tx
    // would be expected to use tmp_real_w — confirm whether intentional.
    float bound_tx = fabs(ape_params.cx*tz / ape_params.fx - ape_params.tmp_real_h);
    float bound_ty = fabs(ape_params.cy*tz / ape_params.fy - ape_params.tmp_real_h);
    for (float rx = ape_params.min_rx; rx <= ape_params.max_rx; ) {
      int num_rz0 = (rx != 0) ? NUM_RZ0 : 1;   // rz0 is degenerate at rx == 0
      int num_tx = int(2 * bound_tx / (ape_params.step.tx*(tz - length*sin(rx)))) + 1;
      int num_ty = int(2 * bound_ty / (ape_params.step.ty*(tz - length*sin(rx)))) + 1;
      count_total += (num_rz0 * NUM_RZ1 * num_tx * num_ty);
      count.push_back(make_int4(num_rz0, NUM_RZ1, num_tx, num_ty));
      // advance rx along the non-uniform net spacing
      float asin_value = 2 - 1 / (1 / (2 - sin(rx)) + ape_params.step.rx);
      if (asin_value <= 1 && asin_value >= -1)
        rx = asinf(asin_value);
      else
        rx = ape_params.max_rx + 1;   // leave the loop
    }
    tz += tz*tz*ape_params.step.tz / (1 - ape_params.step.tz*tz);
  }
  // allocate
  thrust::device_vector<bool> valids(count_total, false);
  poses->resize(count_total);
  // assignment pass (must iterate the grid in the same order as above)
  Pose* poses_ptr = thrust::raw_pointer_cast(poses->data());
  bool* valids_ptr = thrust::raw_pointer_cast(valids.data());
  auto count_iter = count.begin();
  int begin_index = 0;
  int area_thres = std::round(0.01 * ape_params.iw * ape_params.ih);
  for (float tz = ape_params.min_tz; tz <= ape_params.max_tz; ) {
    float bound_tx = fabs(ape_params.cx*tz / ape_params.fx - ape_params.tmp_real_h);
    float bound_ty = fabs(ape_params.cy*tz / ape_params.fy - ape_params.tmp_real_h);
    for (float rx = ape_params.min_rx; rx <= ape_params.max_rx; ) {
      int num = (*count_iter).x * (*count_iter).y * (*count_iter).z * (*count_iter).w;
      const int BLOCK_NUM = (num - 1) / BLOCK_SIZE + 1;
      float2 bound = make_float2(bound_tx, bound_ty);
      hipLaunchKernelGGL(( createSetKernel), dim3(BLOCK_NUM), dim3(BLOCK_SIZE), 0, 0, begin_index,
                         *count_iter,
                         num,
                         tz,
                         rx,
                         ape_params,
                         bound,
                         length,
                         area_thres,
                         poses_ptr,
                         valids_ptr);
      begin_index += num;
      ++count_iter;
      float asin_value = 2 - 1 / (1 / (2 - sin(rx)) + ape_params.step.rx);
      if (asin_value <= 1 && asin_value >= -1)
        rx = asinf(asin_value);
      else
        rx = ape_params.max_rx + 1;
    }
    tz += tz*tz*ape_params.step.tz / (1 - ape_params.step.tz*tz);
  }
  if (begin_index != count_total)
    std::cerr << "error occur in 'createSet'!" << std::endl;
  // remove non-valid poses (compaction keyed by the valids flags)
  auto zip_it_valid_end = thrust::remove_if(
    thrust::make_zip_iterator(thrust::make_tuple(poses->begin(), valids.begin())),
    thrust::make_zip_iterator(thrust::make_tuple(poses->end(), valids.end())),
    ValidFunctor()
  );
  poses->erase(thrust::get<0>(zip_it_valid_end.get_iterator_tuple()), poses->end());
  *num_poses = poses->size();
}
__global__
void createSetKernel(int begin_index,
int4 count,
int num,
float tz,
float rx,
ApeParams ape_params,
float2 bound,
float length,
int area_thres,
Pose *poses,
bool *valids) {
const int idx = blockIdx.x * blockDim.x + threadIdx.x;
if (idx >= num)
return;
const int num_rz0 = count.x;
const int num_rz1 = count.y;
const int num_tx = count.z;
const int num_ty = count.w;
const int id_rz0 = idx % num_rz0;
const int id_rz1 = (idx / num_rz0) % num_rz1;
const int id_ty = (idx / (num_rz0 * num_rz1)) % num_ty;
const int id_tx = (idx / (num_rz0 * num_rz1 * num_ty)) % num_tx;
Pose pose;
pose.rz0 = ape_params.min_rz + id_rz0*ape_params.step.rz0;
pose.rx = rx;
pose.rz1 = ape_params.min_rz + id_rz1*ape_params.step.rz1;
pose.tx = -bound.x + id_tx*ape_params.step.tx*(tz - length*sinf(rx));
pose.ty = -bound.y + id_ty*ape_params.step.ty*(tz - length*sinf(rx));
pose.tz = tz;
int index = idx + begin_index;
poses[index] = pose;
// calculate homography parameters
pose.rx += 3.1415926f;
// pre-compute sin and cos values
float cos_rz0 = cosf(pose.rz0);
float cos_rx = cosf(pose.rx);
float cos_rz1 = cosf(pose.rz1);
float sin_rz0 = sinf(pose.rz0);
float sin_rx = sinf(pose.rx);
float sin_rz1 = sinf(pose.rz1);
// z coordinate is y cross x, so add minus
float r11 = cos_rz0 * cos_rz1 - sin_rz0 * cos_rx * sin_rz1;
float r12 = -cos_rz0 * sin_rz1 - sin_rz0 * cos_rx * cos_rz1;
float r21 = sin_rz0 * cos_rz1 + cos_rz0 * cos_rx * sin_rz1;
float r22 = -sin_rz0 * sin_rz1 + cos_rz0 * cos_rx * cos_rz1;
float r31 = sin_rx * sin_rz1;
float r32 = sin_rx * cos_rz1;
// final transfomration
float t0 = ape_params.fx*r11 + ape_params.cx*r31;
float t1 = ape_params.fx*r12 + ape_params.cx*r32;
float t3 = ape_params.fx*pose.tx + ape_params.cx*pose.tz;
float t4 = ape_params.fy*r21 + ape_params.cy*r31;
float t5 = ape_params.fy*r22 + ape_params.cy*r32;
float t7 = ape_params.fy*pose.ty + ape_params.cy*pose.tz;
float t8 = r31;
float t9 = r32;
float t11 = pose.tz;
// reject transformations make template out of boundary
float inv_c1z = 1 / (t8*(-ape_params.tmp_real_w) + t9*(-ape_params.tmp_real_h) + t11);
float c1x = (t0*(-ape_params.tmp_real_w) + t1*(-ape_params.tmp_real_h) + t3) * inv_c1z;
float c1y = (t4*(-ape_params.tmp_real_w) + t5*(-ape_params.tmp_real_h) + t7) * inv_c1z;
float inv_c2z = 1 / (t8*(+ape_params.tmp_real_w) + t9*(-ape_params.tmp_real_h) + t11);
float c2x = (t0*(+ape_params.tmp_real_w) + t1*(-ape_params.tmp_real_h) + t3) * inv_c2z;
float c2y = (t4*(+ape_params.tmp_real_w) + t5*(-ape_params.tmp_real_h) + t7) * inv_c2z;
float inv_c3z = 1 / (t8*(+ape_params.tmp_real_w) + t9*(+ape_params.tmp_real_h) + t11);
float c3x = (t0*(+ape_params.tmp_real_w) + t1*(+ape_params.tmp_real_h) + t3) * inv_c3z;
float c3y = (t4*(+ape_params.tmp_real_w) + t5*(+ape_params.tmp_real_h) + t7) * inv_c3z;
float inv_c4z = 1 / (t8*(-ape_params.tmp_real_w) + t9*(+ape_params.tmp_real_h) + t11);
float c4x = (t0*(-ape_params.tmp_real_w) + t1*(+ape_params.tmp_real_h) + t3) * inv_c4z;
float c4y = (t4*(-ape_params.tmp_real_w) + t5*(+ape_params.tmp_real_h) + t7) * inv_c4z;
float minx = fminf(c1x, fminf(c2x, fminf(c3x, c4x)));
float maxx = fmaxf(c1x, fmaxf(c2x, fmaxf(c3x, c4x)));
float miny = fminf(c1y, fminf(c2y, fminf(c3y, c4y)));
float maxy = fmaxf(c1y, fmaxf(c2y, fmaxf(c3y, c4y)));
// reject transformations make marker too small in screen
float two_area = (c1x - c2x) * (c1y + c2y)
+ (c2x - c3x) * (c2y + c3y)
+ (c3x - c4x) * (c3y + c4y)
+ (c4x - c1x) * (c4y + c1y);
float area = abs(two_area / 2);
const int margin = 1;
if (area > area_thres
&& (minx >= margin)
&& (maxx <= ape_params.iw -1 - margin)
&& (miny >= margin)
&& (maxy <= ape_params.ih -1 - margin))
valids[index] = true;
else
valids[index] = false;
}
void calDist(const thrust::device_vector<Pose> &poses,
float4 in_params,
float2 tmp_real,
int2 img_dim,
bool photo_inva,
size_t num_poses,
size_t sample_num,
thrust::device_vector<float> *dists) {
const size_t BLOCK_NUM = (num_poses - 1) / BLOCK_SIZE + 1;
float inv_sample_num = 1.f / sample_num;
if (photo_inva) {
thrust::device_vector<float> tmp_y(sample_num);
hipLaunchKernelGGL(( getTmpY), dim3(int((sample_num - 1) / BLOCK_SIZE + 1)), dim3(BLOCK_SIZE) , 0, 0, thrust::raw_pointer_cast(tmp_y.data()), sample_num);
float sum_of_tmp_y = thrust::reduce(tmp_y.begin(), tmp_y.end());
float mean_tmp = sum_of_tmp_y * inv_sample_num;
float sum_of_sq_tmp_y = thrust::inner_product(thrust::device, tmp_y.begin(), tmp_y.end(), tmp_y.begin(), 0.f);
float sig_tmp = sqrt(fmaxf((sum_of_sq_tmp_y - (sum_of_tmp_y*sum_of_tmp_y) * inv_sample_num), 0.f) * inv_sample_num) + 1e-7f;
hipLaunchKernelGGL(( calDistInvarKernel), dim3(BLOCK_NUM), dim3(BLOCK_SIZE), 0, 0, thrust::raw_pointer_cast(poses.data()),
in_params,
tmp_real,
img_dim,
num_poses,
sample_num,
inv_sample_num,
mean_tmp,
sig_tmp,
thrust::raw_pointer_cast(dists->data()));
}
else {
hipLaunchKernelGGL(( calDistColorKernel), dim3(BLOCK_NUM), dim3(BLOCK_SIZE), 0, 0, thrust::raw_pointer_cast(poses.data()),
in_params,
tmp_real,
img_dim,
num_poses,
sample_num,
inv_sample_num,
thrust::raw_pointer_cast(dists->data()));
}
}
__global__
void getTmpY(float *tmp_y,
int sample_num) {
const int idx = blockIdx.x * blockDim.x + threadIdx.x;
if (idx >= sample_num)
return;
tmp_y[idx] = const_tmp_vals[idx].x;
}
__global__
void calDistInvarKernel(const Pose *poses,
float4 in_params,
float2 tmp_real,
int2 img_dim,
size_t num_poses,
size_t sample_num,
float inv_sample_num,
float mean_tmp,
float sig_tmp,
float *dists) {
const size_t idx = blockIdx.x * blockDim.x + threadIdx.x;
if (idx >= num_poses)
return;
// get pose parameter
float rz0 = poses[idx].rz0;
float rx = poses[idx].rx + 3.1415926f;
float rz1 = poses[idx].rz1;
float tx = poses[idx].tx;
float ty = poses[idx].ty;
float tz = poses[idx].tz;
float cos_rz0 = cosf(rz0);
float cos_rx = cosf(rx);
float cos_rz1 = cosf(rz1);
float sin_rz0 = sinf(rz0);
float sin_rx = sinf(rx);
float sin_rz1 = sinf(rz1);
// z coordinate is y cross x, so add minus
float r11 = cos_rz0 * cos_rz1 - sin_rz0 * cos_rx * sin_rz1;
float r12 = -cos_rz0 * sin_rz1 - sin_rz0 * cos_rx * cos_rz1;
float r21 = sin_rz0 * cos_rz1 + cos_rz0 * cos_rx * sin_rz1;
float r22 = -sin_rz0 * sin_rz1 + cos_rz0 * cos_rx * cos_rz1;
float r31 = sin_rx * sin_rz1;
float r32 = sin_rx * cos_rz1;
// final transfomration
float t0 = in_params.x*r11 + in_params.z*r31;
float t1 = in_params.x*r12 + in_params.z*r32;
float t3 = in_params.x*tx + in_params.z*tz;
float t4 = in_params.y*r21 + in_params.w*r31;
float t5 = in_params.y*r22 + in_params.w*r32;
float t7 = in_params.y*ty + in_params.w*tz;
float t8 = r31;
float t9 = r32;
float t11 = tz;
// calculate distance
float score = 0.f;
// parameters for normalization
float sum_of_img_y = 0;
float sum_of_sq_img_y = 0;
for (int i = 0; i < sample_num; ++i) {
// calculate coordinate on camera image
float inv_z = 1 / (t8*const_tmp_coors[i].x + t9*const_tmp_coors[i].y + t11);
float u = (t0*const_tmp_coors[i].x + t1*const_tmp_coors[i].y + t3) * inv_z;
float v = (t4*const_tmp_coors[i].x + t5*const_tmp_coors[i].y + t7) * inv_z;
// get value from constant memory
float3 tmp_val = const_tmp_vals[i];
// get value from texture
// have to add 0.5f for coordinates (see E.2 Linear Filtering in CUDA Programming Guide)
float4 img_val = tex2D(tex_img, u + 0.5f, v + 0.5f);
// accumulation for normalization
sum_of_img_y += img_val.x;
sum_of_sq_img_y += img_val.x*img_val.x;
float inv_num = 1.f / (i + 1);
float sig_img = sqrt(fmaxf((sum_of_sq_img_y - (sum_of_img_y*sum_of_img_y) * inv_num), 0.f) * inv_num) + 1e-7f;
float mean_img = sum_of_img_y * inv_num;
float sig_tmp_over_sig_img = sig_tmp / sig_img;
float faster = -mean_tmp + sig_tmp_over_sig_img * mean_img;
// calculate distant
score += 0.50f * abs(tmp_val.x - sig_tmp_over_sig_img*img_val.x + faster)
+ 0.25f * abs(img_val.y - tmp_val.y)
+ 0.25f *abs(img_val.z - tmp_val.z);
}
dists[idx] = score * inv_sample_num;
}
__global__
void calDistColorKernel(const Pose *poses,
float4 in_params,
float2 tmp_real,
int2 img_dim,
size_t num_poses,
size_t sample_num,
float inv_sample_num,
float *dists) {
const unsigned int idx = blockIdx.x * blockDim.x + threadIdx.x;
if (idx >= num_poses)
return;
// get pose parameter
float rz0 = poses[idx].rz0;
float rx = poses[idx].rx + 3.1415926f;
float rz1 = poses[idx].rz1;
float tx = poses[idx].tx;
float ty = poses[idx].ty;
float tz = poses[idx].tz;
float cos_rz0 = cosf(rz0);
float cos_rx = cosf(rx);
float cos_rz1 = cosf(rz1);
float sin_rz0 = sinf(rz0);
float sin_rx = sinf(rx);
float sin_rz1 = sinf(rz1);
// z coordinate is y cross x, so add minus
float r11 = cos_rz0 * cos_rz1 - sin_rz0 * cos_rx * sin_rz1;
float r12 = -cos_rz0 * sin_rz1 - sin_rz0 * cos_rx * cos_rz1;
float r21 = sin_rz0 * cos_rz1 + cos_rz0 * cos_rx * sin_rz1;
float r22 = -sin_rz0 * sin_rz1 + cos_rz0 * cos_rx * cos_rz1;
float r31 = sin_rx * sin_rz1;
float r32 = sin_rx * cos_rz1;
// final transfomration
float t0 = in_params.x*r11 + in_params.z*r31;
float t1 = in_params.x*r12 + in_params.z*r32;
float t3 = in_params.x*tx + in_params.z*tz;
float t4 = in_params.y*r21 + in_params.w*r31;
float t5 = in_params.y*r22 + in_params.w*r32;
float t7 = in_params.y*ty + in_params.w*tz;
float t8 = r31;
float t9 = r32;
float t11 = tz;
// calculate distance
float score = 0.0;
for (int i = 0; i < sample_num; ++i) {
// calculate coordinate on camera image
float inv_z = 1 / (t8*const_tmp_coors[i].x + t9*const_tmp_coors[i].y + t11);
float u = (t0*const_tmp_coors[i].x + t1*const_tmp_coors[i].y + t3) * inv_z;
float v = (t4*const_tmp_coors[i].x + t5*const_tmp_coors[i].y + t7) * inv_z;
// get value from constant memory
float3 tmp_val = const_tmp_vals[i];
// get value from texture
// have to add 0.5f for coordinates (see E.2 Linear Filtering in CUDA Programming Guide)
float4 img_val = tex2D(tex_img, u + 0.5f, v + 0.5f);
// calculate distant
score += 0.50 * abs(img_val.x - tmp_val.x)
+ 0.25 * abs(img_val.y - tmp_val.y)
+ 0.25 * abs(img_val.z - tmp_val.z);
}
dists[idx] = score * inv_sample_num;
}
float calLastThreeTermsMean(thrust::host_vector<float> &min_dists,
int iter_times) {
float sum = 0;
int count = 0;
iter_times = (iter_times < 3) ? iter_times : 3;
for (auto it = min_dists.rbegin(); it != min_dists.rend() && count < iter_times; ++it, ++count) {
sum += *it;
}
return sum / count;
}
void getExMat(const Pose &pose,
double *ex_mat) {
float rz0 = pose.rz0;
float rx = pose.rx + 3.1415926f;
float rz1 = pose.rz1;
float sin_rz0 = sin(rz0);
float cos_rz0 = cos(rz0);
float sin_rx = sin(rx);
float cos_rx = cos(rx);
float sin_rz1 = sin(rz1);
float cos_rz1 = cos(rz1);
ex_mat[0] = double(cos_rz0*cos_rz1 - sin_rz0*cos_rx*sin_rz1);
ex_mat[4] = double(-cos_rz0*sin_rz1 - sin_rz0*cos_rx*cos_rz1);
ex_mat[8] = double(sin_rz0*sin_rx);
ex_mat[12] = double(pose.tx);
ex_mat[1] = double(sin_rz0*cos_rz1 + cos_rz0*cos_rx*sin_rz1);
ex_mat[5] = double(-sin_rz0*sin_rz1 + cos_rz0*cos_rx*cos_rz1);
ex_mat[9] = double(-cos_rz0*sin_rx);
ex_mat[13] = double(pose.ty);
ex_mat[2] = double(sin_rx*sin_rz1);
ex_mat[6] = double(sin_rx*cos_rz1);
ex_mat[10] = double(cos_rx);
ex_mat[14] = double(pose.tz);
ex_mat[3] = 0.0;
ex_mat[7] = 0.0;
ex_mat[11] = 0.0;
ex_mat[15] = 1.0;
}
bool getPosesByDistance(const thrust::device_vector<float> &dists,
float min_dist,
float epsilon,
thrust::device_vector<Pose> *poses,
size_t *num_poses) {
// get initial threhold
const float threshold = 0.19 * epsilon + 0.01372;
min_dist += threshold;
// count reductions
bool too_high_percentage = false;
bool first = true;
size_t count = INT_MAX;
thrust::device_vector<bool> survivals(*num_poses, false);
const size_t BLOCK_NUM = (*num_poses - 1) / BLOCK_SIZE + 1;
while (true) {
hipLaunchKernelGGL(( getPosesByDistanceKernel), dim3(BLOCK_NUM), dim3(BLOCK_SIZE), 0, 0, thrust::raw_pointer_cast(dists.data()),
min_dist,
dists.size(),
thrust::raw_pointer_cast(survivals.data()));
count = thrust::count(survivals.begin(), survivals.end(), true);
if (first) {
float percentage = float(count) / *num_poses;
too_high_percentage = (percentage > 0.1f);
first = false;
}
// reduce the size of pose set to prevent from out of memory
if (count < 27000) {
if (count == 0) {
auto dists_iter = thrust::min_element(dists.begin(), dists.end());
unsigned int position = dists_iter - dists.begin();
thrust::device_vector<Pose> temp_pose(1);
temp_pose[0] = (*poses)[position];
*poses = temp_pose;
*num_poses = 1;
}
else {
// prune poses
auto zip_it_valid_end = thrust::remove_if(
thrust::make_zip_iterator(thrust::make_tuple(poses->begin(), survivals.begin())),
thrust::make_zip_iterator(thrust::make_tuple(poses->end(), survivals.end())),
ValidFunctor()
);
poses->erase(thrust::get<0>(zip_it_valid_end.get_iterator_tuple()), poses->end());
*num_poses = count;
}
break;
}
min_dist *= 0.99f;
}
return too_high_percentage;
}
__global__
void getPosesByDistanceKernel(const float *dists,
float threshold,
size_t num_poses,
bool *survivals) {
const int idx = blockIdx.x * blockDim.x + threadIdx.x;
if (idx >= num_poses)
return;
survivals[idx] = (dists[idx] < threshold) ? true : false;
}
void expandPoses(float factor,
thrust::device_vector<Pose> *poses,
ApeParams *ape_params,
size_t *num_poses) {
// number of expand points
const int multiple = 80;
size_t new_num_poses = (*num_poses) * (multiple + 1);
// decrease step
ape_params->ShrinkNet(factor);
// expand origin set
const size_t BLOCK_NUM = ((*num_poses) - 1) / BLOCK_SIZE + 1;
int area_thres = 0.01 * ape_params->iw * ape_params->ih;
thrust::device_vector<bool> valids(new_num_poses, true);
poses->resize(new_num_poses);
hipLaunchKernelGGL(( expandPosesKernel), dim3(BLOCK_NUM), dim3(BLOCK_SIZE), 0, 0, *num_poses,
new_num_poses,
*ape_params,
area_thres,
thrust::raw_pointer_cast(poses->data()),
thrust::raw_pointer_cast(valids.data()));
// remove invalid terms
auto zip_it_valid_end = thrust::remove_if(
thrust::make_zip_iterator(thrust::make_tuple(poses->begin(), valids.begin())),
thrust::make_zip_iterator(thrust::make_tuple(poses->end(), valids.end())),
ValidFunctor()
);
poses->erase(thrust::get<0>(zip_it_valid_end.get_iterator_tuple()), poses->end());
*num_poses = poses->size();
}
__global__
void expandPosesKernel(size_t num_poses,
size_t new_num_poses,
ApeParams ape_params,
int area_thres,
Pose *poses,
bool *valids) {
const unsigned int idx = blockIdx.x * blockDim.x + threadIdx.x;
if (idx >= num_poses)
return;
hiprandState_t state;
hiprand_init(idx, 0, 0, &state);
float ori_rz0 = poses[idx].rz0;
float ori_rx = poses[idx].rx;
float ori_rz1 = poses[idx].rz1;
float ori_tx = poses[idx].tx;
float ori_ty = poses[idx].ty;
float ori_tz = poses[idx].tz;
for (unsigned int i = idx + num_poses; i < new_num_poses; i += num_poses) {
// rz0
Pose pose;
pose.rz0 = ori_rz0 + (hiprand(&state) % 3 - 1.f)*ape_params.step.rz0;
// rx
float is_plus = (hiprand(&state) % 3 - 1.f);
float sin_ori_rx = 2 - 1 / (1 / (2 - sinf(ori_rx)) + is_plus*ape_params.step.rx);
pose.rx = ori_rx + is_plus * is_plus * (asinf(sin_ori_rx) - ori_rx);
// rz1
pose.rz1 = ori_rz1 + (hiprand(&state) % 3 - 1.f)*ape_params.step.rz1;
// tx ty
float weight = ori_tz + sqrt(ape_params.tmp_real_w*ape_params.tmp_real_w + ape_params.tmp_real_h*ape_params.tmp_real_h) * sinf(ori_rx);
pose.tx = ori_tx + (hiprand(&state) % 3 - 1.f) * weight * ape_params.step.tx;
pose.ty = ori_ty + (hiprand(&state) % 3 - 1.f) * weight * ape_params.step.ty;
// tz
is_plus = (hiprand(&state) % 3 - 1.f);
float denom_tz = 1 - is_plus * ape_params.step.tz * ori_tz;
pose.tz = ori_tz + is_plus * ape_params.step.tz * (ori_tz * ori_tz) / denom_tz;
poses[i] = pose;
// condition
bool valid = (denom_tz != 0)
& (abs(sin_ori_rx) <= 1)
& (pose.tz >= ape_params.min_tz)
& (pose.tz <= ape_params.max_tz)
& (pose.rx >= ape_params.min_rx)
& (pose.rx <= ape_params.max_rx);
if (valid == false) {
valids[i] = false;
return;
}
// calculate homography parameters
pose.rx += 3.1415926;
// pre-compute sin and cos values
float cos_rz0 = cosf(pose.rz0);
float cos_rx = cosf(pose.rx);
float cos_rz1 = cosf(pose.rz1);
float sin_rz0 = sinf(pose.rz0);
float sin_rx = sinf(pose.rx);
float sin_rz1 = sinf(pose.rz1);
// z coordinate is y cross x, so add minus
float r11 = cos_rz0 * cos_rz1 - sin_rz0 * cos_rx * sin_rz1;
float r12 = -cos_rz0 * sin_rz1 - sin_rz0 * cos_rx * cos_rz1;
float r21 = sin_rz0 * cos_rz1 + cos_rz0 * cos_rx * sin_rz1;
float r22 = -sin_rz0 * sin_rz1 + cos_rz0 * cos_rx * cos_rz1;
float r31 = sin_rx * sin_rz1;
float r32 = sin_rx * cos_rz1;
// final transfomration
float t0 = ape_params.fx*r11 + ape_params.cx*r31;
float t1 = ape_params.fx*r12 + ape_params.cx*r32;
float t3 = ape_params.fx*pose.tx + ape_params.cx*pose.tz;
float t4 = ape_params.fy*r21 + ape_params.cy*r31;
float t5 = ape_params.fy*r22 + ape_params.cy*r32;
float t7 = ape_params.fy*pose.ty + ape_params.cy*pose.tz;
float t8 = r31;
float t9 = r32;
float t11 = pose.tz;
// reject transformations make template out of boundary
float inv_c1z = 1 / (t8*(-ape_params.tmp_real_w) + t9*(-ape_params.tmp_real_h) + t11);
float c1x = (t0*(-ape_params.tmp_real_w) + t1*(-ape_params.tmp_real_h) + t3) * inv_c1z;
float c1y = (t4*(-ape_params.tmp_real_w) + t5*(-ape_params.tmp_real_h) + t7) * inv_c1z;
float inv_c2z = 1 / (t8*(+ape_params.tmp_real_w) + t9*(-ape_params.tmp_real_h) + t11);
float c2x = (t0*(+ape_params.tmp_real_w) + t1*(-ape_params.tmp_real_h) + t3) * inv_c2z;
float c2y = (t4*(+ape_params.tmp_real_w) + t5*(-ape_params.tmp_real_h) + t7) * inv_c2z;
float inv_c3z = 1 / (t8*(+ape_params.tmp_real_w) + t9*(+ape_params.tmp_real_h) + t11);
float c3x = (t0*(+ape_params.tmp_real_w) + t1*(+ape_params.tmp_real_h) + t3) * inv_c3z;
float c3y = (t4*(+ape_params.tmp_real_w) + t5*(+ape_params.tmp_real_h) + t7) * inv_c3z;
float inv_c4z = 1 / (t8*(-ape_params.tmp_real_w) + t9*(+ape_params.tmp_real_h) + t11);
float c4x = (t0*(-ape_params.tmp_real_w) + t1*(+ape_params.tmp_real_h) + t3) * inv_c4z;
float c4y = (t4*(-ape_params.tmp_real_w) + t5*(+ape_params.tmp_real_h) + t7) * inv_c4z;
float minx = fminf(c1x, fminf(c2x, fminf(c3x, c4x)));
float maxx = fmaxf(c1x, fmaxf(c2x, fmaxf(c3x, c4x)));
float miny = fminf(c1y, fminf(c2y, fminf(c3y, c4y)));
float maxy = fmaxf(c1y, fmaxf(c2y, fmaxf(c3y, c4y)));
// reject transformations make marker too small in screen
float two_area = (c1x - c2x) * (c1y + c2y)
+ (c2x - c3x) * (c2y + c3y)
+ (c3x - c4x) * (c3y + c4y)
+ (c4x - c1x) * (c4y + c1y);
float area = abs(two_area / 2);
const int margin = 1;
if (area > area_thres
&& (minx >= margin)
&& (maxx <= ape_params.iw - 1 - margin)
&& (miny >= margin)
&& (maxy <= ape_params.ih - 1 - margin))
valids[i] = true;
else
valids[i] = false;
}
}
__global__
void fetchTzKernel(const Pose *poses,
size_t num_poses,
float *tzs) {
const unsigned int idx = blockIdx.x * blockDim.x + threadIdx.x;
if (idx >= num_poses)
return;
tzs[idx] = poses[idx].tz;
}
} // namespace ape
| 886c2b1231d43450f74c321d9697369d57449537.cu | #include <cuda.h>
#include <device_launch_parameters.h>
#include <vector_functions.h>
#include <cuda_texture_types.h>
#include <curand.h>
#include <curand_kernel.h>
#include <texture_fetch_functions.h>
#include <opencv2/core/cuda_types.hpp>
#include <opencv2/core/cuda.hpp>
#include <opencv2/cudawarping.hpp>
#include <opencv2/cudafilters.hpp>
#include <opencv2/cudaimgproc.hpp>
#include <thrust/device_vector.h>
#include <thrust/host_vector.h>
#include <thrust/extrema.h>
#include <thrust/transform.h>
#include <thrust/iterator/counting_iterator.h>
#include <thrust/random.h>
#include <thrust/inner_product.h>
#include <thrust/execution_policy.h>
#include <iostream>
#include <cstdlib>
#include <ctime>
#include <cmath>
#include <mex.h>
#include "coarseToFinePoseEsti.h"
#include "apeCommon.h"
namespace ape {
static const int BLOCK_W = 8;
static const int BLOCK_H = 8;
static const int BLOCK_SIZE_2D = BLOCK_W*BLOCK_H;
static const int BLOCK_SIZE = 256;
static const int ORI_SAMPLE_NUM = 448;
// constant memory
__constant__ float2 const_tmp_coors[ORI_SAMPLE_NUM];
__constant__ float3 const_tmp_vals[ORI_SAMPLE_NUM];
// texture memory
texture<float4, cudaTextureType2D, cudaReadModeElementType> tex_img;
void coarseToFinePoseEstimation(const cv::cuda::GpuMat &tmp,
const cv::cuda::GpuMat &img,
int prm_lvls,
bool photo_inva,
bool verbose,
ApeParams *ape_params,
double *ex_mat) {
// bind texture memory
tex_img.addressMode[0] = cudaAddressModeBorder;
tex_img.addressMode[1] = cudaAddressModeBorder;
tex_img.filterMode = cudaFilterModeLinear;
tex_img.normalized = false;
cudaChannelFormatDesc cuda_channel_format_desc = cudaCreateChannelDesc<float4>();
// poses
size_t num_poses;
thrust::device_vector<Pose> poses;
thrust::device_vector<float> dists;
thrust::host_vector<float> min_dists;
// parameters
const float2 tmp_real = make_float2(ape_params->tmp_real_w, ape_params->tmp_real_h);
const float factor = 1 / 1.511f;
const float2 constraint = (photo_inva) ? make_float2(0.075f, 0.15f) : make_float2(0.05f, 0.1f);
int level = 0;
int level_p = 0;
float begin_scale = 1.f/powf(4.f, prm_lvls-1.f);
for (float scale = begin_scale; scale <= 1; scale *= 4) {
if (verbose) {
mexPrintf("pyramid: %f\n", scale);
mexEvalString("drawnow;");
}
int sample_num = ORI_SAMPLE_NUM;
// rescale image
cv::cuda::GpuMat small_img, small_tmp;
ApeParams small_ape_params;
rescale(tmp, img, *ape_params, scale, &small_tmp, &small_img, &small_ape_params);
cudaBindTexture2D(0, &tex_img, small_img.data, &cuda_channel_format_desc,
small_ape_params.iw, small_ape_params.ih, small_img.step);
// allocate sample memory
thrust::device_vector<float2> tmp_coors(sample_num, make_float2(0, 0));
thrust::device_vector<float3> tmp_vals(sample_num, make_float3(0, 0, 0));
int2 tmp_dim = make_int2(small_ape_params.tw, small_ape_params.th);
while (true) {
// initialize the net
if (level == 0)
createSet(small_ape_params, &poses, &num_poses);
++level;
randSample(small_tmp, tmp_dim, tmp_real, sample_num, &tmp_coors, &tmp_vals);
dists.resize(num_poses);
size_t ori_num_poses = num_poses;
calDist(poses,
make_float4(small_ape_params.fx, small_ape_params.fy, small_ape_params.cx, small_ape_params.cy),
tmp_real,
make_int2(small_ape_params.iw, small_ape_params.ih),
photo_inva,
num_poses,
sample_num,
&dists);
cudaDeviceSynchronize();
auto dists_iter = thrust::min_element(dists.begin(), dists.end());
float min_dist = *dists_iter;
if (verbose) {
mexPrintf(" -- level %d -- epsilon %.3f, Number of Poses %d, Minimum Dist. %f\n",
level, small_ape_params.epsilon, num_poses, min_dist);
mexEvalString("drawnow;");
}
min_dists.push_back(min_dist);
// early termination
if ((min_dist < 0.005) || ((scale == 1) && (min_dist < 0.015)) ||
((level_p > 0) && (level_p != level) && (scale == 1) && (min_dist > calLastThreeTermsMean(min_dists, level-level_p)*0.97))) {
auto idx = dists_iter - dists.begin();
getExMat(poses[idx], ex_mat);
cudaUnbindTexture(&tex_img);
return;
}
// get poses by distance
bool too_high_percentage = getPosesByDistance(dists,
min_dist,
small_ape_params.epsilon,
&poses,
&num_poses);
// expand the pose set for next round
// if the initial pose set is not decent enough, recreate another new epsilon-covering set with smaller epsilon
if ((level == 1)
&& ((too_high_percentage && (min_dist > constraint.x) && (ori_num_poses < 7500000))
|| ((min_dist > constraint.y) && (ori_num_poses < 5000000)))) {
small_ape_params.ShrinkNet(0.9f);
level = 0;
min_dists.clear();
}
else {
expandPoses(factor, &poses, &small_ape_params, &num_poses);
thrust::device_vector<float> tzs(num_poses, 0.0);
const size_t BLOCK_NUM = ((num_poses) - 1) / BLOCK_SIZE + 1;
fetchTzKernel<<<BLOCK_NUM, BLOCK_SIZE>>>(thrust::raw_pointer_cast(poses.data()),
num_poses,
thrust::raw_pointer_cast(tzs.data()));
float pixelMaxMovement = small_ape_params.epsilon
* std::max(small_ape_params.fx, small_ape_params.fy)
* std::max(small_ape_params.tmp_real_w, small_ape_params.tmp_real_h)
* 2
/ (thrust::reduce(tzs.begin(), tzs.end()) / num_poses);
if (pixelMaxMovement < 1) {
level_p = level + 1;
ape_params->UpdateNet(small_ape_params);
break;
}
}
}
cudaUnbindTexture(&tex_img);
}
calDist(poses,
make_float4(ape_params->fx, ape_params->fy, ape_params->cx, ape_params->cy),
tmp_real,
make_int2(ape_params->iw, ape_params->ih),
photo_inva,
num_poses,
ORI_SAMPLE_NUM,
&dists);
cudaDeviceSynchronize();
auto dists_iter = thrust::min_element(dists.begin(), dists.end());
auto idx = dists_iter - dists.begin();
getExMat(poses[idx], ex_mat);
}
void rescale(const cv::cuda::GpuMat &tmp,
const cv::cuda::GpuMat &img,
const ApeParams &ape_params,
const float scale,
cv::cuda::GpuMat *small_tmp,
cv::cuda::GpuMat *small_img,
ApeParams *small_ape_params) {
cv::cuda::resize(img, *small_img, cv::Size(), scale, scale, CV_INTER_AREA);
cv::cuda::resize(tmp, *small_tmp, cv::Size(), scale, scale, CV_INTER_AREA);
*small_ape_params = ape_params;
small_ape_params->iw = small_img->cols;
small_ape_params->ih = small_img->rows;
small_ape_params->tw = small_tmp->cols;
small_ape_params->th = small_tmp->rows;
// modify intrinsic papameters
const float offset = -0.5f;
small_ape_params->fx = ape_params.fx * scale;
small_ape_params->fy = ape_params.fy * scale;
small_ape_params->cx = (ape_params.cx - offset) * scale + offset;
small_ape_params->cy = (ape_params.cy - offset) * scale + offset;
float tz_square = ape_params.max_tz * ape_params.min_tz;
float area = (2 * small_ape_params->fx * ape_params.tmp_real_w)
* (2 * small_ape_params->fy * ape_params.tmp_real_h) / tz_square;
float length = sqrt(area);
float total_variation = getTotalVariation(*small_tmp, small_ape_params->tw, small_ape_params->th, area);
cv::Ptr<cv::cuda::Filter> gaussian_filter_tmp = cv::cuda::createGaussianFilter(CV_32FC3, CV_32FC3, cv::Size(5, 5), 1);
cv::Ptr<cv::cuda::Filter> gaussian_filter_img = cv::cuda::createGaussianFilter(CV_32FC4, CV_32FC4, cv::Size(5, 5), 1);
while (total_variation > 8.42 * length) { // 8.42 is obtained emperically
gaussian_filter_tmp->apply(*small_tmp, *small_tmp);
gaussian_filter_img->apply(*small_img, *small_img);
total_variation = getTotalVariation(*small_tmp, small_ape_params->tw, small_ape_params->th, area);
}
}
float getTotalVariation(const cv::cuda::GpuMat &tmp,
int tw,
int th,
float area) {
// allocate
const int pxl_num = tw * th;
thrust::device_vector<float> variation(pxl_num, 0.0);
// kernel parameter for TV
dim3 b_dim(BLOCK_W, BLOCK_H);
dim3 g_dim((tw - 1) / BLOCK_W + 1, (th - 1) / BLOCK_H + 1);
variationKernel<<<g_dim, b_dim>>>(tmp, make_int2(tw, th), thrust::raw_pointer_cast(variation.data()));
float total_variation = thrust::reduce(variation.begin(), variation.end()) / pxl_num * area;
return total_variation;
}
__global__
void variationKernel(const cv::cuda::PtrStepSz<float3> tmp,
int2 dim,
float* variation) {
const int tidx = threadIdx.x;
const int tidy = threadIdx.y;
const int tid = tidy * blockDim.x + tidx;
const int x = blockIdx.x * blockDim.x + tidx;
const int y = blockIdx.y * blockDim.y + tidy;
// store pixels in specific window in shared memory
// the window is expanded by on pixel from block
const int ww = BLOCK_W + 2;
const int wh = BLOCK_H + 2;
const int ws = ww * wh;
__shared__ float window[ws];
// move data to the shared memory
int x_begin = blockIdx.x * blockDim.x - 1;
int y_begin = blockIdx.y * blockDim.y - 1;
for (int i = tid; i < ws; i += BLOCK_SIZE_2D) {
int wx = (i % ww) + x_begin;
int wy = (i / ww) + y_begin;
if (wx < 0 || wx >= dim.x || wy < 0 || wy >= dim.y)
window[i] = 2;
else
window[i] = tmp(wy, wx).x;
}
__syncthreads();
// out of range
if (x >= dim.x || y >= dim.y)
return;
// find max difference between center pixel and surroundings
float max_diff = 0;
float value = window[(tidy + 1)*ww + (tidx + 1)];
for (int idy = 0, wi = tidy*ww + tidx; idy < 3; ++idy, wi += (BLOCK_W - 1)) {
for (int idx = 0; idx < 3; ++idx, ++wi) {
float surr = window[wi];
if (surr != 2) {
float diff = std::abs(value - surr);
if (diff > max_diff)
max_diff = diff;
}
}
}
variation[y*dim.x + x] = max_diff;
}
void randSample(const cv::cuda::GpuMat &tmp,
int2 tmp_dim,
float2 tmp_real,
int sample_num,
thrust::device_vector<float2> *tmp_coors,
thrust::device_vector<float3> *tmp_vals) {
// rand pixel
srand(time(NULL));
thrust::device_vector<int2> rand_coor(sample_num, make_int2(0, 0));
thrust::counting_iterator<int> i0(rand()/2);
thrust::transform(i0, i0 + sample_num, rand_coor.begin(), CoorRngFunctor(tmp_dim.x, tmp_dim.y));
// get pixel value and position
const int BLOCK_NUM = (sample_num - 1) / BLOCK_SIZE + 1;
randSampleKernel<<<BLOCK_NUM, BLOCK_SIZE>>>(thrust::raw_pointer_cast(rand_coor.data()),
tmp,
tmp_dim,
tmp_real,
sample_num,
thrust::raw_pointer_cast(tmp_coors->data()),
thrust::raw_pointer_cast(tmp_vals->data()));
// bind to const mem
cudaMemcpyToSymbol(const_tmp_coors, thrust::raw_pointer_cast(tmp_coors->data()), sizeof(float2)* sample_num, 0, cudaMemcpyDeviceToDevice);
cudaMemcpyToSymbol(const_tmp_vals, thrust::raw_pointer_cast(tmp_vals->data()), sizeof(float3)* sample_num, 0, cudaMemcpyDeviceToDevice);
}
__global__
void randSampleKernel(const int2 *rand_coor,
const cv::cuda::PtrStepSz<float3> tmp,
int2 tmp_dim,
float2 tmp_real,
int sample_num,
float2 *tmp_coors,
float3 *tmp_vals) {
const int idx = blockIdx.x * blockDim.x + threadIdx.x;
if (idx >= sample_num)
return;
int x = rand_coor[idx].x;
int y = rand_coor[idx].y;
tmp_vals[idx] = tmp(y, x);
float2 coor;
coor.x = (2 * float(x) + 1 - tmp_dim.x) / tmp_dim.x * tmp_real.x;
coor.y = -(2 * float(y) + 1 - tmp_dim.y) / tmp_dim.y * tmp_real.y;
tmp_coors[idx] = coor;
}
void createSet(const ApeParams &ape_params,
thrust::device_vector<Pose> *poses,
size_t *num_poses) {
// count
int count_total = 0;
thrust::host_vector<int4> count; // rz0 rz1 tx ty
// paramters
const float length = sqrt(ape_params.tmp_real_w * ape_params.tmp_real_w + ape_params.tmp_real_h * ape_params.tmp_real_h);
const int NUM_RZ0 = int((ape_params.max_rz - ape_params.min_rz) / ape_params.step.rz0) + 1;
const int NUM_RZ1 = int((ape_params.max_rz - ape_params.min_rz) / ape_params.step.rz1) + 1;
// counting
for (float tz = ape_params.min_tz; tz <= ape_params.max_tz; ) {
float bound_tx = fabs(ape_params.cx*tz / ape_params.fx - ape_params.tmp_real_h);
float bound_ty = fabs(ape_params.cy*tz / ape_params.fy - ape_params.tmp_real_h);
for (float rx = ape_params.min_rx; rx <= ape_params.max_rx; ) {
int num_rz0 = (rx != 0) ? NUM_RZ0 : 1;
int num_tx = int(2 * bound_tx / (ape_params.step.tx*(tz - length*sin(rx)))) + 1;
int num_ty = int(2 * bound_ty / (ape_params.step.ty*(tz - length*sin(rx)))) + 1;
count_total += (num_rz0 * NUM_RZ1 * num_tx * num_ty);
count.push_back(make_int4(num_rz0, NUM_RZ1, num_tx, num_ty));
float asin_value = 2 - 1 / (1 / (2 - sin(rx)) + ape_params.step.rx);
if (asin_value <= 1 && asin_value >= -1)
rx = asinf(asin_value);
else
rx = ape_params.max_rx + 1;
}
tz += tz*tz*ape_params.step.tz / (1 - ape_params.step.tz*tz);
}
// allocate
thrust::device_vector<bool> valids(count_total, false);
poses->resize(count_total);
// assignment
Pose* poses_ptr = thrust::raw_pointer_cast(poses->data());
bool* valids_ptr = thrust::raw_pointer_cast(valids.data());
auto count_iter = count.begin();
int begin_index = 0;
int area_thres = std::round(0.01 * ape_params.iw * ape_params.ih);
for (float tz = ape_params.min_tz; tz <= ape_params.max_tz; ) {
float bound_tx = fabs(ape_params.cx*tz / ape_params.fx - ape_params.tmp_real_h);
float bound_ty = fabs(ape_params.cy*tz / ape_params.fy - ape_params.tmp_real_h);
for (float rx = ape_params.min_rx; rx <= ape_params.max_rx; ) {
int num = (*count_iter).x * (*count_iter).y * (*count_iter).z * (*count_iter).w;
const int BLOCK_NUM = (num - 1) / BLOCK_SIZE + 1;
float2 bound = make_float2(bound_tx, bound_ty);
createSetKernel<<<BLOCK_NUM, BLOCK_SIZE>>>(begin_index,
*count_iter,
num,
tz,
rx,
ape_params,
bound,
length,
area_thres,
poses_ptr,
valids_ptr);
begin_index += num;
++count_iter;
float asin_value = 2 - 1 / (1 / (2 - sin(rx)) + ape_params.step.rx);
if (asin_value <= 1 && asin_value >= -1)
rx = asinf(asin_value);
else
rx = ape_params.max_rx + 1;
}
tz += tz*tz*ape_params.step.tz / (1 - ape_params.step.tz*tz);
}
if (begin_index != count_total)
std::cerr << "error occur in 'createSet'!" << std::endl;
// remove non-valid poses
auto zip_it_valid_end = thrust::remove_if(
thrust::make_zip_iterator(thrust::make_tuple(poses->begin(), valids.begin())),
thrust::make_zip_iterator(thrust::make_tuple(poses->end(), valids.end())),
ValidFunctor()
);
poses->erase(thrust::get<0>(zip_it_valid_end.get_iterator_tuple()), poses->end());
*num_poses = poses->size();
}
// Populate one (tz, rx) slice of the pose search net.
// Each thread decodes its linear index into grid coordinates over
// (rz0, rz1, ty, tx) according to `count`, writes the decoded pose to
// poses[begin_index + idx], then projects the four corners of the planar
// template through the camera model and clears valids[] for poses whose
// projection leaves the image (1-pixel margin) or covers an area of at
// most area_thres pixels.
// NOTE(review): `bound` is the tx/ty offset of the slice's first sample,
// and `length` scales the tx/ty step with the tilt -- presumably the
// template's physical extent; confirm against the caller.
__global__
void createSetKernel(int begin_index,
                     int4 count,
                     int num,
                     float tz,
                     float rx,
                     ApeParams ape_params,
                     float2 bound,
                     float length,
                     int area_thres,
                     Pose *poses,
                     bool *valids) {
    const int idx = blockIdx.x * blockDim.x + threadIdx.x;
    if (idx >= num)
        return;
    // Decode the flat index: rz0 varies fastest, then rz1, then ty, then tx.
    const int num_rz0 = count.x;
    const int num_rz1 = count.y;
    const int num_tx = count.z;
    const int num_ty = count.w;
    const int id_rz0 = idx % num_rz0;
    const int id_rz1 = (idx / num_rz0) % num_rz1;
    const int id_ty = (idx / (num_rz0 * num_rz1)) % num_ty;
    const int id_tx = (idx / (num_rz0 * num_rz1 * num_ty)) % num_tx;
    Pose pose;
    pose.rz0 = ape_params.min_rz + id_rz0*ape_params.step.rz0;
    pose.rx = rx;
    pose.rz1 = ape_params.min_rz + id_rz1*ape_params.step.rz1;
    // The in-plane translation step is scaled by the effective depth of the
    // tilted template (tz - length*sin(rx)).
    pose.tx = -bound.x + id_tx*ape_params.step.tx*(tz - length*sinf(rx));
    pose.ty = -bound.y + id_ty*ape_params.step.ty*(tz - length*sinf(rx));
    pose.tz = tz;
    int index = idx + begin_index;
    poses[index] = pose;
    // calculate homography parameters (rx is offset by pi for projection)
    pose.rx += 3.1415926f;
    // pre-compute sin and cos values
    float cos_rz0 = cosf(pose.rz0);
    float cos_rx = cosf(pose.rx);
    float cos_rz1 = cosf(pose.rz1);
    float sin_rz0 = sinf(pose.rz0);
    float sin_rx = sinf(pose.rx);
    float sin_rz1 = sinf(pose.rz1);
    // z coordinate is y cross x, so add minus
    float r11 = cos_rz0 * cos_rz1 - sin_rz0 * cos_rx * sin_rz1;
    float r12 = -cos_rz0 * sin_rz1 - sin_rz0 * cos_rx * cos_rz1;
    float r21 = sin_rz0 * cos_rz1 + cos_rz0 * cos_rx * sin_rz1;
    float r22 = -sin_rz0 * sin_rz1 + cos_rz0 * cos_rx * cos_rz1;
    float r31 = sin_rx * sin_rz1;
    float r32 = sin_rx * cos_rz1;
    // final transfomration: t = K * [r1 r2 t] (3x3 homography, row major)
    float t0 = ape_params.fx*r11 + ape_params.cx*r31;
    float t1 = ape_params.fx*r12 + ape_params.cx*r32;
    float t3 = ape_params.fx*pose.tx + ape_params.cx*pose.tz;
    float t4 = ape_params.fy*r21 + ape_params.cy*r31;
    float t5 = ape_params.fy*r22 + ape_params.cy*r32;
    float t7 = ape_params.fy*pose.ty + ape_params.cy*pose.tz;
    float t8 = r31;
    float t9 = r32;
    float t11 = pose.tz;
    // reject transformations make template out of boundary:
    // project the four template corners (+-tmp_real_w, +-tmp_real_h).
    float inv_c1z = 1 / (t8*(-ape_params.tmp_real_w) + t9*(-ape_params.tmp_real_h) + t11);
    float c1x = (t0*(-ape_params.tmp_real_w) + t1*(-ape_params.tmp_real_h) + t3) * inv_c1z;
    float c1y = (t4*(-ape_params.tmp_real_w) + t5*(-ape_params.tmp_real_h) + t7) * inv_c1z;
    float inv_c2z = 1 / (t8*(+ape_params.tmp_real_w) + t9*(-ape_params.tmp_real_h) + t11);
    float c2x = (t0*(+ape_params.tmp_real_w) + t1*(-ape_params.tmp_real_h) + t3) * inv_c2z;
    float c2y = (t4*(+ape_params.tmp_real_w) + t5*(-ape_params.tmp_real_h) + t7) * inv_c2z;
    float inv_c3z = 1 / (t8*(+ape_params.tmp_real_w) + t9*(+ape_params.tmp_real_h) + t11);
    float c3x = (t0*(+ape_params.tmp_real_w) + t1*(+ape_params.tmp_real_h) + t3) * inv_c3z;
    float c3y = (t4*(+ape_params.tmp_real_w) + t5*(+ape_params.tmp_real_h) + t7) * inv_c3z;
    float inv_c4z = 1 / (t8*(-ape_params.tmp_real_w) + t9*(+ape_params.tmp_real_h) + t11);
    float c4x = (t0*(-ape_params.tmp_real_w) + t1*(+ape_params.tmp_real_h) + t3) * inv_c4z;
    float c4y = (t4*(-ape_params.tmp_real_w) + t5*(+ape_params.tmp_real_h) + t7) * inv_c4z;
    float minx = fminf(c1x, fminf(c2x, fminf(c3x, c4x)));
    float maxx = fmaxf(c1x, fmaxf(c2x, fmaxf(c3x, c4x)));
    float miny = fminf(c1y, fminf(c2y, fminf(c3y, c4y)));
    float maxy = fmaxf(c1y, fmaxf(c2y, fmaxf(c3y, c4y)));
    // reject transformations make marker too small in screen
    // (shoelace formula over the projected quad gives twice its area)
    float two_area = (c1x - c2x) * (c1y + c2y)
        + (c2x - c3x) * (c2y + c3y)
        + (c3x - c4x) * (c3y + c4y)
        + (c4x - c1x) * (c4y + c1y);
    float area = abs(two_area / 2);
    const int margin = 1;
    if (area > area_thres
        && (minx >= margin)
        && (maxx <= ape_params.iw -1 - margin)
        && (miny >= margin)
        && (maxy <= ape_params.ih -1 - margin))
        valids[index] = true;
    else
        valids[index] = false;
}
// Evaluate the matching distance of every candidate pose against the
// camera image (bound to tex_img) using the template samples cached in
// constant memory.  When photo_inva is set, the template luma mean and
// standard deviation are computed first (getTmpY kernel + thrust
// reductions) so the distance kernel can normalize illumination;
// otherwise the plain color distance kernel is used.  Results are
// written to dists (one value per pose).
void calDist(const thrust::device_vector<Pose> &poses,
             float4 in_params,
             float2 tmp_real,
             int2 img_dim,
             bool photo_inva,
             size_t num_poses,
             size_t sample_num,
             thrust::device_vector<float> *dists) {
    const size_t BLOCK_NUM = (num_poses - 1) / BLOCK_SIZE + 1;
    float inv_sample_num = 1.f / sample_num;
    if (photo_inva) {
        // Extract the template luma channel so it can be reduced on device.
        thrust::device_vector<float> tmp_y(sample_num);
        getTmpY<<<int((sample_num - 1) / BLOCK_SIZE + 1), BLOCK_SIZE >>>(thrust::raw_pointer_cast(tmp_y.data()), sample_num);
        float sum_of_tmp_y = thrust::reduce(tmp_y.begin(), tmp_y.end());
        float mean_tmp = sum_of_tmp_y * inv_sample_num;
        float sum_of_sq_tmp_y = thrust::inner_product(thrust::device, tmp_y.begin(), tmp_y.end(), tmp_y.begin(), 0.f);
        // Std-dev via E[x^2] - E[x]^2 (clamped at 0 against rounding);
        // the epsilon keeps later divisions well defined.
        float sig_tmp = sqrt(fmaxf((sum_of_sq_tmp_y - (sum_of_tmp_y*sum_of_tmp_y) * inv_sample_num), 0.f) * inv_sample_num) + 1e-7f;
        calDistInvarKernel<<<BLOCK_NUM, BLOCK_SIZE>>>(thrust::raw_pointer_cast(poses.data()),
                                                      in_params,
                                                      tmp_real,
                                                      img_dim,
                                                      num_poses,
                                                      sample_num,
                                                      inv_sample_num,
                                                      mean_tmp,
                                                      sig_tmp,
                                                      thrust::raw_pointer_cast(dists->data()));
    }
    else {
        calDistColorKernel<<<BLOCK_NUM, BLOCK_SIZE>>>(thrust::raw_pointer_cast(poses.data()),
                                                      in_params,
                                                      tmp_real,
                                                      img_dim,
                                                      num_poses,
                                                      sample_num,
                                                      inv_sample_num,
                                                      thrust::raw_pointer_cast(dists->data()));
    }
}
// Copy the luma (x) component of every template sample out of the
// const_tmp_vals constant-memory table into a plain device array so the
// host can reduce over it with thrust.
__global__
void getTmpY(float *tmp_y,
             int sample_num) {
    const int tid = blockDim.x * blockIdx.x + threadIdx.x;
    if (tid < sample_num) {
        tmp_y[tid] = const_tmp_vals[tid].x;
    }
}
// Illumination-invariant per-pose distance.  One thread per pose: builds
// the pose's 3x3 homography from the ZXZ rotation and the camera
// intrinsics (in_params = fx, fy, cx, cy), then streams the template
// samples, maintaining a RUNNING mean/std of the sampled image luma
// (over the first i+1 samples only -- an incremental approximation) and
// scoring a weighted L1 distance with the template luma remapped into
// the image's photometric range.  Channel weights: 0.5 (Y), 0.25 (U),
// 0.25 (V).  mean_tmp / sig_tmp are the full-template luma statistics
// precomputed by the host.
__global__
void calDistInvarKernel(const Pose *poses,
                        float4 in_params,
                        float2 tmp_real,
                        int2 img_dim,
                        size_t num_poses,
                        size_t sample_num,
                        float inv_sample_num,
                        float mean_tmp,
                        float sig_tmp,
                        float *dists) {
    const size_t idx = blockIdx.x * blockDim.x + threadIdx.x;
    if (idx >= num_poses)
        return;
    // get pose parameter (rx offset by pi for projection, as in createSet)
    float rz0 = poses[idx].rz0;
    float rx = poses[idx].rx + 3.1415926f;
    float rz1 = poses[idx].rz1;
    float tx = poses[idx].tx;
    float ty = poses[idx].ty;
    float tz = poses[idx].tz;
    float cos_rz0 = cosf(rz0);
    float cos_rx = cosf(rx);
    float cos_rz1 = cosf(rz1);
    float sin_rz0 = sinf(rz0);
    float sin_rx = sinf(rx);
    float sin_rz1 = sinf(rz1);
    // z coordinate is y cross x, so add minus
    float r11 = cos_rz0 * cos_rz1 - sin_rz0 * cos_rx * sin_rz1;
    float r12 = -cos_rz0 * sin_rz1 - sin_rz0 * cos_rx * cos_rz1;
    float r21 = sin_rz0 * cos_rz1 + cos_rz0 * cos_rx * sin_rz1;
    float r22 = -sin_rz0 * sin_rz1 + cos_rz0 * cos_rx * cos_rz1;
    float r31 = sin_rx * sin_rz1;
    float r32 = sin_rx * cos_rz1;
    // final transfomration: homography t = K * [r1 r2 t]
    float t0 = in_params.x*r11 + in_params.z*r31;
    float t1 = in_params.x*r12 + in_params.z*r32;
    float t3 = in_params.x*tx + in_params.z*tz;
    float t4 = in_params.y*r21 + in_params.w*r31;
    float t5 = in_params.y*r22 + in_params.w*r32;
    float t7 = in_params.y*ty + in_params.w*tz;
    float t8 = r31;
    float t9 = r32;
    float t11 = tz;
    // calculate distance
    float score = 0.f;
    // parameters for normalization (running sums over samples seen so far)
    float sum_of_img_y = 0;
    float sum_of_sq_img_y = 0;
    for (int i = 0; i < sample_num; ++i) {
        // calculate coordinate on camera image
        float inv_z = 1 / (t8*const_tmp_coors[i].x + t9*const_tmp_coors[i].y + t11);
        float u = (t0*const_tmp_coors[i].x + t1*const_tmp_coors[i].y + t3) * inv_z;
        float v = (t4*const_tmp_coors[i].x + t5*const_tmp_coors[i].y + t7) * inv_z;
        // get value from constant memory
        float3 tmp_val = const_tmp_vals[i];
        // get value from texture
        // have to add 0.5f for coordinates (see E.2 Linear Filtering in CUDA Programming Guide)
        float4 img_val = tex2D(tex_img, u + 0.5f, v + 0.5f);
        // accumulation for normalization
        sum_of_img_y += img_val.x;
        sum_of_sq_img_y += img_val.x*img_val.x;
        // Running mean/std of the image luma over samples 0..i.
        float inv_num = 1.f / (i + 1);
        float sig_img = sqrt(fmaxf((sum_of_sq_img_y - (sum_of_img_y*sum_of_img_y) * inv_num), 0.f) * inv_num) + 1e-7f;
        float mean_img = sum_of_img_y * inv_num;
        float sig_tmp_over_sig_img = sig_tmp / sig_img;
        // Folds both mean-removals into one term, so the Y distance below is
        // |(tmp - mean_tmp) - (sig_tmp/sig_img)*(img - mean_img)|.
        float faster = -mean_tmp + sig_tmp_over_sig_img * mean_img;
        // calculate distant
        score += 0.50f * abs(tmp_val.x - sig_tmp_over_sig_img*img_val.x + faster)
            + 0.25f * abs(img_val.y - tmp_val.y)
            + 0.25f *abs(img_val.z - tmp_val.z);
    }
    dists[idx] = score * inv_sample_num;
}
// Per-pose photometric distance without illumination normalization.
// One thread per pose: maps every template sample point into the camera
// image through the pose's homography (in_params = fx, fy, cx, cy),
// fetches the bilinearly filtered pixel from tex_img, and accumulates a
// weighted L1 distance over the (Y, U, V) channels with weights
// 0.5 / 0.25 / 0.25.  The mean over all samples is written to dists[idx].
__global__
void calDistColorKernel(const Pose *poses,
                        float4 in_params,
                        float2 tmp_real,
                        int2 img_dim,
                        size_t num_poses,
                        size_t sample_num,
                        float inv_sample_num,
                        float *dists) {
    const unsigned int idx = blockIdx.x * blockDim.x + threadIdx.x;
    if (idx >= num_poses)
        return;
    // get pose parameter (rx offset by pi for projection)
    float rz0 = poses[idx].rz0;
    float rx = poses[idx].rx + 3.1415926f;
    float rz1 = poses[idx].rz1;
    float tx = poses[idx].tx;
    float ty = poses[idx].ty;
    float tz = poses[idx].tz;
    float cos_rz0 = cosf(rz0);
    float cos_rx = cosf(rx);
    float cos_rz1 = cosf(rz1);
    float sin_rz0 = sinf(rz0);
    float sin_rx = sinf(rx);
    float sin_rz1 = sinf(rz1);
    // z coordinate is y cross x, so add minus
    float r11 = cos_rz0 * cos_rz1 - sin_rz0 * cos_rx * sin_rz1;
    float r12 = -cos_rz0 * sin_rz1 - sin_rz0 * cos_rx * cos_rz1;
    float r21 = sin_rz0 * cos_rz1 + cos_rz0 * cos_rx * sin_rz1;
    float r22 = -sin_rz0 * sin_rz1 + cos_rz0 * cos_rx * cos_rz1;
    float r31 = sin_rx * sin_rz1;
    float r32 = sin_rx * cos_rz1;
    // final transfomration: homography t = K * [r1 r2 t]
    float t0 = in_params.x*r11 + in_params.z*r31;
    float t1 = in_params.x*r12 + in_params.z*r32;
    float t3 = in_params.x*tx + in_params.z*tz;
    float t4 = in_params.y*r21 + in_params.w*r31;
    float t5 = in_params.y*r22 + in_params.w*r32;
    float t7 = in_params.y*ty + in_params.w*tz;
    float t8 = r31;
    float t9 = r32;
    float t11 = tz;
    // calculate distance.  Use float literals and fabsf so the whole
    // accumulation stays in single precision: the original 0.0/0.50/0.25
    // double literals forced a float->double->float round trip per sample
    // (the sibling calDistInvarKernel already uses 0.50f/0.25f).
    float score = 0.0f;
    for (int i = 0; i < sample_num; ++i) {
        // calculate coordinate on camera image
        float inv_z = 1 / (t8*const_tmp_coors[i].x + t9*const_tmp_coors[i].y + t11);
        float u = (t0*const_tmp_coors[i].x + t1*const_tmp_coors[i].y + t3) * inv_z;
        float v = (t4*const_tmp_coors[i].x + t5*const_tmp_coors[i].y + t7) * inv_z;
        // get value from constant memory
        float3 tmp_val = const_tmp_vals[i];
        // get value from texture
        // have to add 0.5f for coordinates (see E.2 Linear Filtering in CUDA Programming Guide)
        float4 img_val = tex2D(tex_img, u + 0.5f, v + 0.5f);
        // calculate distant
        score += 0.50f * fabsf(img_val.x - tmp_val.x)
            + 0.25f * fabsf(img_val.y - tmp_val.y)
            + 0.25f * fabsf(img_val.z - tmp_val.z);
    }
    dists[idx] = score * inv_sample_num;
}
// Mean of (up to) the last three entries of the minimum-distance history.
// iter_times caps how many trailing entries are averaged (clamped to 3).
// Returns 0 when no entries are available (empty history or
// iter_times <= 0); the original divided by zero in that case.
float calLastThreeTermsMean(thrust::host_vector<float> &min_dists,
                            int iter_times) {
    float sum = 0;
    int count = 0;
    iter_times = (iter_times < 3) ? iter_times : 3;
    // Walk backwards so the most recent iterations are averaged.
    for (auto it = min_dists.rbegin(); it != min_dists.rend() && count < iter_times; ++it, ++count) {
        sum += *it;
    }
    if (count == 0)
        return 0.f;  // guard against division by zero
    return sum / count;
}
// Build a 4x4 column-major (OpenGL-style: translation in elements 12-14,
// element index = row + 4*column) extrinsic matrix for a pose.
// Rotation is the same Z-X-Z Euler composition used by the CUDA kernels,
// with rx offset by pi to match the projection convention.
void getExMat(const Pose &pose,
              double *ex_mat) {
    float rz0 = pose.rz0;
    float rx = pose.rx + 3.1415926f;
    float rz1 = pose.rz1;
    float sin_rz0 = sin(rz0);
    float cos_rz0 = cos(rz0);
    float sin_rx = sin(rx);
    float cos_rx = cos(rx);
    float sin_rz1 = sin(rz1);
    float cos_rz1 = cos(rz1);
    // row 0 of [R | t]
    ex_mat[0] = double(cos_rz0*cos_rz1 - sin_rz0*cos_rx*sin_rz1);
    ex_mat[4] = double(-cos_rz0*sin_rz1 - sin_rz0*cos_rx*cos_rz1);
    ex_mat[8] = double(sin_rz0*sin_rx);
    ex_mat[12] = double(pose.tx);
    // row 1 of [R | t]
    ex_mat[1] = double(sin_rz0*cos_rz1 + cos_rz0*cos_rx*sin_rz1);
    ex_mat[5] = double(-sin_rz0*sin_rz1 + cos_rz0*cos_rx*cos_rz1);
    ex_mat[9] = double(-cos_rz0*sin_rx);
    ex_mat[13] = double(pose.ty);
    // row 2 of [R | t]
    ex_mat[2] = double(sin_rx*sin_rz1);
    ex_mat[6] = double(sin_rx*cos_rz1);
    ex_mat[10] = double(cos_rx);
    ex_mat[14] = double(pose.tz);
    // homogeneous bottom row (0, 0, 0, 1)
    ex_mat[3] = 0.0;
    ex_mat[7] = 0.0;
    ex_mat[11] = 0.0;
    ex_mat[15] = 1.0;
}
// Prune the pose set to those whose distance lies within an
// epsilon-derived band above min_dist.  If more than 27000 poses would
// survive, the threshold is repeatedly tightened by 1% to bound memory;
// if none survive, only the single best pose is kept.  Returns true when
// the initial threshold admitted more than 10% of the set.
// NOTE(review): min_dist is taken by value and mutated locally -- the
// caller's value is unaffected.
bool getPosesByDistance(const thrust::device_vector<float> &dists,
                        float min_dist,
                        float epsilon,
                        thrust::device_vector<Pose> *poses,
                        size_t *num_poses) {
    // get initial threhold (linear in the current net resolution epsilon)
    const float threshold = 0.19 * epsilon + 0.01372;
    min_dist += threshold;
    // count reductions
    bool too_high_percentage = false;
    bool first = true;
    size_t count = INT_MAX;
    thrust::device_vector<bool> survivals(*num_poses, false);
    const size_t BLOCK_NUM = (*num_poses - 1) / BLOCK_SIZE + 1;
    while (true) {
        // Mark every pose below the (possibly tightened) threshold.
        getPosesByDistanceKernel<<<BLOCK_NUM, BLOCK_SIZE>>>(thrust::raw_pointer_cast(dists.data()),
                                                            min_dist,
                                                            dists.size(),
                                                            thrust::raw_pointer_cast(survivals.data()));
        count = thrust::count(survivals.begin(), survivals.end(), true);
        if (first) {
            // Record whether the very first pass kept over 10% of poses.
            float percentage = float(count) / *num_poses;
            too_high_percentage = (percentage > 0.1f);
            first = false;
        }
        // reduce the size of pose set to prevent from out of memory
        if (count < 27000) {
            if (count == 0) {
                // Nothing survived: fall back to the single best pose.
                auto dists_iter = thrust::min_element(dists.begin(), dists.end());
                unsigned int position = dists_iter - dists.begin();
                thrust::device_vector<Pose> temp_pose(1);
                temp_pose[0] = (*poses)[position];
                *poses = temp_pose;
                *num_poses = 1;
            }
            else {
                // prune poses (ValidFunctor presumably selects the tuples
                // whose survival flag is cleared -- defined elsewhere)
                auto zip_it_valid_end = thrust::remove_if(
                    thrust::make_zip_iterator(thrust::make_tuple(poses->begin(), survivals.begin())),
                    thrust::make_zip_iterator(thrust::make_tuple(poses->end(), survivals.end())),
                    ValidFunctor()
                );
                poses->erase(thrust::get<0>(zip_it_valid_end.get_iterator_tuple()), poses->end());
                *num_poses = count;
            }
            break;
        }
        // Too many survivors: tighten the threshold by 1% and retry.
        min_dist *= 0.99f;
    }
    return too_high_percentage;
}
// Mark every pose whose distance falls strictly below the threshold.
// One thread per pose; writes the resulting flag into survivals[].
__global__
void getPosesByDistanceKernel(const float *dists,
                              float threshold,
                              size_t num_poses,
                              bool *survivals) {
    const int tid = blockDim.x * blockIdx.x + threadIdx.x;
    if (tid >= num_poses)
        return;
    survivals[tid] = dists[tid] < threshold;
}
// Refinement step: shrink the search net by `factor`, then grow the pose
// set by `multiple` (= 80) randomly perturbed neighbors per surviving
// pose.  Expansions that leave the net bounds or project badly are
// flagged invalid by the kernel and compacted away; *num_poses is
// updated to the surviving count.
void expandPoses(float factor,
                 thrust::device_vector<Pose> *poses,
                 ApeParams *ape_params,
                 size_t *num_poses) {
    // number of expand points per source pose
    const int multiple = 80;
    size_t new_num_poses = (*num_poses) * (multiple + 1);
    // decrease step (refine the net resolution)
    ape_params->ShrinkNet(factor);
    // expand origin set: one thread per source pose fills its 80 slots
    const size_t BLOCK_NUM = ((*num_poses) - 1) / BLOCK_SIZE + 1;
    // Minimum acceptable projected area: 1% of the image.
    int area_thres = 0.01 * ape_params->iw * ape_params->ih;
    thrust::device_vector<bool> valids(new_num_poses, true);
    poses->resize(new_num_poses);
    expandPosesKernel<<<BLOCK_NUM, BLOCK_SIZE>>>(*num_poses,
                                                 new_num_poses,
                                                 *ape_params,
                                                 area_thres,
                                                 thrust::raw_pointer_cast(poses->data()),
                                                 thrust::raw_pointer_cast(valids.data()));
    // remove invalid terms
    auto zip_it_valid_end = thrust::remove_if(
        thrust::make_zip_iterator(thrust::make_tuple(poses->begin(), valids.begin())),
        thrust::make_zip_iterator(thrust::make_tuple(poses->end(), valids.end())),
        ValidFunctor()
    );
    poses->erase(thrust::get<0>(zip_it_valid_end.get_iterator_tuple()), poses->end());
    *num_poses = poses->size();
}
// Randomly expand every surviving pose into neighbors on the refined
// (shrunken) net.  Thread idx owns source pose idx and fills the slots
// i = idx + k*num_poses (k = 1..multiple), perturbing each parameter by
// -1/0/+1 refined steps (rx and tz use the net's non-uniform step
// parameterization).  A slot is marked invalid when the perturbed pose
// leaves the net bounds, or when the projected template exits the image
// or covers an area of at most area_thres pixels.
__global__
void expandPosesKernel(size_t num_poses,
                       size_t new_num_poses,
                       ApeParams ape_params,
                       int area_thres,
                       Pose *poses,
                       bool *valids) {
    const unsigned int idx = blockIdx.x * blockDim.x + threadIdx.x;
    if (idx >= num_poses)
        return;
    curandState_t state;
    curand_init(idx, 0, 0, &state);
    float ori_rz0 = poses[idx].rz0;
    float ori_rx = poses[idx].rx;
    float ori_rz1 = poses[idx].rz1;
    float ori_tx = poses[idx].tx;
    float ori_ty = poses[idx].ty;
    float ori_tz = poses[idx].tz;
    for (unsigned int i = idx + num_poses; i < new_num_poses; i += num_poses) {
        // rz0: uniform step; (curand % 3 - 1) picks -1, 0 or +1
        Pose pose;
        pose.rz0 = ori_rz0 + (curand(&state) % 3 - 1.f)*ape_params.step.rz0;
        // rx: step taken in the 1/(2 - sin(rx)) parameterization of the net;
        // is_plus*is_plus keeps rx unchanged when the draw is 0
        float is_plus = (curand(&state) % 3 - 1.f);
        float sin_ori_rx = 2 - 1 / (1 / (2 - sinf(ori_rx)) + is_plus*ape_params.step.rx);
        pose.rx = ori_rx + is_plus * is_plus * (asinf(sin_ori_rx) - ori_rx);
        // rz1: uniform step
        pose.rz1 = ori_rz1 + (curand(&state) % 3 - 1.f)*ape_params.step.rz1;
        // tx, ty: step scaled by the effective depth of the tilted template
        // (sqrtf instead of the original double-precision sqrt)
        float weight = ori_tz + sqrtf(ape_params.tmp_real_w*ape_params.tmp_real_w + ape_params.tmp_real_h*ape_params.tmp_real_h) * sinf(ori_rx);
        pose.tx = ori_tx + (curand(&state) % 3 - 1.f) * weight * ape_params.step.tx;
        pose.ty = ori_ty + (curand(&state) % 3 - 1.f) * weight * ape_params.step.ty;
        // tz: step in the reciprocal-depth parameterization
        is_plus = (curand(&state) % 3 - 1.f);
        float denom_tz = 1 - is_plus * ape_params.step.tz * ori_tz;
        pose.tz = ori_tz + is_plus * ape_params.step.tz * (ori_tz * ori_tz) / denom_tz;
        poses[i] = pose;
        // Reject perturbations that left the valid net region.
        bool valid = (denom_tz != 0)
            & (fabsf(sin_ori_rx) <= 1)
            & (pose.tz >= ape_params.min_tz)
            & (pose.tz <= ape_params.max_tz)
            & (pose.rx >= ape_params.min_rx)
            & (pose.rx <= ape_params.max_rx);
        if (valid == false) {
            // BUG FIX: the original `return`ed here, abandoning all later
            // expansion slots of this thread while their valids[] entries
            // stayed at the host-initialized value `true` -- letting
            // never-written poses survive the pruning pass.  Skip only
            // this slot instead.
            valids[i] = false;
            continue;
        }
        // calculate homography parameters (rx offset by pi for projection;
        // float literal -- the original used a double literal here)
        pose.rx += 3.1415926f;
        // pre-compute sin and cos values
        float cos_rz0 = cosf(pose.rz0);
        float cos_rx = cosf(pose.rx);
        float cos_rz1 = cosf(pose.rz1);
        float sin_rz0 = sinf(pose.rz0);
        float sin_rx = sinf(pose.rx);
        float sin_rz1 = sinf(pose.rz1);
        // z coordinate is y cross x, so add minus
        float r11 = cos_rz0 * cos_rz1 - sin_rz0 * cos_rx * sin_rz1;
        float r12 = -cos_rz0 * sin_rz1 - sin_rz0 * cos_rx * cos_rz1;
        float r21 = sin_rz0 * cos_rz1 + cos_rz0 * cos_rx * sin_rz1;
        float r22 = -sin_rz0 * sin_rz1 + cos_rz0 * cos_rx * cos_rz1;
        float r31 = sin_rx * sin_rz1;
        float r32 = sin_rx * cos_rz1;
        // final transfomration: homography t = K * [r1 r2 t]
        float t0 = ape_params.fx*r11 + ape_params.cx*r31;
        float t1 = ape_params.fx*r12 + ape_params.cx*r32;
        float t3 = ape_params.fx*pose.tx + ape_params.cx*pose.tz;
        float t4 = ape_params.fy*r21 + ape_params.cy*r31;
        float t5 = ape_params.fy*r22 + ape_params.cy*r32;
        float t7 = ape_params.fy*pose.ty + ape_params.cy*pose.tz;
        float t8 = r31;
        float t9 = r32;
        float t11 = pose.tz;
        // reject transformations make template out of boundary
        float inv_c1z = 1 / (t8*(-ape_params.tmp_real_w) + t9*(-ape_params.tmp_real_h) + t11);
        float c1x = (t0*(-ape_params.tmp_real_w) + t1*(-ape_params.tmp_real_h) + t3) * inv_c1z;
        float c1y = (t4*(-ape_params.tmp_real_w) + t5*(-ape_params.tmp_real_h) + t7) * inv_c1z;
        float inv_c2z = 1 / (t8*(+ape_params.tmp_real_w) + t9*(-ape_params.tmp_real_h) + t11);
        float c2x = (t0*(+ape_params.tmp_real_w) + t1*(-ape_params.tmp_real_h) + t3) * inv_c2z;
        float c2y = (t4*(+ape_params.tmp_real_w) + t5*(-ape_params.tmp_real_h) + t7) * inv_c2z;
        float inv_c3z = 1 / (t8*(+ape_params.tmp_real_w) + t9*(+ape_params.tmp_real_h) + t11);
        float c3x = (t0*(+ape_params.tmp_real_w) + t1*(+ape_params.tmp_real_h) + t3) * inv_c3z;
        float c3y = (t4*(+ape_params.tmp_real_w) + t5*(+ape_params.tmp_real_h) + t7) * inv_c3z;
        float inv_c4z = 1 / (t8*(-ape_params.tmp_real_w) + t9*(+ape_params.tmp_real_h) + t11);
        float c4x = (t0*(-ape_params.tmp_real_w) + t1*(+ape_params.tmp_real_h) + t3) * inv_c4z;
        float c4y = (t4*(-ape_params.tmp_real_w) + t5*(+ape_params.tmp_real_h) + t7) * inv_c4z;
        float minx = fminf(c1x, fminf(c2x, fminf(c3x, c4x)));
        float maxx = fmaxf(c1x, fmaxf(c2x, fmaxf(c3x, c4x)));
        float miny = fminf(c1y, fminf(c2y, fminf(c3y, c4y)));
        float maxy = fmaxf(c1y, fmaxf(c2y, fmaxf(c3y, c4y)));
        // reject transformations make marker too small in screen
        // (shoelace formula over the projected quad)
        float two_area = (c1x - c2x) * (c1y + c2y)
            + (c2x - c3x) * (c2y + c3y)
            + (c3x - c4x) * (c3y + c4y)
            + (c4x - c1x) * (c4y + c1y);
        float area = fabsf(two_area / 2);
        const int margin = 1;
        if (area > area_thres
            && (minx >= margin)
            && (maxx <= ape_params.iw - 1 - margin)
            && (miny >= margin)
            && (maxy <= ape_params.ih - 1 - margin))
            valids[i] = true;
        else
            valids[i] = false;
    }
}
// Gather the tz (depth) component of every pose into a flat float array.
// One thread per pose.
__global__
void fetchTzKernel(const Pose *poses,
                   size_t num_poses,
                   float *tzs) {
    const unsigned int tid = blockDim.x * blockIdx.x + threadIdx.x;
    if (tid < num_poses) {
        tzs[tid] = poses[tid].tz;
    }
}
} // namespace ape
|
dd4ae3ae148f32af3f21e15ab6118c2598e09f4a.hip | // !!! This is a file automatically generated by hipify!!!
/*************************************************************************
> File Name: 05_0304.cu
> Author: dong xu
> Mail: gwmxyd@163.com
> Created Time: 20160330 133715
************************************************************************/
#include <stdio.h>
#include <hip/hip_runtime.h>
const int gridSize=7,blockSize=1;
hipError_t addWithCuda(int *sum);
// Demo kernel: each thread i busy-waits for ~10^i empty loop iterations,
// then atomically adds its index to *sum and prints the running total.
// NOTE(review): the printf reads *sum non-atomically after the atomicAdd,
// so the printed totals can interleave arbitrarily between threads; the
// empty delay loop may also be removed entirely by the optimizer.
__global__ void addKernel(int *sum)
{
    int i = blockIdx.x*blockDim.x +threadIdx.x;
    int j=0;
    int tsum = 1;
    //atomicAdd(sum,i);
    //tsum = *tsum + i;
    //atomicCAS(sum,tsum,*sum);
    // tsum = 10^i
    for(j=0;j<i;j++)
        tsum *=10;
    // busy-wait proportional to 10^i
    for(j=0;j<tsum;j++);
    atomicAdd(sum,i);
    printf("thread %d:tsum=%d,sum=%d\n",i,tsum,*sum);
}
// Demo driver: runs addWithCuda once (each kernel thread adds its index
// to `sum` after a staggered delay) and prints the final accumulated sum.
int main()
{
    int sum = 1;
    hipError_t cudaStatus;
    int device_count = 0;
    hipDeviceProp_t prop;
    cudaStatus = hipGetDeviceCount(&device_count);
    // Probe the properties of every device (values intentionally unused).
    for (int dev = 0; dev < device_count; ++dev)
    {
        hipGetDeviceProperties(&prop, dev);
    }
    cudaStatus = addWithCuda(&sum);
    if (cudaStatus != hipSuccess)
    {
        fprintf(stderr, "addWithCuda failed!");
        return 1;
    }
    printf("Final sum=%d\n", sum);
    cudaStatus = hipDeviceReset();
    if (cudaStatus != hipSuccess)
    {
        fprintf(stderr, "hipDeviceReset failed!");
        return 1;
    }
    return 0;
}
// Copies *sum to the device, launches addKernel on gridSize x blockSize
// threads, and copies the accumulated result back into *sum.  Returns the
// first HIP error encountered; dev_sum is always released via the Error
// label.
hipError_t addWithCuda(int *sum)
{
    int *dev_sum = 0;
    hipError_t cudaStatus;
    cudaStatus = hipSetDevice(0);
    if (cudaStatus != hipSuccess)
    {
        fprintf(stderr, "hipSetDevice failed! Do you have a CUDA-capable GPU installed?");
        goto Error;
    }
    cudaStatus = hipMalloc((void**)&dev_sum, sizeof(int));
    if (cudaStatus != hipSuccess)
    {
        fprintf(stderr, "hipMalloc failed!");
        goto Error;
    }
    cudaStatus = hipMemcpy(dev_sum,sum,sizeof(int), hipMemcpyHostToDevice);
    if (cudaStatus != hipSuccess)
    {
        fprintf(stderr, "hipMemcpy failed!");
        goto Error;
    }
    printf("addKernel<<<%d,%d>>>(%d)\n",gridSize,blockSize,*sum);
    hipLaunchKernelGGL(( addKernel), dim3(gridSize),dim3(blockSize), 0, 0, dev_sum);
    // Launch-configuration errors are only reported via hipGetLastError();
    // the original never checked the launch at all.
    cudaStatus = hipGetLastError();
    if (cudaStatus != hipSuccess)
    {
        fprintf(stderr, "addKernel launch failed: %s\n", hipGetErrorString(cudaStatus));
        goto Error;
    }
    // BUG FIX: the synchronize status was previously overwritten unchecked
    // by the following hipMemcpy, silently discarding in-kernel faults.
    cudaStatus = hipDeviceSynchronize();
    if (cudaStatus != hipSuccess)
    {
        fprintf(stderr, "hipDeviceSynchronize failed!");
        goto Error;
    }
    cudaStatus = hipMemcpy(sum,dev_sum,sizeof(int),hipMemcpyDeviceToHost);
    if (cudaStatus != hipSuccess)
    {
        fprintf(stderr, "hipMemcpy failed!");
        goto Error;
    }
Error:
    hipFree(dev_sum);
    return cudaStatus;
}
| dd4ae3ae148f32af3f21e15ab6118c2598e09f4a.cu | /*************************************************************************
> File Name: 05_0304.cu
> Author: dong xu
> Mail: gwmxyd@163.com
> Created Time: 2016年03月30日 星期三 13时37分15秒
************************************************************************/
#include <stdio.h>
#include <cuda_runtime.h>
const int gridSize=7,blockSize=1;
cudaError_t addWithCuda(int *sum);
// Demo kernel: each thread i busy-waits for ~10^i empty loop iterations,
// then atomically adds its index to *sum and prints the running total.
// NOTE(review): the printf reads *sum non-atomically after the atomicAdd,
// so the printed totals can interleave arbitrarily between threads; the
// empty delay loop may also be removed entirely by the optimizer.
__global__ void addKernel(int *sum)
{
    int i = blockIdx.x*blockDim.x +threadIdx.x;
    int j=0;
    int tsum = 1;
    //atomicAdd(sum,i);
    //tsum = *tsum + i;
    //atomicCAS(sum,tsum,*sum);
    // tsum = 10^i
    for(j=0;j<i;j++)
        tsum *=10;
    // busy-wait proportional to 10^i
    for(j=0;j<tsum;j++);
    atomicAdd(sum,i);
    printf("thread %d:tsum=%d,sum=%d\n",i,tsum,*sum);
}
// Demo driver: runs addWithCuda once (each kernel thread adds its index
// to `sum` after a staggered delay) and prints the final accumulated sum.
int main()
{
    int sum = 1;
    cudaError_t cudaStatus;
    int num = 0;
    cudaDeviceProp prop;
    cudaStatus = cudaGetDeviceCount(&num);
    // Probe the properties of every device (values intentionally unused).
    for(int i = 0;i<num;i++)
    {
        cudaGetDeviceProperties(&prop,i);
    }
    cudaStatus = addWithCuda(&sum);
    if (cudaStatus != cudaSuccess)
    {
        fprintf(stderr, "addWithCuda failed!");
        return 1;
    }
    printf("Final sum=%d\n",sum);
    // cudaThreadExit() has been deprecated since CUDA 4.0;
    // cudaDeviceReset() is the supported replacement.
    cudaStatus = cudaDeviceReset();
    if (cudaStatus != cudaSuccess)
    {
        fprintf(stderr, "cudaDeviceReset failed!");
        return 1;
    }
    return 0;
}
// Copies *sum to the device, launches addKernel on gridSize x blockSize
// threads, and copies the accumulated result back into *sum.  Returns the
// first CUDA error encountered; dev_sum is always released via the Error
// label.
cudaError_t addWithCuda(int *sum)
{
    int *dev_sum = 0;
    cudaError_t cudaStatus;
    cudaStatus = cudaSetDevice(0);
    if (cudaStatus != cudaSuccess)
    {
        fprintf(stderr, "cudaSetDevice failed! Do you have a CUDA-capable GPU installed?");
        goto Error;
    }
    cudaStatus = cudaMalloc((void**)&dev_sum, sizeof(int));
    if (cudaStatus != cudaSuccess)
    {
        fprintf(stderr, "cudaMalloc failed!");
        goto Error;
    }
    cudaStatus = cudaMemcpy(dev_sum,sum,sizeof(int), cudaMemcpyHostToDevice);
    if (cudaStatus != cudaSuccess)
    {
        fprintf(stderr, "cudaMemcpy failed!");
        goto Error;
    }
    printf("addKernel<<<%d,%d>>>(%d)\n",gridSize,blockSize,*sum);
    addKernel<<<gridSize,blockSize>>>(dev_sum);
    // Launch-configuration errors are only reported via cudaGetLastError();
    // the original never checked the launch at all.
    cudaStatus = cudaGetLastError();
    if (cudaStatus != cudaSuccess)
    {
        fprintf(stderr, "addKernel launch failed: %s\n", cudaGetErrorString(cudaStatus));
        goto Error;
    }
    // cudaThreadSynchronize() is deprecated; cudaDeviceSynchronize() is
    // the supported replacement.  BUG FIX: the synchronize status was
    // previously overwritten unchecked by the following cudaMemcpy,
    // silently discarding in-kernel faults.
    cudaStatus = cudaDeviceSynchronize();
    if (cudaStatus != cudaSuccess)
    {
        fprintf(stderr, "cudaDeviceSynchronize failed!");
        goto Error;
    }
    cudaStatus = cudaMemcpy(sum,dev_sum,sizeof(int),cudaMemcpyDeviceToHost);
    if (cudaStatus != cudaSuccess)
    {
        fprintf(stderr, "cudaMemcpy failed!");
        goto Error;
    }
Error:
    cudaFree(dev_sum);
    return cudaStatus;
}
|
8bbb698c516ca88b1049929c1160b8dafd7c1ea1.hip | // !!! This is a file automatically generated by hipify!!!
// Copyright (c) 2009-2016 The Regents of the University of Michigan
// This file is part of the HOOMD-blue project, released under the BSD 3-Clause License.
#include "ComputeFreeVolumeGPU.cuh"
#include "IntegratorHPMCMonoGPU.cuh"
#include "IntegratorHPMCMonoImplicitGPU.cuh"
#include "ShapeSphere.h"
#include "ShapeConvexPolygon.h"
#include "ShapePolyhedron.h"
#include "ShapeConvexPolyhedron.h"
#include "ShapeSpheropolyhedron.h"
#include "ShapeSpheropolygon.h"
#include "ShapeSimplePolygon.h"
#include "ShapeEllipsoid.h"
#include "ShapeFacetedSphere.h"
#include "ShapeSphinx.h"
#include "ShapeUnion.h"
namespace hpmc
{
namespace detail
{
//! HPMC kernels for ShapeUnion<ShapeSphere, 8>
// Explicit template instantiations: emit the free-volume, Monte Carlo
// update and implicit-depletant (count/accept-reject) kernel drivers for
// the 8-sphere union specialization in this translation unit.
template hipError_t gpu_hpmc_free_volume<ShapeUnion<ShapeSphere, 8> >(const hpmc_free_volume_args_t &args,
                                                  const typename ShapeUnion<ShapeSphere, 8> ::param_type *d_params);
template hipError_t gpu_hpmc_update<ShapeUnion<ShapeSphere, 8> >(const hpmc_args_t& args,
                                                  const typename ShapeUnion<ShapeSphere, 8> ::param_type *d_params);
template void gpu_hpmc_implicit_count_overlaps<ShapeUnion<ShapeSphere, 8> >(const hpmc_implicit_args_t& args,
                                                  const typename ShapeUnion<ShapeSphere, 8> ::param_type *d_params);
template hipError_t gpu_hpmc_implicit_accept_reject<ShapeUnion<ShapeSphere, 8> >(const hpmc_implicit_args_t& args,
                                                  const typename ShapeUnion<ShapeSphere, 8> ::param_type *d_params);
}; // end namespace detail
} // end namespace hpmc
| 8bbb698c516ca88b1049929c1160b8dafd7c1ea1.cu | // Copyright (c) 2009-2016 The Regents of the University of Michigan
// This file is part of the HOOMD-blue project, released under the BSD 3-Clause License.
#include "ComputeFreeVolumeGPU.cuh"
#include "IntegratorHPMCMonoGPU.cuh"
#include "IntegratorHPMCMonoImplicitGPU.cuh"
#include "ShapeSphere.h"
#include "ShapeConvexPolygon.h"
#include "ShapePolyhedron.h"
#include "ShapeConvexPolyhedron.h"
#include "ShapeSpheropolyhedron.h"
#include "ShapeSpheropolygon.h"
#include "ShapeSimplePolygon.h"
#include "ShapeEllipsoid.h"
#include "ShapeFacetedSphere.h"
#include "ShapeSphinx.h"
#include "ShapeUnion.h"
namespace hpmc
{
namespace detail
{
//! HPMC kernels for ShapeUnion<ShapeSphere, 8>
// Explicit template instantiations: emit the free-volume, Monte Carlo
// update and implicit-depletant (count/accept-reject) kernel drivers for
// the 8-sphere union specialization in this translation unit.
template cudaError_t gpu_hpmc_free_volume<ShapeUnion<ShapeSphere, 8> >(const hpmc_free_volume_args_t &args,
                                                  const typename ShapeUnion<ShapeSphere, 8> ::param_type *d_params);
template cudaError_t gpu_hpmc_update<ShapeUnion<ShapeSphere, 8> >(const hpmc_args_t& args,
                                                  const typename ShapeUnion<ShapeSphere, 8> ::param_type *d_params);
template void gpu_hpmc_implicit_count_overlaps<ShapeUnion<ShapeSphere, 8> >(const hpmc_implicit_args_t& args,
                                                  const typename ShapeUnion<ShapeSphere, 8> ::param_type *d_params);
template cudaError_t gpu_hpmc_implicit_accept_reject<ShapeUnion<ShapeSphere, 8> >(const hpmc_implicit_args_t& args,
                                                  const typename ShapeUnion<ShapeSphere, 8> ::param_type *d_params);
}; // end namespace detail
} // end namespace hpmc
|
cf2e7f37003161248c78e8aa671166ec42252b25.hip | // !!! This is a file automatically generated by hipify!!!
/*
#include <stdio.h>
#include <sstream>
#include <iostream>
#include <time.h>
#include <hip/hip_runtime.h>
#include <device_launch_parameters.h>
#include <rocblas.h>
#include <cudnn.h>
#include "yuv_data.h"
struct qvrcnn_data {
char weight[5 * 5 * 64];
int bias[64];
};//
void HWCN2NHWC(char *HWCN, char *NHWC, int H, int W, int C, int N)
{
int i, j, k, m;
for (i = 0;i < H;i++)
for (j = 0;j < W;j++)
for (k = 0;k < C;k++)
for (m = 0;m < N;m++)
NHWC[m*H*W*C + i*W*C + j*C + k] = HWCN[i*W*C*N + j*C*N + k*N + m];
}
struct qvrcnn_data* read_qvrcnn(void)
{
struct qvrcnn_data net_data_HWCN;
struct qvrcnn_data* net_data_NHWC = new struct qvrcnn_data;
FILE *fp = NULL;
if (fopen_s(&fp, "model\\qvrcnn_ppro_8bit_27.data", "rb"))
printf("open file failed\n");
fseek(fp, sizeof(int), SEEK_CUR);
fread(net_data_HWCN.weight, sizeof(char), 5 * 5 * 64, fp);
fread(net_data_NHWC->bias, sizeof(int), 64, fp);
fclose(fp);
HWCN2NHWC(net_data_HWCN.weight, net_data_NHWC->weight, 5, 5, 1, 64);//convert format
return net_data_NHWC;
}
int main(int argc, char** argv)
{
int num_gpus;
hipblasHandle_t cublasHandle;
cudnnHandle_t cudnnHandle;
cudnnTensorDescriptor_t dataTensor, conv1Tensor, conv1BiasTensor;//
cudnnFilterDescriptor_t conv1filterDesc;//
cudnnConvolutionDescriptor_t conv1Desc;//
cudnnConvolutionFwdAlgoPerf_t perfResults[8];
size_t sizeInBytes;
YChannel *ydata;
Res *ydata_reg;
qvrcnn_data* net_data;
int batch = 1, channel = 0, height = 240, width = 416, return_value, return_value1;
std::stringstream filename;
hipGetDeviceCount(&num_gpus);
hipSetDevice(0);
hipblasCreate(&cublasHandle);
cudnnCreate(&cudnnHandle);
cudnnCreateTensorDescriptor(&dataTensor);//
cudnnCreateTensorDescriptor(&conv1Tensor);
cudnnCreateTensorDescriptor(&conv1BiasTensor);
cudnnCreateFilterDescriptor(&conv1filterDesc);//
cudnnCreateConvolutionDescriptor(&conv1Desc);//
//
return_value = cudnnSetTensor4dDescriptor(conv1BiasTensor,
CUDNN_TENSOR_NHWC,
CUDNN_DATA_INT32,
1, 64, 1, 1);
return_value = cudnnSetTensor4dDescriptor(dataTensor,
CUDNN_TENSOR_NHWC,
CUDNN_DATA_INT8,
1, 1, height, width);
return_value = cudnnSetFilter4dDescriptor(conv1filterDesc,
CUDNN_DATA_INT8,
CUDNN_TENSOR_NHWC,
64, 1, 5, 5);
return_value = cudnnSetConvolution2dDescriptor(conv1Desc,
2, 2,
1, 1,
1, 1,
CUDNN_CROSS_CORRELATION,
CUDNN_DATA_INT32);
return_value = cudnnGetConvolution2dForwardOutputDim(conv1Desc,
dataTensor,
conv1filterDesc,
&batch, &channel, &height, &width);
return_value = cudnnSetTensor4dDescriptor(conv1Tensor,
CUDNN_TENSOR_NHWC,
CUDNN_DATA_FLOAT,
1, 64, height, width);
cudnnGetConvolutionForwardAlgorithmMaxCount(cudnnHandle, &return_value);
return_value = cudnnGetConvolutionForwardAlgorithm_v7(cudnnHandle,
dataTensor,
conv1filterDesc,
conv1Desc,
conv1Tensor,
8,
&return_value1,
perfResults);
return_value = cudnnGetConvolutionForwardWorkspaceSize(cudnnHandle,
dataTensor,
conv1filterDesc,
conv1Desc,
conv1Tensor,
perfResults[0].algo,
&sizeInBytes);
char *x;
float *conv,*conv_h;//
char *w;
int *b;//
void *d_cudnn_workspace = nullptr;//
float alpha = 1.0f, beta = 0.0f;
clock_t start_t, end_t;
double total_t;
int i;
//
net_data = read_qvrcnn();
filename << "data\\BlowingBubbles_intra_main_HM16.7_anchor_416x240_10_Q27.yuv";
ydata = get_Y(filename.str().c_str(), batch, height, width);
ydata_reg = regularize(ydata);
return_value = hipMalloc(&x, sizeof(char) * 416 * 240);//GPU
return_value = hipMalloc(&w, sizeof(char) * 5 * 5 * 64);
return_value = hipMalloc(&b, sizeof(int) * 64);
return_value = hipMalloc(&conv, sizeof(float) * 64 * 416 * 240);
conv_h = (float*)malloc(sizeof(float) * 64 * 416 * 240);
if (sizeInBytes > 0)
return_value = hipMalloc(&d_cudnn_workspace, sizeInBytes);//
return_value = hipMemcpyAsync(w, net_data->weight, sizeof(char) * 5 * 5 * 64, hipMemcpyHostToDevice);//GPU
return_value = hipMemcpyAsync(x, ydata_reg->data,sizeof(char) * ydata->frames*ydata->h*ydata->w, hipMemcpyHostToDevice);//GPU
start_t = clock();
for (i = 0;i < 10000;i++)
{
return_value = cudnnConvolutionForward(cudnnHandle, &alpha, dataTensor,
x, conv1filterDesc, w, conv1Desc,
perfResults[0].algo, d_cudnn_workspace, sizeInBytes, &beta,
conv1Tensor, conv);//
return_value = hipDeviceSynchronize();//GPU
}
end_t = clock();
total_t = (double)(end_t - start_t) / CLOCKS_PER_SEC;
printf("%f", total_t);
return_value = hipMemcpy(conv_h, conv, sizeof(float) * 64 * 416 * 240, hipMemcpyDeviceToHost);
//debug
//hipMemcpy(&conv1.pconv[0], d_pconv1, sizeof(float) * conv1.pconv.size(), hipMemcpyDeviceToHost);//GPU
return_value = hipFree(x);//
return_value = hipFree(w);
return_value = hipFree(b);
return_value = hipFree(conv);
return_value = hipFree(d_cudnn_workspace);
system("pause");
return 0;
}
*/
#include <stdio.h>
#include <sstream>
#include <iostream>
#include <time.h>
#include <hip/hip_runtime.h>
#include <device_launch_parameters.h>
#include <rocblas.h>
#include <cudnn.h>
#include "yuv_data.h"
// Host-side container for the first VRCNN convolution layer:
// 64 filters of size 5x5 over a single (luma) input channel, plus one
// bias per filter.
struct vrcnn_data {
    float weight[5 * 5 * 64];
    float bias[64];
};  // layer-1 weights and biases
// Reorder a dense 4-D tensor from HWCN layout (how the weights are stored
// on disk) to NHWC layout (what cuDNN expects here).
// Element (h, w, c, n) moves from HWCN[((h*W + w)*C + c)*N + n]
// to NHWC[((n*H + h)*W + w)*C + c].
void HWCN2NHWC(float *HWCN, float *NHWC, int H, int W, int C, int N)
{
    for (int n = 0; n < N; n++) {
        for (int h = 0; h < H; h++) {
            for (int w = 0; w < W; w++) {
                for (int c = 0; c < C; c++) {
                    NHWC[((n*H + h)*W + w)*C + c] = HWCN[((h*W + w)*C + c)*N + n];
                }
            }
        }
    }
}
// Load the first VRCNN convolution layer from disk.  Weights are stored
// HWCN in the file and converted to NHWC for cuDNN; biases are copied
// as-is.  Returns a heap-allocated vrcnn_data owned by the caller, or
// NULL when the model file cannot be opened.
struct vrcnn_data* read_vrcnn(void)
{
    struct vrcnn_data net_data_HWCN;
    struct vrcnn_data* net_data_NHWC = new struct vrcnn_data;
    FILE *fp = NULL;
    if (fopen_s(&fp, "model\\vrcnn_ppro_27.data", "rb"))
    {
        // BUG FIX: the original fell through and called fread() on a NULL
        // FILE* (crash) and leaked the allocation.  Fail fast instead.
        printf("open file failed\n");
        delete net_data_NHWC;
        return NULL;
    }
    fread(net_data_HWCN.weight, sizeof(float), 5 * 5 * 64, fp);
    fread(net_data_NHWC->bias, sizeof(float), 64, fp);
    fclose(fp);
    HWCN2NHWC(net_data_HWCN.weight, net_data_NHWC->weight, 5, 5, 1, 64);  // convert format
    return net_data_NHWC;
}
// Convert the 8-bit luma samples of `ydata` into zero-centered floats
// (sample - 128).  Returns a freshly malloc'd buffer of
// frames*h*w elements owned by the caller.
float *regularizef(YChannel *ydata)
{
    const int total = ydata->frames * ydata->h * ydata->w;
    float *centered = (float*)malloc(sizeof(float) * total);
    for (int n = 0; n < total; n++)
        centered[n] = (int)ydata->ImgData[n] - 128;
    return centered;
}
int main(int argc, char** argv)
{
int num_gpus;
hipblasHandle_t cublasHandle;
cudnnHandle_t cudnnHandle;
cudnnTensorDescriptor_t dataTensor, conv1Tensor, conv1BiasTensor;//
cudnnFilterDescriptor_t conv1filterDesc;//
cudnnConvolutionDescriptor_t conv1Desc;//
cudnnConvolutionFwdAlgoPerf_t perfResults[8];
size_t sizeInBytes;
YChannel *ydata;
float *ydata_reg;
vrcnn_data* net_data;
int batch = 1, channel = 0, height = 240, width = 416, return_value, return_value1;
std::stringstream filename;
hipGetDeviceCount(&num_gpus);
hipSetDevice(0);
hipblasCreate(&cublasHandle);
cudnnCreate(&cudnnHandle);
cudnnCreateTensorDescriptor(&dataTensor);//
cudnnCreateTensorDescriptor(&conv1Tensor);
cudnnCreateTensorDescriptor(&conv1BiasTensor);
cudnnCreateFilterDescriptor(&conv1filterDesc);//
cudnnCreateConvolutionDescriptor(&conv1Desc);//
//
return_value = cudnnSetTensor4dDescriptor(conv1BiasTensor,
CUDNN_TENSOR_NHWC,
CUDNN_DATA_FLOAT,
1, 64, 1, 1);
return_value = cudnnSetTensor4dDescriptor(dataTensor,
CUDNN_TENSOR_NHWC,
CUDNN_DATA_FLOAT,
1, 1, height, width);
return_value = cudnnSetFilter4dDescriptor(conv1filterDesc,
CUDNN_DATA_FLOAT,
CUDNN_TENSOR_NHWC,
64, 1, 5, 5);
return_value = cudnnSetConvolution2dDescriptor(conv1Desc,
2, 2,
1, 1,
1, 1,
CUDNN_CROSS_CORRELATION,
CUDNN_DATA_FLOAT);
return_value = cudnnGetConvolution2dForwardOutputDim(conv1Desc,
dataTensor,
conv1filterDesc,
&batch, &channel, &height, &width);
return_value = cudnnSetTensor4dDescriptor(conv1Tensor,
CUDNN_TENSOR_NHWC,
CUDNN_DATA_FLOAT,
1, 64, height, width);
cudnnGetConvolutionForwardAlgorithmMaxCount(cudnnHandle, &return_value);
return_value = cudnnGetConvolutionForwardAlgorithm_v7(cudnnHandle,
dataTensor,
conv1filterDesc,
conv1Desc,
conv1Tensor,
8,
&return_value1,
perfResults);
return_value = cudnnGetConvolutionForwardWorkspaceSize(cudnnHandle,
dataTensor,
conv1filterDesc,
conv1Desc,
conv1Tensor,
perfResults[0].algo,
&sizeInBytes);
char *x;
float *conv, *conv_h;//
float *w;
int *b;//
void *d_cudnn_workspace = nullptr;//
float alpha = 1.0f, beta = 0.0f;
clock_t start_t, end_t;
double total_t;
int i;
//
net_data = read_vrcnn();
filename << "data\\BlowingBubbles_intra_main_HM16.7_anchor_416x240_10_Q27.yuv";
ydata = get_Y(filename.str().c_str(), batch, height, width);
ydata_reg = regularizef(ydata);
return_value = hipMalloc(&x, sizeof(float) * 416 * 240);//GPU
return_value = hipMalloc(&w, sizeof(float) * 5 * 5 * 64);
return_value = hipMalloc(&b, sizeof(float) * 64);
return_value = hipMalloc(&conv, sizeof(float) * 64 * 416 * 240);
conv_h = (float*)malloc(sizeof(float) * 64 * 416 * 240);
if (sizeInBytes > 0)
return_value = hipMalloc(&d_cudnn_workspace, sizeInBytes);//
return_value = hipMemcpyAsync(w, net_data->weight, sizeof(float) * 5 * 5 * 64, hipMemcpyHostToDevice);//GPU
return_value = hipMemcpyAsync(x, ydata_reg, sizeof(float) * ydata->frames*ydata->h*ydata->w, hipMemcpyHostToDevice);//GPU
start_t = clock();
for (i = 0;i < 10000;i++)
{
return_value = cudnnConvolutionForward(cudnnHandle, &alpha, dataTensor,
x, conv1filterDesc, w, conv1Desc,
perfResults[0].algo, d_cudnn_workspace, sizeInBytes, &beta,
conv1Tensor, conv);//
return_value = hipDeviceSynchronize();//GPU
}
end_t = clock();
total_t = (double)(end_t - start_t) / CLOCKS_PER_SEC;
printf("%f", total_t);
return_value = hipMemcpy(conv_h, conv, sizeof(float) * 64 * 416 * 240, hipMemcpyDeviceToHost);
//debug
//hipMemcpy(&conv1.pconv[0], d_pconv1, sizeof(float) * conv1.pconv.size(), hipMemcpyDeviceToHost);//GPU
return_value = hipFree(x);//
return_value = hipFree(w);
return_value = hipFree(b);
return_value = hipFree(conv);
return_value = hipFree(d_cudnn_workspace);
system("pause");
return 0;
} | cf2e7f37003161248c78e8aa671166ec42252b25.cu | /*
#include <stdio.h>
#include <sstream>
#include <iostream>
#include <time.h>
#include <cuda_runtime.h>
#include <device_launch_parameters.h>
#include <cublas_v2.h>
#include <cudnn.h>
#include "yuv_data.h"
struct qvrcnn_data {
char weight[5 * 5 * 64];
int bias[64];
};//实验性,仅读取第一层
void HWCN2NHWC(char *HWCN, char *NHWC, int H, int W, int C, int N)
{
int i, j, k, m;
for (i = 0;i < H;i++)
for (j = 0;j < W;j++)
for (k = 0;k < C;k++)
for (m = 0;m < N;m++)
NHWC[m*H*W*C + i*W*C + j*C + k] = HWCN[i*W*C*N + j*C*N + k*N + m];
}
struct qvrcnn_data* read_qvrcnn(void)
{
struct qvrcnn_data net_data_HWCN;
struct qvrcnn_data* net_data_NHWC = new struct qvrcnn_data;
FILE *fp = NULL;
if (fopen_s(&fp, "model\\qvrcnn_ppro_8bit_27.data", "rb"))
printf("open file failed\n");
fseek(fp, sizeof(int), SEEK_CUR);
fread(net_data_HWCN.weight, sizeof(char), 5 * 5 * 64, fp);
fread(net_data_NHWC->bias, sizeof(int), 64, fp);
fclose(fp);
HWCN2NHWC(net_data_HWCN.weight, net_data_NHWC->weight, 5, 5, 1, 64);//convert format
return net_data_NHWC;
}
int main(int argc, char** argv)
{
int num_gpus;
cublasHandle_t cublasHandle;
cudnnHandle_t cudnnHandle;
cudnnTensorDescriptor_t dataTensor, conv1Tensor, conv1BiasTensor;//数据和偏置描述符
cudnnFilterDescriptor_t conv1filterDesc;//权重描述符
cudnnConvolutionDescriptor_t conv1Desc;//卷积描述符
cudnnConvolutionFwdAlgoPerf_t perfResults[8];
size_t sizeInBytes;
YChannel *ydata;
Res *ydata_reg;
qvrcnn_data* net_data;
int batch = 1, channel = 0, height = 240, width = 416, return_value, return_value1;
std::stringstream filename;
cudaGetDeviceCount(&num_gpus);
cudaSetDevice(0);
cublasCreate(&cublasHandle);
cudnnCreate(&cudnnHandle);
cudnnCreateTensorDescriptor(&dataTensor);//初始化张量描述符
cudnnCreateTensorDescriptor(&conv1Tensor);
cudnnCreateTensorDescriptor(&conv1BiasTensor);
cudnnCreateFilterDescriptor(&conv1filterDesc);//初始化权重描述符
cudnnCreateConvolutionDescriptor(&conv1Desc);//初始化卷积描述符
//设置卷积描述符
return_value = cudnnSetTensor4dDescriptor(conv1BiasTensor,
CUDNN_TENSOR_NHWC,
CUDNN_DATA_INT32,
1, 64, 1, 1);
return_value = cudnnSetTensor4dDescriptor(dataTensor,
CUDNN_TENSOR_NHWC,
CUDNN_DATA_INT8,
1, 1, height, width);
return_value = cudnnSetFilter4dDescriptor(conv1filterDesc,
CUDNN_DATA_INT8,
CUDNN_TENSOR_NHWC,
64, 1, 5, 5);
return_value = cudnnSetConvolution2dDescriptor(conv1Desc,
2, 2,
1, 1,
1, 1,
CUDNN_CROSS_CORRELATION,
CUDNN_DATA_INT32);
return_value = cudnnGetConvolution2dForwardOutputDim(conv1Desc,
dataTensor,
conv1filterDesc,
&batch, &channel, &height, &width);
return_value = cudnnSetTensor4dDescriptor(conv1Tensor,
CUDNN_TENSOR_NHWC,
CUDNN_DATA_FLOAT,
1, 64, height, width);
cudnnGetConvolutionForwardAlgorithmMaxCount(cudnnHandle, &return_value);
return_value = cudnnGetConvolutionForwardAlgorithm_v7(cudnnHandle,
dataTensor,
conv1filterDesc,
conv1Desc,
conv1Tensor,
8,
&return_value1,
perfResults);
return_value = cudnnGetConvolutionForwardWorkspaceSize(cudnnHandle,
dataTensor,
conv1filterDesc,
conv1Desc,
conv1Tensor,
perfResults[0].algo,
&sizeInBytes);
char *x;
float *conv,*conv_h;//前向传播数据
char *w;
int *b;//网络参数
void *d_cudnn_workspace = nullptr;//缓存和工作空间
float alpha = 1.0f, beta = 0.0f;
clock_t start_t, end_t;
double total_t;
int i;
//读取网络和数据
net_data = read_qvrcnn();
filename << "data\\BlowingBubbles_intra_main_HM16.7_anchor_416x240_10_Q27.yuv";
ydata = get_Y(filename.str().c_str(), batch, height, width);
ydata_reg = regularize(ydata);
return_value = cudaMalloc(&x, sizeof(char) * 416 * 240);//在GPU中分配空间
return_value = cudaMalloc(&w, sizeof(char) * 5 * 5 * 64);
return_value = cudaMalloc(&b, sizeof(int) * 64);
return_value = cudaMalloc(&conv, sizeof(float) * 64 * 416 * 240);
conv_h = (float*)malloc(sizeof(float) * 64 * 416 * 240);
if (sizeInBytes > 0)
return_value = cudaMalloc(&d_cudnn_workspace, sizeInBytes);//分配工作空间
return_value = cudaMemcpyAsync(w, net_data->weight, sizeof(char) * 5 * 5 * 64, cudaMemcpyHostToDevice);//拷贝网络到GPU
return_value = cudaMemcpyAsync(x, ydata_reg->data,sizeof(char) * ydata->frames*ydata->h*ydata->w, cudaMemcpyHostToDevice);//拷贝数据到GPU
start_t = clock();
for (i = 0;i < 10000;i++)
{
return_value = cudnnConvolutionForward(cudnnHandle, &alpha, dataTensor,
x, conv1filterDesc, w, conv1Desc,
perfResults[0].algo, d_cudnn_workspace, sizeInBytes, &beta,
conv1Tensor, conv);//进行一次卷积运算
return_value = cudaDeviceSynchronize();//同步GPU
}
end_t = clock();
total_t = (double)(end_t - start_t) / CLOCKS_PER_SEC;
printf("%f", total_t);
return_value = cudaMemcpy(conv_h, conv, sizeof(float) * 64 * 416 * 240, cudaMemcpyDeviceToHost);
//到此步即可完成debug
//cudaMemcpy(&conv1.pconv[0], d_pconv1, sizeof(float) * conv1.pconv.size(), cudaMemcpyDeviceToHost);//从GPU中拷贝出数据
return_value = cudaFree(x);//释放内存
return_value = cudaFree(w);
return_value = cudaFree(b);
return_value = cudaFree(conv);
return_value = cudaFree(d_cudnn_workspace);
system("pause");
return 0;
}
*/
#include <stdio.h>
#include <sstream>
#include <iostream>
#include <time.h>
#include <cuda_runtime.h>
#include <device_launch_parameters.h>
#include <cublas_v2.h>
#include <cudnn.h>
#include "yuv_data.h"
struct vrcnn_data {
float weight[5 * 5 * 64];
float bias[64];
};//实验性,仅读取第一层
void HWCN2NHWC(float *HWCN, float *NHWC, int H, int W, int C, int N)
{
int i, j, k, m;
for (i = 0;i < H;i++)
for (j = 0;j < W;j++)
for (k = 0;k < C;k++)
for (m = 0;m < N;m++)
NHWC[m*H*W*C + i*W*C + j*C + k] = HWCN[i*W*C*N + j*C*N + k*N + m];
}
struct vrcnn_data* read_vrcnn(void)
{
struct vrcnn_data net_data_HWCN;
struct vrcnn_data* net_data_NHWC = new struct vrcnn_data;
FILE *fp = NULL;
if (fopen_s(&fp, "model\\vrcnn_ppro_27.data", "rb"))
printf("open file failed\n");
fread(net_data_HWCN.weight, sizeof(float), 5 * 5 * 64, fp);
fread(net_data_NHWC->bias, sizeof(float), 64, fp);
fclose(fp);
HWCN2NHWC(net_data_HWCN.weight, net_data_NHWC->weight, 5, 5, 1, 64);//convert format
return net_data_NHWC;
}
float *regularizef(YChannel *ydata)
{
int i;
float *reg = (float*)malloc(sizeof(float)*ydata->frames*ydata->h*ydata->w);
for (i = 0; i < ydata->frames*ydata->h*ydata->w; i++)
reg[i] = (int)ydata->ImgData[i] - 128;
return reg;
}
int main(int argc, char** argv)
{
int num_gpus;
cublasHandle_t cublasHandle;
cudnnHandle_t cudnnHandle;
cudnnTensorDescriptor_t dataTensor, conv1Tensor, conv1BiasTensor;//数据和偏置描述符
cudnnFilterDescriptor_t conv1filterDesc;//权重描述符
cudnnConvolutionDescriptor_t conv1Desc;//卷积描述符
cudnnConvolutionFwdAlgoPerf_t perfResults[8];
size_t sizeInBytes;
YChannel *ydata;
float *ydata_reg;
vrcnn_data* net_data;
int batch = 1, channel = 0, height = 240, width = 416, return_value, return_value1;
std::stringstream filename;
cudaGetDeviceCount(&num_gpus);
cudaSetDevice(0);
cublasCreate(&cublasHandle);
cudnnCreate(&cudnnHandle);
cudnnCreateTensorDescriptor(&dataTensor);//初始化张量描述符
cudnnCreateTensorDescriptor(&conv1Tensor);
cudnnCreateTensorDescriptor(&conv1BiasTensor);
cudnnCreateFilterDescriptor(&conv1filterDesc);//初始化权重描述符
cudnnCreateConvolutionDescriptor(&conv1Desc);//初始化卷积描述符
//设置卷积描述符
return_value = cudnnSetTensor4dDescriptor(conv1BiasTensor,
CUDNN_TENSOR_NHWC,
CUDNN_DATA_FLOAT,
1, 64, 1, 1);
return_value = cudnnSetTensor4dDescriptor(dataTensor,
CUDNN_TENSOR_NHWC,
CUDNN_DATA_FLOAT,
1, 1, height, width);
return_value = cudnnSetFilter4dDescriptor(conv1filterDesc,
CUDNN_DATA_FLOAT,
CUDNN_TENSOR_NHWC,
64, 1, 5, 5);
return_value = cudnnSetConvolution2dDescriptor(conv1Desc,
2, 2,
1, 1,
1, 1,
CUDNN_CROSS_CORRELATION,
CUDNN_DATA_FLOAT);
return_value = cudnnGetConvolution2dForwardOutputDim(conv1Desc,
dataTensor,
conv1filterDesc,
&batch, &channel, &height, &width);
return_value = cudnnSetTensor4dDescriptor(conv1Tensor,
CUDNN_TENSOR_NHWC,
CUDNN_DATA_FLOAT,
1, 64, height, width);
cudnnGetConvolutionForwardAlgorithmMaxCount(cudnnHandle, &return_value);
return_value = cudnnGetConvolutionForwardAlgorithm_v7(cudnnHandle,
dataTensor,
conv1filterDesc,
conv1Desc,
conv1Tensor,
8,
&return_value1,
perfResults);
return_value = cudnnGetConvolutionForwardWorkspaceSize(cudnnHandle,
dataTensor,
conv1filterDesc,
conv1Desc,
conv1Tensor,
perfResults[0].algo,
&sizeInBytes);
char *x;
float *conv, *conv_h;//前向传播数据
float *w;
int *b;//网络参数
void *d_cudnn_workspace = nullptr;//缓存和工作空间
float alpha = 1.0f, beta = 0.0f;
clock_t start_t, end_t;
double total_t;
int i;
//读取网络和数据
net_data = read_vrcnn();
filename << "data\\BlowingBubbles_intra_main_HM16.7_anchor_416x240_10_Q27.yuv";
ydata = get_Y(filename.str().c_str(), batch, height, width);
ydata_reg = regularizef(ydata);
return_value = cudaMalloc(&x, sizeof(float) * 416 * 240);//在GPU中分配空间
return_value = cudaMalloc(&w, sizeof(float) * 5 * 5 * 64);
return_value = cudaMalloc(&b, sizeof(float) * 64);
return_value = cudaMalloc(&conv, sizeof(float) * 64 * 416 * 240);
conv_h = (float*)malloc(sizeof(float) * 64 * 416 * 240);
if (sizeInBytes > 0)
return_value = cudaMalloc(&d_cudnn_workspace, sizeInBytes);//分配工作空间
return_value = cudaMemcpyAsync(w, net_data->weight, sizeof(float) * 5 * 5 * 64, cudaMemcpyHostToDevice);//拷贝网络到GPU
return_value = cudaMemcpyAsync(x, ydata_reg, sizeof(float) * ydata->frames*ydata->h*ydata->w, cudaMemcpyHostToDevice);//拷贝数据到GPU
start_t = clock();
for (i = 0;i < 10000;i++)
{
return_value = cudnnConvolutionForward(cudnnHandle, &alpha, dataTensor,
x, conv1filterDesc, w, conv1Desc,
perfResults[0].algo, d_cudnn_workspace, sizeInBytes, &beta,
conv1Tensor, conv);//进行一次卷积运算
return_value = cudaDeviceSynchronize();//同步GPU
}
end_t = clock();
total_t = (double)(end_t - start_t) / CLOCKS_PER_SEC;
printf("%f", total_t);
return_value = cudaMemcpy(conv_h, conv, sizeof(float) * 64 * 416 * 240, cudaMemcpyDeviceToHost);
//到此步即可完成debug
//cudaMemcpy(&conv1.pconv[0], d_pconv1, sizeof(float) * conv1.pconv.size(), cudaMemcpyDeviceToHost);//从GPU中拷贝出数据
return_value = cudaFree(x);//释放内存
return_value = cudaFree(w);
return_value = cudaFree(b);
return_value = cudaFree(conv);
return_value = cudaFree(d_cudnn_workspace);
system("pause");
return 0;
} |
8b8b7e427a31c42d6f48ff2e1d9d7ee38923bf9d.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
/*M///////////////////////////////////////////////////////////////////////////////////////
//
// IMPORTANT: READ BEFORE DOWNLOADING, COPYING, INSTALLING OR USING.
//
// By downloading, copying, installing or using the software you agree to this license.
// If you do not agree to this license, do not download, install,
// copy or use the software.
//
//
// License Agreement
// For Open Source Computer Vision Library
//
// Copyright (C) 2000-2008, Intel Corporation, all rights reserved.
// Copyright (C) 2008-2012, Willow Garage Inc., all rights reserved.
// Third party copyrights are property of their respective owners.
//
// Redistribution and use in source and binary forms, with or without modification,
// are permitted provided that the following conditions are met:
//
// * Redistribution's of source code must retain the above copyright notice,
// this list of conditions and the following disclaimer.
//
// * Redistribution's in binary form must reproduce the above copyright notice,
// this list of conditions and the following disclaimer in the documentation
// and/or other materials provided with the distribution.
//
// * The name of the copyright holders may not be used to endorse or promote products
// derived from this software without specific prior written permission.
//
// This software is provided by the copyright holders and contributors "as is" and
// any express or implied warranties, including, but not limited to, the implied
// warranties of merchantability and fitness for a particular purpose are disclaimed.
// In no event shall the Intel Corporation or contributors be liable for any direct,
// indirect, incidental, special, exemplary, or consequential damages
// (including, but not limited to, procurement of substitute goods or services;
// loss of use, data, or profits; or business interruption) however caused
// and on any theory of liability, whether in contract, strict liability,
// or tort (including negligence or otherwise) arising in any way out of
// the use of this software, even if advised of the possibility of such damage.
//
//M*/
#include <opencv2/gpu/device/common.hpp>
#include <opencv2/gpu/device/saturate_cast.hpp>
#include <icf.hpp>
#include <float.h>
#include <stdio.h>
namespace cv { namespace gpu { namespace device {
namespace icf {
template <int FACTOR>
__device__ __forceinline__ uchar shrink(const uchar* ptr, const int pitch, const int y, const int x)
{
int out = 0;
#pragma unroll
for(int dy = 0; dy < FACTOR; ++dy)
#pragma unroll
for(int dx = 0; dx < FACTOR; ++dx)
{
out += ptr[dy * pitch + dx];
}
return static_cast<uchar>(out / (FACTOR * FACTOR));
}
template<int FACTOR>
__global__ void shrink(const uchar* __restrict__ hogluv, const int inPitch,
uchar* __restrict__ shrank, const int outPitch )
{
const int y = blockIdx.y * blockDim.y + threadIdx.y;
const int x = blockIdx.x * blockDim.x + threadIdx.x;
const uchar* ptr = hogluv + (FACTOR * y) * inPitch + (FACTOR * x);
shrank[ y * outPitch + x] = shrink<FACTOR>(ptr, inPitch, y, x);
}
void shrink(const cv::gpu::PtrStepSzb& channels, cv::gpu::PtrStepSzb shrunk)
{
dim3 block(32, 8);
dim3 grid(shrunk.cols / 32, shrunk.rows / 8);
hipLaunchKernelGGL(( shrink<4>), dim3(grid), dim3(block), 0, 0, (uchar*)channels.ptr(), channels.step, (uchar*)shrunk.ptr(), shrunk.step);
cudaSafeCall(hipDeviceSynchronize());
}
__device__ __forceinline__ void luv(const float& b, const float& g, const float& r, uchar& __l, uchar& __u, uchar& __v)
{
// rgb -> XYZ
float x = 0.412453f * r + 0.357580f * g + 0.180423f * b;
float y = 0.212671f * r + 0.715160f * g + 0.072169f * b;
float z = 0.019334f * r + 0.119193f * g + 0.950227f * b;
// computed for D65
const float _ur = 0.19783303699678276f;
const float _vr = 0.46833047435252234f;
const float divisor = fmax((x + 15.f * y + 3.f * z), FLT_EPSILON);
const float _u = __fdividef(4.f * x, divisor);
const float _v = __fdividef(9.f * y, divisor);
float hack = static_cast<float>(__float2int_rn(y * 2047)) / 2047;
const float L = fmax(0.f, ((116.f * cbrtf(hack)) - 16.f));
const float U = 13.f * L * (_u - _ur);
const float V = 13.f * L * (_v - _vr);
// L in [0, 100], u in [-134, 220], v in [-140, 122]
__l = static_cast<uchar>( L * (255.f / 100.f));
__u = static_cast<uchar>((U + 134.f) * (255.f / (220.f + 134.f )));
__v = static_cast<uchar>((V + 140.f) * (255.f / (122.f + 140.f )));
}
__global__ void bgr2Luv_d(const uchar* rgb, const int rgbPitch, uchar* luvg, const int luvgPitch)
{
const int y = blockIdx.y * blockDim.y + threadIdx.y;
const int x = blockIdx.x * blockDim.x + threadIdx.x;
uchar3 color = ((uchar3*)(rgb + rgbPitch * y))[x];
uchar l, u, v;
luv(color.x / 255.f, color.y / 255.f, color.z / 255.f, l, u, v);
luvg[luvgPitch * y + x] = l;
luvg[luvgPitch * (y + 480) + x] = u;
luvg[luvgPitch * (y + 2 * 480) + x] = v;
}
void bgr2Luv(const PtrStepSzb& bgr, PtrStepSzb luv)
{
dim3 block(32, 8);
dim3 grid(bgr.cols / 32, bgr.rows / 8);
hipLaunchKernelGGL(( bgr2Luv_d), dim3(grid), dim3(block), 0, 0, (const uchar*)bgr.ptr(0), bgr.step, (uchar*)luv.ptr(0), luv.step);
cudaSafeCall(hipDeviceSynchronize());
}
template<bool isDefaultNum>
__device__ __forceinline__ int fast_angle_bin(const float& dx, const float& dy)
{
const float angle_quantum = CV_PI / 6.f;
float angle = atan2(dx, dy) + (angle_quantum / 2.f);
if (angle < 0) angle += CV_PI;
const float angle_scaling = 1.f / angle_quantum;
return static_cast<int>(angle * angle_scaling) % 6;
}
template<>
__device__ __forceinline__ int fast_angle_bin<true>(const float& dy, const float& dx)
{
int index = 0;
float max_dot = fabs(dx);
{
const float dot_product = fabs(dx * 0.8660254037844386f + dy * 0.5f);
if(dot_product > max_dot)
{
max_dot = dot_product;
index = 1;
}
}
{
const float dot_product = fabs(dy * 0.8660254037844386f + dx * 0.5f);
if(dot_product > max_dot)
{
max_dot = dot_product;
index = 2;
}
}
{
int i = 3;
float2 bin_vector_i;
bin_vector_i.x = ::cos(i * (CV_PI / 6.f));
bin_vector_i.y = ::sin(i * (CV_PI / 6.f));
const float dot_product = fabs(dx * bin_vector_i.x + dy * bin_vector_i.y);
if(dot_product > max_dot)
{
max_dot = dot_product;
index = i;
}
}
{
const float dot_product = fabs(dx * (-0.4999999999999998f) + dy * 0.8660254037844387f);
if(dot_product > max_dot)
{
max_dot = dot_product;
index = 4;
}
}
{
const float dot_product = fabs(dx * (-0.8660254037844387f) + dy * 0.49999999999999994f);
if(dot_product > max_dot)
{
max_dot = dot_product;
index = 5;
}
}
return index;
}
texture<uchar, hipTextureType2D, hipReadModeElementType> tgray;
template<bool isDefaultNum>
__global__ void gray2hog(PtrStepSzb mag)
{
const int x = blockIdx.x * blockDim.x + threadIdx.x;
const int y = blockIdx.y * blockDim.y + threadIdx.y;
const float dx = tex2D(tgray, x + 1, y + 0) - tex2D(tgray, x - 1, y - 0);
const float dy = tex2D(tgray, x + 0, y + 1) - tex2D(tgray, x - 0, y - 1);
const float magnitude = sqrtf((dx * dx) + (dy * dy)) * (1.0f / sqrtf(2));
const uchar cmag = static_cast<uchar>(magnitude);
mag( 480 * 6 + y, x) = cmag;
mag( 480 * fast_angle_bin<isDefaultNum>(dy, dx) + y, x) = cmag;
}
void gray2hog(const PtrStepSzb& gray, PtrStepSzb mag, const int bins)
{
dim3 block(32, 8);
dim3 grid(gray.cols / 32, gray.rows / 8);
hipChannelFormatDesc desc = hipCreateChannelDesc<uchar>();
cudaSafeCall( hipBindTexture2D(0, tgray, gray.data, desc, gray.cols, gray.rows, gray.step) );
if (bins == 6)
hipLaunchKernelGGL(( gray2hog<true>), dim3(grid), dim3(block), 0, 0, mag);
else
hipLaunchKernelGGL(( gray2hog<false>), dim3(grid), dim3(block), 0, 0, mag);
cudaSafeCall(hipDeviceSynchronize());
}
// ToDo: use textures or uncached load instruction.
__global__ void magToHist(const uchar* __restrict__ mag,
const float* __restrict__ angle, const int angPitch,
uchar* __restrict__ hog, const int hogPitch, const int fh)
{
const int y = blockIdx.y * blockDim.y + threadIdx.y;
const int x = blockIdx.x * blockDim.x + threadIdx.x;
const int bin = (int)(angle[y * angPitch + x]);
const uchar val = mag[y * hogPitch + x];
hog[((fh * bin) + y) * hogPitch + x] = val;
}
void fillBins(cv::gpu::PtrStepSzb hogluv, const cv::gpu::PtrStepSzf& nangle,
const int fw, const int fh, const int bins, hipStream_t stream )
{
const uchar* mag = (const uchar*)hogluv.ptr(fh * bins);
uchar* hog = (uchar*)hogluv.ptr();
const float* angle = (const float*)nangle.ptr();
dim3 block(32, 8);
dim3 grid(fw / 32, fh / 8);
hipLaunchKernelGGL(( magToHist), dim3(grid), dim3(block), 0, stream, mag, angle, nangle.step / sizeof(float), hog, hogluv.step, fh);
if (!stream)
{
cudaSafeCall( hipGetLastError() );
cudaSafeCall( hipDeviceSynchronize() );
}
}
__device__ __forceinline__ float overlapArea(const Detection &a, const Detection &b)
{
int w = ::min(a.x + a.w, b.x + b.w) - ::max(a.x, b.x);
int h = ::min(a.y + a.h, b.y + b.h) - ::max(a.y, b.y);
return (w < 0 || h < 0)? 0.f : (float)(w * h);
}
texture<uint4, hipTextureType2D, hipReadModeElementType> tdetections;
__global__ void overlap(const uint* n, uchar* overlaps)
{
const int idx = threadIdx.x;
const int total = *n;
for (int i = idx + 1; i < total; i += 192)
{
const uint4 _a = tex2D(tdetections, i, 0);
const Detection& a = *((Detection*)(&_a));
bool excluded = false;
for (int j = i + 1; j < total; ++j)
{
const uint4 _b = tex2D(tdetections, j, 0);
const Detection& b = *((Detection*)(&_b));
float ovl = overlapArea(a, b) / ::min(a.w * a.h, b.w * b.h);
if (ovl > 0.65f)
{
int suppessed = (a.confidence > b.confidence)? j : i;
overlaps[suppessed] = 1;
excluded = excluded || (suppessed == i);
}
#if __CUDA_ARCH__ >= 120
if (__all(excluded)) break;
#endif
}
}
}
__global__ void collect(const uint* n, uchar* overlaps, uint* ctr, uint4* suppressed)
{
const int idx = threadIdx.x;
const int total = *n;
for (int i = idx; i < total; i += 192)
{
if (!overlaps[i])
{
int oidx = atomicInc(ctr, 50);
suppressed[oidx] = tex2D(tdetections, i + 1, 0);
}
}
}
void suppress(const PtrStepSzb& objects, PtrStepSzb overlaps, PtrStepSzi ndetections,
PtrStepSzb suppressed, hipStream_t stream)
{
int block = 192;
int grid = 1;
hipChannelFormatDesc desc = hipCreateChannelDesc<uint4>();
size_t offset;
cudaSafeCall( hipBindTexture2D(&offset, tdetections, objects.data, desc, objects.cols / sizeof(uint4), objects.rows, objects.step));
hipLaunchKernelGGL(( overlap), dim3(grid), dim3(block), 0, 0, (uint*)ndetections.ptr(0), (uchar*)overlaps.ptr(0));
hipLaunchKernelGGL(( collect), dim3(grid), dim3(block), 0, 0, (uint*)ndetections.ptr(0), (uchar*)overlaps.ptr(0), (uint*)suppressed.ptr(0), ((uint4*)suppressed.ptr(0)) + 1);
if (!stream)
{
cudaSafeCall( hipGetLastError());
cudaSafeCall( hipDeviceSynchronize());
}
}
template<typename Policy>
struct PrefixSum
{
__device static void apply(float& impact)
{
#if defined __CUDA_ARCH__ && __CUDA_ARCH__ >= 300
#pragma unroll
// scan on shuffle functions
for (int i = 1; i < Policy::WARP; i *= 2)
{
const float n = __shfl_up(impact, i, Policy::WARP);
if (threadIdx.x >= i)
impact += n;
}
#else
__shared__ volatile float ptr[Policy::STA_X * Policy::STA_Y];
const int idx = threadIdx.y * Policy::STA_X + threadIdx.x;
ptr[idx] = impact;
if ( threadIdx.x >= 1) ptr [idx ] = (ptr [idx - 1] + ptr [idx]);
if ( threadIdx.x >= 2) ptr [idx ] = (ptr [idx - 2] + ptr [idx]);
if ( threadIdx.x >= 4) ptr [idx ] = (ptr [idx - 4] + ptr [idx]);
if ( threadIdx.x >= 8) ptr [idx ] = (ptr [idx - 8] + ptr [idx]);
if ( threadIdx.x >= 16) ptr [idx ] = (ptr [idx - 16] + ptr [idx]);
impact = ptr[idx];
#endif
}
};
texture<int, hipTextureType2D, hipReadModeElementType> thogluv;
template<bool isUp>
__device__ __forceinline__ float rescale(const Level& level, Node& node)
{
uchar4& scaledRect = node.rect;
float relScale = level.relScale;
float farea = (scaledRect.z - scaledRect.x) * (scaledRect.w - scaledRect.y);
// rescale
scaledRect.x = __float2int_rn(relScale * scaledRect.x);
scaledRect.y = __float2int_rn(relScale * scaledRect.y);
scaledRect.z = __float2int_rn(relScale * scaledRect.z);
scaledRect.w = __float2int_rn(relScale * scaledRect.w);
float sarea = (scaledRect.z - scaledRect.x) * (scaledRect.w - scaledRect.y);
const float expected_new_area = farea * relScale * relScale;
float approx = (sarea == 0)? 1: __fdividef(sarea, expected_new_area);
float rootThreshold = (node.threshold & 0x0FFFFFFFU) * approx * level.scaling[(node.threshold >> 28) > 6];
return rootThreshold;
}
template<>
__device__ __forceinline__ float rescale<true>(const Level& level, Node& node)
{
uchar4& scaledRect = node.rect;
float relScale = level.relScale;
float farea = scaledRect.z * scaledRect.w;
// rescale
scaledRect.x = __float2int_rn(relScale * scaledRect.x);
scaledRect.y = __float2int_rn(relScale * scaledRect.y);
scaledRect.z = __float2int_rn(relScale * scaledRect.z);
scaledRect.w = __float2int_rn(relScale * scaledRect.w);
float sarea = scaledRect.z * scaledRect.w;
const float expected_new_area = farea * relScale * relScale;
float approx = __fdividef(sarea, expected_new_area);
float rootThreshold = (node.threshold & 0x0FFFFFFFU) * approx * level.scaling[(node.threshold >> 28) > 6];
return rootThreshold;
}
template<bool isUp>
__device__ __forceinline__ int get(int x, int y, uchar4 area)
{
int a = tex2D(thogluv, x + area.x, y + area.y);
int b = tex2D(thogluv, x + area.z, y + area.y);
int c = tex2D(thogluv, x + area.z, y + area.w);
int d = tex2D(thogluv, x + area.x, y + area.w);
return (a - b + c - d);
}
template<>
__device__ __forceinline__ int get<true>(int x, int y, uchar4 area)
{
x += area.x;
y += area.y;
int a = tex2D(thogluv, x, y);
int b = tex2D(thogluv, x + area.z, y);
int c = tex2D(thogluv, x + area.z, y + area.w);
int d = tex2D(thogluv, x, y + area.w);
return (a - b + c - d);
}
texture<float2, hipTextureType2D, hipReadModeElementType> troi;
template<typename Policy>
template<bool isUp>
__device void CascadeInvoker<Policy>::detect(Detection* objects, const uint ndetections, uint* ctr, const int downscales) const
{
const int y = blockIdx.y * blockDim.y + threadIdx.y;
const int x = blockIdx.x;
// load Level
__shared__ Level level;
// check POI
__shared__ volatile char roiCache[Policy::STA_Y];
if (!threadIdx.y && !threadIdx.x)
((float2*)roiCache)[threadIdx.x] = tex2D(troi, blockIdx.y, x);
__syncthreads();
if (!roiCache[threadIdx.y]) return;
if (!threadIdx.x)
level = levels[downscales + blockIdx.z];
if(x >= level.workRect.x || y >= level.workRect.y) return;
int st = level.octave * level.step;
const int stEnd = st + level.step;
const int hogluvStep = gridDim.y * Policy::STA_Y;
float confidence = 0.f;
for(; st < stEnd; st += Policy::WARP)
{
const int nId = (st + threadIdx.x) * 3;
Node node = nodes[nId];
float threshold = rescale<isUp>(level, node);
int sum = get<isUp>(x, y + (node.threshold >> 28) * hogluvStep, node.rect);
int next = 1 + (int)(sum >= threshold);
node = nodes[nId + next];
threshold = rescale<isUp>(level, node);
sum = get<isUp>(x, y + (node.threshold >> 28) * hogluvStep, node.rect);
const int lShift = (next - 1) * 2 + (int)(sum >= threshold);
float impact = leaves[(st + threadIdx.x) * 4 + lShift];
PrefixSum<Policy>::apply(impact);
#if __CUDA_ARCH__ >= 120
if(__any((confidence + impact <= stages[(st + threadIdx.x)]))) st += 2048;
#endif
#if __CUDA_ARCH__ >= 300
impact = __shfl(impact, 31);
#endif
confidence += impact;
}
if(!threadIdx.x && st == stEnd && ((confidence - FLT_EPSILON) >= 0))
{
int idx = atomicInc(ctr, ndetections);
objects[idx] = Detection(__float2int_rn(x * Policy::SHRINKAGE),
__float2int_rn(y * Policy::SHRINKAGE), level.objSize.x, level.objSize.y, confidence);
}
}
template<typename Policy, bool isUp>
__global__ void soft_cascade(const CascadeInvoker<Policy> invoker, Detection* objects, const uint n, uint* ctr, const int downs)
{
invoker.template detect<isUp>(objects, n, ctr, downs);
}
template<typename Policy>
void CascadeInvoker<Policy>::operator()(const PtrStepSzb& roi, const PtrStepSzi& hogluv,
PtrStepSz<uchar4> objects, const int downscales, const hipStream_t& stream) const
{
int fw = roi.rows;
int fh = roi.cols;
dim3 grid(fw, fh / Policy::STA_Y, downscales);
uint* ctr = (uint*)(objects.ptr(0));
Detection* det = ((Detection*)objects.ptr(0)) + 1;
uint max_det = objects.cols / sizeof(Detection);
hipChannelFormatDesc desc = hipCreateChannelDesc<int>();
cudaSafeCall( hipBindTexture2D(0, thogluv, hogluv.data, desc, hogluv.cols, hogluv.rows, hogluv.step));
hipChannelFormatDesc desc_roi = hipCreateChannelDesc<typename Policy::roi_type>();
cudaSafeCall( hipBindTexture2D(0, troi, roi.data, desc_roi, roi.cols / Policy::STA_Y, roi.rows, roi.step));
const CascadeInvoker<Policy> inv = *this;
hipLaunchKernelGGL(( soft_cascade<Policy, false>), dim3(grid), dim3(Policy::block()), 0, stream, inv, det, max_det, ctr, 0);
cudaSafeCall( hipGetLastError());
grid = dim3(fw, fh / Policy::STA_Y, min(38, scales) - downscales);
hipLaunchKernelGGL(( soft_cascade<Policy, true>), dim3(grid), dim3(Policy::block()), 0, stream, inv, det, max_det, ctr, downscales);
if (!stream)
{
cudaSafeCall( hipGetLastError());
cudaSafeCall( hipDeviceSynchronize());
}
}
template void CascadeInvoker<GK107PolicyX4>::operator()(const PtrStepSzb& roi, const PtrStepSzi& hogluv,
PtrStepSz<uchar4> objects, const int downscales, const hipStream_t& stream) const;
}
}}}
| 8b8b7e427a31c42d6f48ff2e1d9d7ee38923bf9d.cu | /*M///////////////////////////////////////////////////////////////////////////////////////
//
// IMPORTANT: READ BEFORE DOWNLOADING, COPYING, INSTALLING OR USING.
//
// By downloading, copying, installing or using the software you agree to this license.
// If you do not agree to this license, do not download, install,
// copy or use the software.
//
//
// License Agreement
// For Open Source Computer Vision Library
//
// Copyright (C) 2000-2008, Intel Corporation, all rights reserved.
// Copyright (C) 2008-2012, Willow Garage Inc., all rights reserved.
// Third party copyrights are property of their respective owners.
//
// Redistribution and use in source and binary forms, with or without modification,
// are permitted provided that the following conditions are met:
//
// * Redistribution's of source code must retain the above copyright notice,
// this list of conditions and the following disclaimer.
//
// * Redistribution's in binary form must reproduce the above copyright notice,
// this list of conditions and the following disclaimer in the documentation
// and/or other materials provided with the distribution.
//
// * The name of the copyright holders may not be used to endorse or promote products
// derived from this software without specific prior written permission.
//
// This software is provided by the copyright holders and contributors "as is" and
// any express or implied warranties, including, but not limited to, the implied
// warranties of merchantability and fitness for a particular purpose are disclaimed.
// In no event shall the Intel Corporation or contributors be liable for any direct,
// indirect, incidental, special, exemplary, or consequential damages
// (including, but not limited to, procurement of substitute goods or services;
// loss of use, data, or profits; or business interruption) however caused
// and on any theory of liability, whether in contract, strict liability,
// or tort (including negligence or otherwise) arising in any way out of
// the use of this software, even if advised of the possibility of such damage.
//
//M*/
#include <opencv2/gpu/device/common.hpp>
#include <opencv2/gpu/device/saturate_cast.hpp>
#include <icf.hpp>
#include <float.h>
#include <stdio.h>
namespace cv { namespace gpu { namespace device {
namespace icf {
// Device helper: box-average a FACTOR x FACTOR patch of 8-bit samples.
// `ptr` points at the top-left corner of the patch; `pitch` is the source
// row stride.  (The y and x parameters are unused by this helper.)
template <int FACTOR>
__device__ __forceinline__ uchar shrink(const uchar* ptr, const int pitch, const int y, const int x)
{
    int out = 0;
#pragma unroll
    for(int dy = 0; dy < FACTOR; ++dy)
#pragma unroll
        for(int dx = 0; dx < FACTOR; ++dx)
        {
            out += ptr[dy * pitch + dx];
        }
    return static_cast<uchar>(out / (FACTOR * FACTOR));
}

// Kernel: one thread per output pixel; downsamples `hogluv` by FACTOR in
// each dimension into `shrank`.  No bounds check: the host launch sizes the
// grid exactly to the output dimensions.
template<int FACTOR>
__global__ void shrink(const uchar* __restrict__ hogluv, const int inPitch,
                       uchar* __restrict__ shrank, const int outPitch )
{
    const int y = blockIdx.y * blockDim.y + threadIdx.y;
    const int x = blockIdx.x * blockDim.x + threadIdx.x;
    const uchar* ptr = hogluv + (FACTOR * y) * inPitch + (FACTOR * x);
    shrank[ y * outPitch + x] = shrink<FACTOR>(ptr, inPitch, y, x);
}
// Host wrapper: downsample the channel bank by a factor of 4.
// NOTE(review): the integer division assumes shrunk.cols % 32 == 0 and
// shrunk.rows % 8 == 0; trailing pixels would be skipped otherwise.
void shrink(const cv::gpu::PtrStepSzb& channels, cv::gpu::PtrStepSzb shrunk)
{
    dim3 block(32, 8);
    dim3 grid(shrunk.cols / 32, shrunk.rows / 8);
    shrink<4><<<grid, block>>>((uchar*)channels.ptr(), channels.step, (uchar*)shrunk.ptr(), shrunk.step);
    // Check for launch-configuration errors before synchronizing (the launch
    // itself never reports them); this matches the pattern used in fillBins.
    cudaSafeCall(cudaGetLastError());
    cudaSafeCall(cudaDeviceSynchronize());
}
// Device helper: convert one normalized BGR triplet (b, g, r in [0, 1]) to
// 8-bit CIE L*u*v*, with each output channel rescaled to the full [0, 255]
// range using the value bounds noted below.
__device__ __forceinline__ void luv(const float& b, const float& g, const float& r, uchar& __l, uchar& __u, uchar& __v)
{
    // rgb -> XYZ (sRGB D65 matrix)
    float x = 0.412453f * r + 0.357580f * g + 0.180423f * b;
    float y = 0.212671f * r + 0.715160f * g + 0.072169f * b;
    float z = 0.019334f * r + 0.119193f * g + 0.950227f * b;
    // computed for D65 (white-point chromaticities u'_r, v'_r)
    const float _ur = 0.19783303699678276f;
    const float _vr = 0.46833047435252234f;
    // FLT_EPSILON floor guards the division for pure black input.
    const float divisor = fmax((x + 15.f * y + 3.f * z), FLT_EPSILON);
    const float _u = __fdividef(4.f * x, divisor);
    const float _v = __fdividef(9.f * y, divisor);
    // Quantize luminance to 11 bits before the cube root — presumably to match
    // a table-based reference implementation; TODO confirm.
    float hack = static_cast<float>(__float2int_rn(y * 2047)) / 2047;
    const float L = fmax(0.f, ((116.f * cbrtf(hack)) - 16.f));
    const float U = 13.f * L * (_u - _ur);
    const float V = 13.f * L * (_v - _vr);
    // L in [0, 100], u in [-134, 220], v in [-140, 122]
    __l = static_cast<uchar>( L * (255.f / 100.f));
    __u = static_cast<uchar>((U + 134.f) * (255.f / (220.f + 134.f )));
    __v = static_cast<uchar>((V + 140.f) * (255.f / (122.f + 140.f )));
}
// Kernel: convert a packed 3-channel 8-bit image into three planar L, u, v
// planes stacked vertically in `luvg`.
// NOTE(review): the plane height 480 is hard-coded, so this kernel only
// works for 480-row frames — confirm against the callers.
__global__ void bgr2Luv_d(const uchar* rgb, const int rgbPitch, uchar* luvg, const int luvgPitch)
{
    const int y = blockIdx.y * blockDim.y + threadIdx.y;
    const int x = blockIdx.x * blockDim.x + threadIdx.x;
    uchar3 color = ((uchar3*)(rgb + rgbPitch * y))[x];
    uchar l, u, v;
    // color.x/.y/.z are passed as the (b, g, r) arguments of luv().
    luv(color.x / 255.f, color.y / 255.f, color.z / 255.f, l, u, v);
    luvg[luvgPitch * y + x] = l;
    luvg[luvgPitch * (y + 480) + x] = u;
    luvg[luvgPitch * (y + 2 * 480) + x] = v;
}
// Host wrapper: launch bgr2Luv_d over the whole frame.
// NOTE(review): assumes bgr.cols % 32 == 0 and bgr.rows % 8 == 0.
void bgr2Luv(const PtrStepSzb& bgr, PtrStepSzb luv)
{
    dim3 block(32, 8);
    dim3 grid(bgr.cols / 32, bgr.rows / 8);
    bgr2Luv_d<<<grid, block>>>((const uchar*)bgr.ptr(0), bgr.step, (uchar*)luv.ptr(0), luv.step);
    // Check for launch-configuration errors before synchronizing; this
    // matches the error-handling pattern used in fillBins.
    cudaSafeCall(cudaGetLastError());
    cudaSafeCall(cudaDeviceSynchronize());
}
// Quantize a gradient direction into one of 6 orientation bins.
// Generic version: direct angle quantization via atan2.
// NOTE(review): the call is atan2(dx, dy), not the usual atan2(dy, dx) —
// the caller passes (dy, dx), so the arguments end up swapped twice; verify
// the intended orientation convention.
template<bool isDefaultNum>
__device__ __forceinline__ int fast_angle_bin(const float& dx, const float& dy)
{
    const float angle_quantum = CV_PI / 6.f;
    // Shift by half a bin so bin centres (not edges) sit on multiples of pi/6.
    float angle = atan2(dx, dy) + (angle_quantum / 2.f);
    if (angle < 0) angle += CV_PI;
    const float angle_scaling = 1.f / angle_quantum;
    return static_cast<int>(angle * angle_scaling) % 6;
}

// Specialization for the default 6-bin case: avoids atan2 by picking the
// bin whose unit vector maximizes |(dx, dy) . bin_vector| (dot products
// against cos/sin of k*pi/6 for k = 0..5, mostly precomputed as literals).
template<>
__device__ __forceinline__ int fast_angle_bin<true>(const float& dy, const float& dx)
{
    int index = 0;
    // k = 0: bin vector (1, 0), so the dot product is just |dx|.
    float max_dot = fabs(dx);
    {
        // k = 1: (cos(pi/6), sin(pi/6)) = (0.866..., 0.5)
        const float dot_product = fabs(dx * 0.8660254037844386f + dy * 0.5f);
        if(dot_product > max_dot)
        {
            max_dot = dot_product;
            index = 1;
        }
    }
    {
        // k = 2: (cos(2pi/6), sin(2pi/6)) = (0.5, 0.866...), written swapped.
        const float dot_product = fabs(dy * 0.8660254037844386f + dx * 0.5f);
        if(dot_product > max_dot)
        {
            max_dot = dot_product;
            index = 2;
        }
    }
    {
        // k = 3: computed at run time rather than as literals — presumably an
        // oversight; cos(pi/2) = 0, sin(pi/2) = 1, so this is effectively |dy|.
        int i = 3;
        float2 bin_vector_i;
        bin_vector_i.x = ::cos(i * (CV_PI / 6.f));
        bin_vector_i.y = ::sin(i * (CV_PI / 6.f));
        const float dot_product = fabs(dx * bin_vector_i.x + dy * bin_vector_i.y);
        if(dot_product > max_dot)
        {
            max_dot = dot_product;
            index = i;
        }
    }
    {
        // k = 4: (cos(4pi/6), sin(4pi/6))
        const float dot_product = fabs(dx * (-0.4999999999999998f) + dy * 0.8660254037844387f);
        if(dot_product > max_dot)
        {
            max_dot = dot_product;
            index = 4;
        }
    }
    {
        // k = 5: (cos(5pi/6), sin(5pi/6))
        const float dot_product = fabs(dx * (-0.8660254037844387f) + dy * 0.49999999999999994f);
        if(dot_product > max_dot)
        {
            max_dot = dot_product;
            index = 5;
        }
    }
    return index;
}
texture<uchar, cudaTextureType2D, cudaReadModeElementType> tgray;
// Kernel: compute per-pixel gradient magnitude and orientation from the
// grayscale image bound to `tgray` (central differences), and scatter the
// magnitude into the overall-magnitude plane (bin 6) plus the plane of its
// orientation bin.  NOTE(review): plane height 480 is hard-coded.
template<bool isDefaultNum>
__global__ void gray2hog(PtrStepSzb mag)
{
    const int x = blockIdx.x * blockDim.x + threadIdx.x;
    const int y = blockIdx.y * blockDim.y + threadIdx.y;
    const float dx = tex2D(tgray, x + 1, y + 0) - tex2D(tgray, x - 1, y - 0);
    const float dy = tex2D(tgray, x + 0, y + 1) - tex2D(tgray, x - 0, y - 1);
    // 1/sqrt(2) normalization keeps the magnitude inside the uchar range.
    const float magnitude = sqrtf((dx * dx) + (dy * dy)) * (1.0f / sqrtf(2));
    const uchar cmag = static_cast<uchar>(magnitude);
    mag( 480 * 6 + y, x) = cmag;
    mag( 480 * fast_angle_bin<isDefaultNum>(dy, dx) + y, x) = cmag;
}
// Host wrapper: bind the grayscale image to the tgray texture and launch
// the HOG-channel kernel; the template flag selects the fast dot-product
// binning when the default 6 bins are requested.
void gray2hog(const PtrStepSzb& gray, PtrStepSzb mag, const int bins)
{
    dim3 block(32, 8);
    dim3 grid(gray.cols / 32, gray.rows / 8);
    cudaChannelFormatDesc desc = cudaCreateChannelDesc<uchar>();
    cudaSafeCall( cudaBindTexture2D(0, tgray, gray.data, desc, gray.cols, gray.rows, gray.step) );
    if (bins == 6)
        gray2hog<true><<<grid, block>>>(mag);
    else
        gray2hog<false><<<grid, block>>>(mag);
    // Check for launch-configuration errors before synchronizing; this
    // matches the error-handling pattern used in fillBins.
    cudaSafeCall(cudaGetLastError());
    cudaSafeCall(cudaDeviceSynchronize());
}
// ToDo: use textures or uncached load instruction.
// Kernel: scatter each pixel's magnitude into the HOG plane selected by its
// (pre-quantized) orientation bin stored in `angle`.  One thread per pixel;
// `fh` is the height of a single plane.
// ToDo: use textures or uncached load instruction.
__global__ void magToHist(const uchar* __restrict__ mag,
                          const float* __restrict__ angle, const int angPitch,
                          uchar* __restrict__ hog, const int hogPitch, const int fh)
{
    const int y = blockIdx.y * blockDim.y + threadIdx.y;
    const int x = blockIdx.x * blockDim.x + threadIdx.x;
    const int bin = (int)(angle[y * angPitch + x]);
    const uchar val = mag[y * hogPitch + x];
    hog[((fh * bin) + y) * hogPitch + x] = val;
}
// Host wrapper: populate the `bins` orientation planes at the top of
// `hogluv` from the magnitude plane (stored after the bins) and the
// per-pixel bin indices in `nangle`.  Asynchronous when a stream is given;
// otherwise checks the launch and synchronizes.
void fillBins(cv::gpu::PtrStepSzb hogluv, const cv::gpu::PtrStepSzf& nangle,
const int fw, const int fh, const int bins, cudaStream_t stream )
{
    const uchar* mag = (const uchar*)hogluv.ptr(fh * bins);
    uchar* hog = (uchar*)hogluv.ptr();
    const float* angle = (const float*)nangle.ptr();
    dim3 block(32, 8);
    dim3 grid(fw / 32, fh / 8);
    magToHist<<<grid, block, 0, stream>>>(mag, angle, nangle.step / sizeof(float), hog, hogluv.step, fh);
    if (!stream)
    {
        cudaSafeCall( cudaGetLastError() );
        cudaSafeCall( cudaDeviceSynchronize() );
    }
}
// Device helper: area of the intersection rectangle of two detections,
// or 0 when they do not overlap.
__device__ __forceinline__ float overlapArea(const Detection &a, const Detection &b)
{
    int w = ::min(a.x + a.w, b.x + b.w) - ::max(a.x, b.x);
    int h = ::min(a.y + a.h, b.y + b.h) - ::max(a.y, b.y);
    return (w < 0 || h < 0)? 0.f : (float)(w * h);
}
texture<uint4, cudaTextureType2D, cudaReadModeElementType> tdetections;
// Kernel (single block of 192 threads): non-maximum suppression marking.
// For every pair of detections whose intersection-over-min-area exceeds
// 0.65, the lower-confidence one is flagged in `overlaps`.  Detections are
// read from the tdetections texture; *n is the device-side count.
// NOTE(review): the outer loop starts at idx + 1 while collect() reads
// detection i at column i + 1 — confirm the intended offset of the first
// texture column (it appears reserved for the counter).
__global__ void overlap(const uint* n, uchar* overlaps)
{
    const int idx = threadIdx.x;
    const int total = *n;
    for (int i = idx + 1; i < total; i += 192)
    {
        const uint4 _a = tex2D(tdetections, i, 0);
        const Detection& a = *((Detection*)(&_a));
        bool excluded = false;
        for (int j = i + 1; j < total; ++j)
        {
            const uint4 _b = tex2D(tdetections, j, 0);
            const Detection& b = *((Detection*)(&_b));
            float ovl = overlapArea(a, b) / ::min(a.w * a.h, b.w * b.h);
            if (ovl > 0.65f)
            {
                // keep the higher-confidence detection ("suppessed" is a
                // historical typo kept for consistency)
                int suppessed = (a.confidence > b.confidence)? j : i;
                overlaps[suppessed] = 1;
                excluded = excluded || (suppessed == i);
            }
#if __CUDA_ARCH__ >= 120
            // Legacy mask-less __all (pre-Volta warp semantics): bail out of
            // the inner loop once every lane's detection is suppressed.
            if (__all(excluded)) break;
#endif
        }
    }
}
// Kernel (single block of 192 threads): compact all detections not flagged
// in `overlaps` into `suppressed`, counting them via atomicInc on *ctr.
// NOTE(review): atomicInc wraps the counter at 50 — an implicit hard cap on
// the number of surviving detections; confirm it matches the output buffer.
__global__ void collect(const uint* n, uchar* overlaps, uint* ctr, uint4* suppressed)
{
    const int idx = threadIdx.x;
    const int total = *n;
    for (int i = idx; i < total; i += 192)
    {
        if (!overlaps[i])
        {
            int oidx = atomicInc(ctr, 50);
            suppressed[oidx] = tex2D(tdetections, i + 1, 0);
        }
    }
}
// Host wrapper for NMS: binds the raw detections to the tdetections
// texture, marks overlapping detections, then compacts the survivors into
// `suppressed` (slot 0 of which holds the surviving count).
void suppress(const PtrStepSzb& objects, PtrStepSzb overlaps, PtrStepSzi ndetections,
PtrStepSzb suppressed, cudaStream_t stream)
{
    int block = 192;
    int grid = 1;
    cudaChannelFormatDesc desc = cudaCreateChannelDesc<uint4>();
    size_t offset;
    cudaSafeCall( cudaBindTexture2D(&offset, tdetections, objects.data, desc, objects.cols / sizeof(uint4), objects.rows, objects.step));
    overlap<<<grid, block>>>((uint*)ndetections.ptr(0), (uchar*)overlaps.ptr(0));
    collect<<<grid, block>>>((uint*)ndetections.ptr(0), (uchar*)overlaps.ptr(0), (uint*)suppressed.ptr(0), ((uint4*)suppressed.ptr(0)) + 1);
    if (!stream)
    {
        cudaSafeCall( cudaGetLastError());
        cudaSafeCall( cudaDeviceSynchronize());
    }
}
// In-warp inclusive prefix sum of `impact` across threadIdx.x.
// SM30+: shuffle-based scan using legacy mask-less __shfl_up (pre-Volta
// warp-synchronous semantics).  Older archs: shared-memory Hillis-Steele
// scan with no barriers — relies on warp-synchronous execution, so it
// presumably requires Policy::STA_X <= warpSize; confirm.
// (Note: "__device" without trailing underscores appears in this dump —
// presumably "__device__" in the original source.)
template<typename Policy>
struct PrefixSum
{
    __device static void apply(float& impact)
    {
#if defined __CUDA_ARCH__ && __CUDA_ARCH__ >= 300
#pragma unroll
        // scan on shuffle functions
        for (int i = 1; i < Policy::WARP; i *= 2)
        {
            const float n = __shfl_up(impact, i, Policy::WARP);
            if (threadIdx.x >= i)
                impact += n;
        }
#else
        __shared__ volatile float ptr[Policy::STA_X * Policy::STA_Y];
        const int idx = threadIdx.y * Policy::STA_X + threadIdx.x;
        ptr[idx] = impact;
        if ( threadIdx.x >= 1) ptr [idx ] = (ptr [idx - 1] + ptr [idx]);
        if ( threadIdx.x >= 2) ptr [idx ] = (ptr [idx - 2] + ptr [idx]);
        if ( threadIdx.x >= 4) ptr [idx ] = (ptr [idx - 4] + ptr [idx]);
        if ( threadIdx.x >= 8) ptr [idx ] = (ptr [idx - 8] + ptr [idx]);
        if ( threadIdx.x >= 16) ptr [idx ] = (ptr [idx - 16] + ptr [idx]);
        impact = ptr[idx];
#endif
    }
};
texture<int, cudaTextureType2D, cudaReadModeElementType> thogluv;
// Rescale a cascade node's rectangle to the current pyramid level and
// return its corrected threshold.  The top 4 bits of node.threshold hold
// the channel index (> 6 selects the second scaling coefficient); the low
// 28 bits hold the raw threshold.  `approx` compensates for the rounding
// error between the ideal scaled area and the integer-rounded rectangle.
template<bool isUp>
__device__ __forceinline__ float rescale(const Level& level, Node& node)
{
    uchar4& scaledRect = node.rect;
    float relScale = level.relScale;
    float farea = (scaledRect.z - scaledRect.x) * (scaledRect.w - scaledRect.y);
    // rescale
    scaledRect.x = __float2int_rn(relScale * scaledRect.x);
    scaledRect.y = __float2int_rn(relScale * scaledRect.y);
    scaledRect.z = __float2int_rn(relScale * scaledRect.z);
    scaledRect.w = __float2int_rn(relScale * scaledRect.w);
    float sarea = (scaledRect.z - scaledRect.x) * (scaledRect.w - scaledRect.y);
    const float expected_new_area = farea * relScale * relScale;
    // Guard against a degenerate (zero-area) rounded rectangle.
    float approx = (sarea == 0)? 1: __fdividef(sarea, expected_new_area);
    float rootThreshold = (node.threshold & 0x0FFFFFFFU) * approx * level.scaling[(node.threshold >> 28) > 6];
    return rootThreshold;
}

// Upscaling specialization: here rect stores (x, y, width, height) rather
// than two corners, so areas come from z * w directly and no zero-area
// guard is applied.
template<>
__device__ __forceinline__ float rescale<true>(const Level& level, Node& node)
{
    uchar4& scaledRect = node.rect;
    float relScale = level.relScale;
    float farea = scaledRect.z * scaledRect.w;
    // rescale
    scaledRect.x = __float2int_rn(relScale * scaledRect.x);
    scaledRect.y = __float2int_rn(relScale * scaledRect.y);
    scaledRect.z = __float2int_rn(relScale * scaledRect.z);
    scaledRect.w = __float2int_rn(relScale * scaledRect.w);
    float sarea = scaledRect.z * scaledRect.w;
    const float expected_new_area = farea * relScale * relScale;
    float approx = __fdividef(sarea, expected_new_area);
    float rootThreshold = (node.threshold & 0x0FFFFFFFU) * approx * level.scaling[(node.threshold >> 28) > 6];
    return rootThreshold;
}
// Rectangle sum over the integral image bound to `thogluv`, evaluated at
// the four corners of `area` offset by (x, y).  The a - b + c - d sign
// pattern presumably matches the integral-image layout used upstream —
// confirm against the channel builder.
template<bool isUp>
__device__ __forceinline__ int get(int x, int y, uchar4 area)
{
    int a = tex2D(thogluv, x + area.x, y + area.y);
    int b = tex2D(thogluv, x + area.z, y + area.y);
    int c = tex2D(thogluv, x + area.z, y + area.w);
    int d = tex2D(thogluv, x + area.x, y + area.w);
    return (a - b + c - d);
}

// Upscaling specialization: area is (x, y, width, height), so the corner
// coordinates are formed by offsetting the base point by width/height.
template<>
__device__ __forceinline__ int get<true>(int x, int y, uchar4 area)
{
    x += area.x;
    y += area.y;
    int a = tex2D(thogluv, x, y);
    int b = tex2D(thogluv, x + area.z, y);
    int c = tex2D(thogluv, x + area.z, y + area.w);
    int d = tex2D(thogluv, x, y + area.w);
    return (a - b + c - d);
}
texture<float2, cudaTextureType2D, cudaReadModeElementType> troi;
// Core soft-cascade evaluation.  Grid layout: blockIdx.x = window column,
// blockIdx.y * blockDim.y + threadIdx.y = window row, blockIdx.z = pyramid
// level (offset by `downscales`); threadIdx.x indexes weak classifiers
// within a warp-sized batch of Policy::WARP stages.  Each warp walks the
// cascade for one window, accumulating per-stage impacts; a window that
// survives all stages with non-negative confidence is appended to
// `objects` via atomicInc on *ctr.
// (Note: "__device" without trailing underscores appears in this dump —
// presumably "__device__" in the original source.)
template<typename Policy>
template<bool isUp>
__device void CascadeInvoker<Policy>::detect(Detection* objects, const uint ndetections, uint* ctr, const int downscales) const
{
    const int y = blockIdx.y * blockDim.y + threadIdx.y;
    const int x = blockIdx.x;
    // load Level
    __shared__ Level level;
    // check POI: one cached ROI byte per row of the block
    __shared__ volatile char roiCache[Policy::STA_Y];
    if (!threadIdx.y && !threadIdx.x)
        ((float2*)roiCache)[threadIdx.x] = tex2D(troi, blockIdx.y, x);
    __syncthreads();
    if (!roiCache[threadIdx.y]) return;
    if (!threadIdx.x)
        level = levels[downscales + blockIdx.z];
    // NOTE(review): level is read below without another __syncthreads()
    // after this write, and the bound check uses || — confirm these are
    // intentional in the original.
    if(x >= level.workRect.x || y >= level.workRect.y) return;
    int st = level.octave * level.step;
    const int stEnd = st + level.step;
    const int hogluvStep = gridDim.y * Policy::STA_Y;
    float confidence = 0.f;
    for(; st < stEnd; st += Policy::WARP)
    {
        // Each lane evaluates one depth-2 tree: root node, then one of two
        // children selected by the root's feature response.
        const int nId = (st + threadIdx.x) * 3;
        Node node = nodes[nId];
        float threshold = rescale<isUp>(level, node);
        int sum = get<isUp>(x, y + (node.threshold >> 28) * hogluvStep, node.rect);
        int next = 1 + (int)(sum >= threshold);
        node = nodes[nId + next];
        threshold = rescale<isUp>(level, node);
        sum = get<isUp>(x, y + (node.threshold >> 28) * hogluvStep, node.rect);
        const int lShift = (next - 1) * 2 + (int)(sum >= threshold);
        float impact = leaves[(st + threadIdx.x) * 4 + lShift];
        // Running cascade score via in-warp inclusive scan.
        PrefixSum<Policy>::apply(impact);
#if __CUDA_ARCH__ >= 120
        // Early rejection: legacy mask-less __any; st += 2048 forces the
        // loop to terminate past stEnd.
        if(__any((confidence + impact <= stages[(st + threadIdx.x)]))) st += 2048;
#endif
#if __CUDA_ARCH__ >= 300
        // Broadcast lane 31's total (the full batch sum) to all lanes.
        impact = __shfl(impact, 31);
#endif
        confidence += impact;
    }
    // Lane 0 emits the detection if the cascade was fully traversed.
    if(!threadIdx.x && st == stEnd && ((confidence - FLT_EPSILON) >= 0))
    {
        int idx = atomicInc(ctr, ndetections);
        objects[idx] = Detection(__float2int_rn(x * Policy::SHRINKAGE),
        __float2int_rn(y * Policy::SHRINKAGE), level.objSize.x, level.objSize.y, confidence);
    }
}
// Thin kernel entry point: forwards to CascadeInvoker::detect with the
// compile-time up/down-scaling flag.
template<typename Policy, bool isUp>
__global__ void soft_cascade(const CascadeInvoker<Policy> invoker, Detection* objects, const uint n, uint* ctr, const int downs)
{
    invoker.template detect<isUp>(objects, n, ctr, downs);
}
// Host driver: bind the integral channels and ROI to textures, then run
// the cascade twice — once over the `downscales` downscaled levels and
// once over the remaining (upscaled) levels, sharing the detection
// counter in slot 0 of `objects`.
// NOTE(review): fw is taken from roi.rows and fh from roi.cols — the
// apparent swap presumably reflects a transposed ROI layout; confirm.
template<typename Policy>
void CascadeInvoker<Policy>::operator()(const PtrStepSzb& roi, const PtrStepSzi& hogluv,
PtrStepSz<uchar4> objects, const int downscales, const cudaStream_t& stream) const
{
    int fw = roi.rows;
    int fh = roi.cols;
    dim3 grid(fw, fh / Policy::STA_Y, downscales);
    // objects slot 0 is the atomic detection counter; detections follow.
    uint* ctr = (uint*)(objects.ptr(0));
    Detection* det = ((Detection*)objects.ptr(0)) + 1;
    uint max_det = objects.cols / sizeof(Detection);
    cudaChannelFormatDesc desc = cudaCreateChannelDesc<int>();
    cudaSafeCall( cudaBindTexture2D(0, thogluv, hogluv.data, desc, hogluv.cols, hogluv.rows, hogluv.step));
    cudaChannelFormatDesc desc_roi = cudaCreateChannelDesc<typename Policy::roi_type>();
    cudaSafeCall( cudaBindTexture2D(0, troi, roi.data, desc_roi, roi.cols / Policy::STA_Y, roi.rows, roi.step));
    const CascadeInvoker<Policy> inv = *this;
    soft_cascade<Policy, false><<<grid, Policy::block(), 0, stream>>>(inv, det, max_det, ctr, 0);
    cudaSafeCall( cudaGetLastError());
    // Second pass: the up-scaled levels (at most 38 total scales).
    grid = dim3(fw, fh / Policy::STA_Y, min(38, scales) - downscales);
    soft_cascade<Policy, true><<<grid, Policy::block(), 0, stream>>>(inv, det, max_det, ctr, downscales);
    if (!stream)
    {
        cudaSafeCall( cudaGetLastError());
        cudaSafeCall( cudaDeviceSynchronize());
    }
}
template void CascadeInvoker<GK107PolicyX4>::operator()(const PtrStepSzb& roi, const PtrStepSzi& hogluv,
PtrStepSz<uchar4> objects, const int downscales, const cudaStream_t& stream) const;
}
}}}
|
a880877e3e568ee0c88b2430f38c46bfd8170f29.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
////////////////////////////////////////////////////////////////////////////////
// Copyright (c) 2014-2019, Lawrence Livermore National Security, LLC.
// Produced at the Lawrence Livermore National Laboratory.
// Written by the LBANN Research Team (B. Van Essen, et al.) listed in
// the CONTRIBUTORS file. <lbann-dev@llnl.gov>
//
// LLNL-CODE-697807.
// All rights reserved.
//
// This file is part of LBANN: Livermore Big Artificial Neural Network
// Toolkit. For details, see http://software.llnl.gov/LBANN or
// https://github.com/LLNL/LBANN.
//
// Licensed under the Apache License, Version 2.0 (the "Licensee"); you
// may not use this file except in compliance with the License. You may
// obtain a copy of the License at:
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
// implied. See the License for the specific language governing
// permissions and limitations under the license.
////////////////////////////////////////////////////////////////////////////////
#include "lbann/optimizers/sgd.hpp"
namespace lbann {
namespace {
// Momentum SGD update for strided (non-contiguous) column-major matrices:
// v = momentum * v + g;  x = x - lr * v.  One thread per entry; the host
// launch covers height * width threads, guarded here.
__global__ void momentum_noncontiguous_kernel(size_t height,
size_t width,
DataType learning_rate,
DataType momentum,
DataType * __restrict__ values,
size_t values_ldim,
const DataType * __restrict__ gradient,
size_t gradient_ldim,
DataType * __restrict__ velocity,
size_t velocity_ldim) {
  const size_t gid = threadIdx.x + blockIdx.x * blockDim.x;
  if (gid < height * width) {
    // Column-major: recover (row, col) from the flat index.
    const auto& row = gid % height;
    const auto& col = gid / height;
    const auto& g = gradient[row + col * gradient_ldim];
    auto& v = velocity[row + col * velocity_ldim];
    auto& x = values[row + col * values_ldim];
    v = momentum * v + g;
    x -= learning_rate * v;
  }
}
// Momentum SGD update, fast path for fully contiguous buffers:
// v = momentum * v + g;  x = x - lr * v.  One thread per element.
__global__ void momentum_contiguous_kernel(size_t size,
DataType learning_rate,
DataType momentum,
DataType * __restrict__ values,
const DataType * __restrict__ gradient,
DataType * __restrict__ velocity) {
  const size_t gid = threadIdx.x + blockIdx.x * blockDim.x;
  if (gid < size) {
    const auto& g = gradient[gid];
    auto& v = velocity[gid];
    auto& x = values[gid];
    v = momentum * v + g;
    x -= learning_rate * v;
  }
}
// Nesterov accelerated gradient update for column-major matrices:
// v = momentum * v + g;  x = x - lr * (momentum * v + g), i.e. the step
// uses the look-ahead velocity.  Uses a grid-stride loop (unlike the
// momentum kernels), so any grid size covers the matrix.
__global__ void nesterov_kernel(size_t height,
size_t width,
DataType learning_rate,
DataType momentum,
DataType * __restrict__ values,
size_t values_ldim,
const DataType * __restrict__ gradient,
size_t gradient_ldim,
DataType * __restrict__ velocity,
size_t velocity_ldim) {
  const size_t gid = threadIdx.x + blockIdx.x * blockDim.x;
  const size_t nthreads = gridDim.x * blockDim.x;
  for (size_t pos = gid; pos < height * width; pos += nthreads) {
    const auto& row = pos % height;
    const auto& col = pos / height;
    const auto& g = gradient[row + col * gradient_ldim];
    auto& v = velocity[row + col * velocity_ldim];
    auto& x = values[row + col * values_ldim];
    v = momentum * v + g;
    x -= learning_rate * (momentum * v + g);
  }
}
} // namespace
// GPU momentum/NAG step dispatcher (HIP build): picks the Nesterov kernel
// when m_nesterov is set, otherwise the contiguous fast path when all
// three local matrices are contiguous, else the strided kernel.  All
// launches go on the El::GPUManager stream; no-op for empty local data.
void sgd::momentum_step_gpu(AbsDistMat& values, const AbsDistMat& gradient) {
  // Get matrix dimensions
  const size_t local_height = values.LocalHeight();
  const size_t local_width = values.LocalWidth();
  const size_t local_size = local_height * local_width;
  if (local_size <= 0) { return; }
  // Launch CUDA kernels for momentum SGD or NAG
  constexpr size_t block_size = 256;
  const size_t grid_size = (local_size + block_size - 1) / block_size;
  auto&& stream = El::GPUManager::Stream();
  if (m_nesterov) {
    hipLaunchKernelGGL(( nesterov_kernel), dim3(grid_size), dim3(block_size), 0, stream,
    local_height, local_width,
    this->get_learning_rate(), m_momentum,
    values.Buffer(), values.LDim(),
    gradient.LockedBuffer(), gradient.LDim(),
    m_velocity->Buffer(), m_velocity->LDim());
  } else {
    if (values.Contiguous() && gradient.Contiguous()
        && m_velocity->Contiguous()) {
      hipLaunchKernelGGL(( momentum_contiguous_kernel), dim3(grid_size), dim3(block_size), 0, stream,
      local_size, this->get_learning_rate(), m_momentum,
      values.Buffer(), gradient.LockedBuffer(), m_velocity->Buffer());
    } else {
      hipLaunchKernelGGL(( momentum_noncontiguous_kernel), dim3(grid_size), dim3(block_size), 0, stream,
      local_height, local_width,
      this->get_learning_rate(), m_momentum,
      values.Buffer(), values.LDim(),
      gradient.LockedBuffer(), gradient.LDim(),
      m_velocity->Buffer(), m_velocity->LDim());
    }
  }
}
} // namespace lbann
| a880877e3e568ee0c88b2430f38c46bfd8170f29.cu | ////////////////////////////////////////////////////////////////////////////////
// Copyright (c) 2014-2019, Lawrence Livermore National Security, LLC.
// Produced at the Lawrence Livermore National Laboratory.
// Written by the LBANN Research Team (B. Van Essen, et al.) listed in
// the CONTRIBUTORS file. <lbann-dev@llnl.gov>
//
// LLNL-CODE-697807.
// All rights reserved.
//
// This file is part of LBANN: Livermore Big Artificial Neural Network
// Toolkit. For details, see http://software.llnl.gov/LBANN or
// https://github.com/LLNL/LBANN.
//
// Licensed under the Apache License, Version 2.0 (the "Licensee"); you
// may not use this file except in compliance with the License. You may
// obtain a copy of the License at:
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
// implied. See the License for the specific language governing
// permissions and limitations under the license.
////////////////////////////////////////////////////////////////////////////////
#include "lbann/optimizers/sgd.hpp"
namespace lbann {
namespace {
// Momentum SGD update for strided (non-contiguous) column-major matrices:
// v = momentum * v + g;  x = x - lr * v.  One thread per entry; the host
// launch covers height * width threads, guarded here.
__global__ void momentum_noncontiguous_kernel(size_t height,
size_t width,
DataType learning_rate,
DataType momentum,
DataType * __restrict__ values,
size_t values_ldim,
const DataType * __restrict__ gradient,
size_t gradient_ldim,
DataType * __restrict__ velocity,
size_t velocity_ldim) {
  const size_t gid = threadIdx.x + blockIdx.x * blockDim.x;
  if (gid < height * width) {
    // Column-major: recover (row, col) from the flat index.
    const auto& row = gid % height;
    const auto& col = gid / height;
    const auto& g = gradient[row + col * gradient_ldim];
    auto& v = velocity[row + col * velocity_ldim];
    auto& x = values[row + col * values_ldim];
    v = momentum * v + g;
    x -= learning_rate * v;
  }
}
// Momentum SGD update, fast path for fully contiguous buffers:
// v = momentum * v + g;  x = x - lr * v.  One thread per element.
__global__ void momentum_contiguous_kernel(size_t size,
DataType learning_rate,
DataType momentum,
DataType * __restrict__ values,
const DataType * __restrict__ gradient,
DataType * __restrict__ velocity) {
  const size_t gid = threadIdx.x + blockIdx.x * blockDim.x;
  if (gid < size) {
    const auto& g = gradient[gid];
    auto& v = velocity[gid];
    auto& x = values[gid];
    v = momentum * v + g;
    x -= learning_rate * v;
  }
}
// Nesterov accelerated gradient update for column-major matrices:
// v = momentum * v + g;  x = x - lr * (momentum * v + g), i.e. the step
// uses the look-ahead velocity.  Uses a grid-stride loop (unlike the
// momentum kernels), so any grid size covers the matrix.
__global__ void nesterov_kernel(size_t height,
size_t width,
DataType learning_rate,
DataType momentum,
DataType * __restrict__ values,
size_t values_ldim,
const DataType * __restrict__ gradient,
size_t gradient_ldim,
DataType * __restrict__ velocity,
size_t velocity_ldim) {
  const size_t gid = threadIdx.x + blockIdx.x * blockDim.x;
  const size_t nthreads = gridDim.x * blockDim.x;
  for (size_t pos = gid; pos < height * width; pos += nthreads) {
    const auto& row = pos % height;
    const auto& col = pos / height;
    const auto& g = gradient[row + col * gradient_ldim];
    auto& v = velocity[row + col * velocity_ldim];
    auto& x = values[row + col * values_ldim];
    v = momentum * v + g;
    x -= learning_rate * (momentum * v + g);
  }
}
// GPU momentum/NAG step dispatcher (CUDA build): picks the Nesterov kernel
// when m_nesterov is set, otherwise the contiguous fast path when all
// three local matrices are contiguous, else the strided kernel.  All
// launches go on the El::GPUManager stream; no-op for empty local data.
void sgd::momentum_step_gpu(AbsDistMat& values, const AbsDistMat& gradient) {
  // Get matrix dimensions
  const size_t local_height = values.LocalHeight();
  const size_t local_width = values.LocalWidth();
  const size_t local_size = local_height * local_width;
  if (local_size <= 0) { return; }
  // Launch CUDA kernels for momentum SGD or NAG
  constexpr size_t block_size = 256;
  const size_t grid_size = (local_size + block_size - 1) / block_size;
  auto&& stream = El::GPUManager::Stream();
  if (m_nesterov) {
    nesterov_kernel<<<grid_size, block_size, 0, stream>>>(
      local_height, local_width,
      this->get_learning_rate(), m_momentum,
      values.Buffer(), values.LDim(),
      gradient.LockedBuffer(), gradient.LDim(),
      m_velocity->Buffer(), m_velocity->LDim());
  } else {
    if (values.Contiguous() && gradient.Contiguous()
        && m_velocity->Contiguous()) {
      momentum_contiguous_kernel<<<grid_size, block_size, 0, stream>>>(
        local_size, this->get_learning_rate(), m_momentum,
        values.Buffer(), gradient.LockedBuffer(), m_velocity->Buffer());
    } else {
      momentum_noncontiguous_kernel<<<grid_size, block_size, 0, stream>>>(
        local_height, local_width,
        this->get_learning_rate(), m_momentum,
        values.Buffer(), values.LDim(),
        gradient.LockedBuffer(), gradient.LDim(),
        m_velocity->Buffer(), m_velocity->LDim());
    }
  }
}
} // namespace lbann
|
d1d90c04ad8e9aa7a41d6f090ca86d5262eb6ea9.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
#define d_Mu(x, y) d_Mu[(y) * (nx) + (x)]
#define d_ave_Mu(x, y) d_ave_Mu[(y) * (nx) + (x)]
#define d_field(z, x) d_field[(x) * (nz) + (z)]
#define d_bnd(x, indT) d_bnd[(indT) * (len_Bnd_vec) + (x)]
#define d_Den(x, y) d_Den[(y) * (nx) + (x)]
#define d_ave_Byc_a(x, y) d_ave_Byc_a[(y) * (nx) + (x)]
#define d_ave_Byc_b(x, y) d_ave_Byc_b[(y) * (nx) + (x)]
#include "utilities.h"
// Read `size` floats from the binary file `fname` into the pre-allocated
// buffer h_bin.  Exits the process on open failure or a short read.
void fileBinLoad(float *h_bin, int size, std::string fname) {
  FILE *fp = fopen(fname.c_str(), "rb");
  if (fp == NULL) {
    std::cout << "Attempted to read " << fname << std::endl;
    printf("File reading error!\n");
    exit(1);
  } else {
    size_t sizeRead = fread(h_bin, sizeof(float), size, fp);
    // The original discarded the fread return value; a truncated file would
    // silently leave the tail of h_bin uninitialized.  Fail loudly instead.
    if (sizeRead != (size_t)size) {
      printf("File reading error: expected %d floats, read %zu!\n", size,
             sizeRead);
      fclose(fp);
      exit(1);
    }
  }
  fclose(fp);
}
// Write `size` floats from h_bin to the binary file `fname`; exits the
// process if the file cannot be opened for writing.
void fileBinWrite(float *h_bin, int size, std::string fname) {
  FILE *out = fopen(fname.c_str(), "wb");
  if (!out) {
    printf("File writing error!\n");
    exit(1);
  }
  fwrite(h_bin, sizeof(float), size, out);
  fclose(out);
}
// Write `size` doubles from h_bin to the binary file `fname`; exits the
// process if the file cannot be opened for writing.
void fileBinWriteDouble(double *h_bin, int size, std::string fname) {
  FILE *out = fopen(fname.c_str(), "wb");
  if (!out) {
    printf("File writing error!\n");
    exit(1);
  }
  fwrite(h_bin, sizeof(double), size, out);
  fclose(out);
}
// Fill the first `size` entries of ip with `value`.
void initialArray(float *ip, int size, float value) {
  for (float *p = ip; p != ip + size; ++p) {
    *p = value;
  }
}
// Fill the first `size` entries of ip with `value` (double overload).
void initialArray(double *ip, int size, double value) {
  int i = 0;
  while (i < size) {
    ip[i] = value;
    ++i;
  }
}
// CUDA kernel: set every element of an nx-by-ny row-major array to `value`.
// Launch with a 2D grid covering (nx, ny); out-of-range threads do nothing.
// (The "intialArray" spelling is kept — callers depend on the name.)
__global__ void intialArrayGPU(float *ip, int nx, int ny, float value) {
  int gidx = threadIdx.x + blockDim.x * blockIdx.x;
  int gidy = threadIdx.y + blockDim.y * blockIdx.y;
  if (gidx < nx && gidy < ny) {
    int offset = gidx + gidy * nx;
    ip[offset] = value;
  }
}
// CUDA kernel: element-wise copy of an nx-by-ny row-major array from
// ip_in to ip_out.  One thread per element with a bounds guard.
__global__ void assignArrayGPU(float *ip_in, float *ip_out, int nx, int ny) {
  int gidx = threadIdx.x + blockDim.x * blockIdx.x;
  int gidy = threadIdx.y + blockDim.y * blockIdx.y;
  if (gidx < nx && gidy < ny) {
    int offset = gidx + gidy * nx;
    ip_out[offset] = ip_in[offset];
  }
}
// Pretty-print an nx-by-ny row-major array to stdout under the label `s`.
void displayArray(std::string s, float *ip, int nx, int ny) {
  std::cout << s << ": " << std::endl;
  for (int row = 0; row < ny; row++) {
    for (int col = 0; col < nx; col++) {
      printf("%f ", ip[row * nx + col]);
    }
    printf("\n");
  }
  printf("\n\n\n");
}
// CUDA kernel: derive Lame parameters from velocity and density models:
// Mu = Cs^2 * Den, Lambda = Den * (Cp^2 - 2 Cs^2).  One thread per cell;
// warns (but does not abort) when Lambda comes out negative, i.e. when
// Cp^2 < 2 Cs^2.
__global__ void moduliInit(float *d_Cp, float *d_Cs, float *d_Den,
float *d_Lambda, float *d_Mu, int nx, int ny) {
  // printf("Hello, world!\n");
  int gidx = threadIdx.x + blockDim.x * blockIdx.x;
  int gidy = threadIdx.y + blockDim.y * blockIdx.y;
  int offset = gidx + gidy * nx;
  if (gidx < nx && gidy < ny) {
    // printf("offset = %d ", offset);
    // printf("gridDim.x = %d ", gridDim.x);
    // printf("blockIdx.y = %d ", blockIdx.y);
    d_Mu[offset] = powf(d_Cs[offset], 2) * d_Den[offset];
    d_Lambda[offset] =
        d_Den[offset] * (powf(d_Cp[offset], 2) - 2 * powf(d_Cs[offset], 2));
    if (d_Lambda[offset] < 0) {
      printf("Lambda is negative!!!");
    }
  }
}
// CUDA kernel: inverse of moduliInit — recover velocities from Lame
// parameters and density: Cp = sqrt((Lambda + 2 Mu) / Den),
// Cs = sqrt(Mu / Den).  One thread per cell.
__global__ void velInit(float *d_Lambda, float *d_Mu, float *d_Den, float *d_Cp,
float *d_Cs, int nx, int ny) {
  // printf("Hello, world!\n");
  int gidx = threadIdx.x + blockDim.x * blockIdx.x;
  int gidy = threadIdx.y + blockDim.y * blockIdx.y;
  int offset = gidx + gidy * nx;
  if (gidx < nx && gidy < ny) {
    // printf("offset = %d ", offset);
    // printf("gridDim.x = %d ", gridDim.x);
    // printf("blockIdx.y = %d ", blockIdx.y);
    d_Cp[offset] =
        sqrt((d_Lambda[offset] + 2.0 * d_Mu[offset]) / d_Den[offset]);
    d_Cs[offset] = sqrt((d_Mu[offset]) / d_Den[offset]);
  }
}
// CUDA kernel: harmonic average of the shear modulus over the 2x2 stencil
// {(x,y), (x+1,y), (x,y+1), (x+1,y+1)} for staggered-grid FD.  If any
// neighbour has Mu == 0 (presumably vacuum/fluid cells) the average is
// forced to 0.  Interior cells only (2 .. n-3); boundary cells untouched.
__global__ void aveMuInit(float *d_Mu, float *d_ave_Mu, int nx, int ny) {
  int gidx = threadIdx.x + blockDim.x * blockIdx.x;
  int gidy = threadIdx.y + blockDim.y * blockIdx.y;
  float a, b, c, d;
  if (gidx >= 2 && gidx <= nx - 3 && gidy >= 2 && gidy <= ny - 3) {
    a = d_Mu(gidx, gidy);
    b = d_Mu(gidx + 1, gidy);
    c = d_Mu(gidx, gidy + 1);
    d = d_Mu(gidx + 1, gidy + 1);
    if (a == 0.0 || b == 0.0 || c == 0.0 || d == 0.0) {
      d_ave_Mu(gidx, gidy) = 0.0;
    } else {
      d_ave_Mu(gidx, gidy) = 4.0 / (1.0 / a + 1.0 / b + 1.0 / c + 1.0 / d);
    }
  }
}
// CUDA kernel: buoyancy (1/density) averaged at the two staggered half-grid
// positions — d_ave_Byc_a at (x+1/2, y), d_ave_Byc_b at (x, y+1/2) — using
// the arithmetic mean of the two adjacent densities.  Interior cells only.
__global__ void aveBycInit(float *d_Den, float *d_ave_Byc_a, float *d_ave_Byc_b,
int nx, int ny) {
  int gidx = threadIdx.x + blockDim.x * blockIdx.x;
  int gidy = threadIdx.y + blockDim.y * blockIdx.y;
  if (gidx >= 2 && gidx <= nx - 3 && gidy >= 2 && gidy <= ny - 3) {
    d_ave_Byc_a(gidx, gidy) = 2.0 / (d_Den(gidx + 1, gidy) + d_Den(gidx, gidy));
    d_ave_Byc_b(gidx, gidy) = 2.0 / (d_Den(gidx, gidy + 1) + d_Den(gidx, gidy));
  } else {
    return;
  }
}
// CUDA kernel: element-wise residual d_out = d_in1 - d_in2 over an
// nx-by-ny array, with the first column (idx == 0) forced to zero so the
// first time sample is excluded from the misfit.
__global__ void gpuMinus(float *d_out, float *d_in1, float *d_in2, int nx,
int ny) {
  int idx = threadIdx.x + blockDim.x * blockIdx.x;
  int idy = threadIdx.y + blockDim.y * blockIdx.y;
  // only compute last N-1 time samples for misfits!!!!!!!! DL 02/25/2019
  if (idx < nx && idy < ny && idx > 0) {
    d_out[(idy) * (nx) + (idx)] =
        d_in1[(idy) * (nx) + (idx)] - d_in2[(idy) * (nx) + (idx)];
  } else if (idx == 0 && idy < ny) {
    d_out[(idy) * (nx) + (idx)] = 0.0;
  } else {
    return;
  }
}
// CUDA kernel: sum of squares of err[0..ng) via a shared-memory block
// reduction.  Each thread accumulates a strided partial, then a tree
// reduction in shared memory combines them; thread 0 writes the result.
// NOTE(review): *obj is overwritten (not accumulated) per block, so this
// is presumably launched with a single block of 512 threads — confirm.
__global__ void cuda_cal_objective(float *obj, float *err, int ng)
/*< calculate the value of objective function: obj >*/
{
  const int Block_Size = 512;
  __shared__ float sdata[Block_Size];
  int tid = threadIdx.x;
  sdata[tid] = 0.0f;
  // Grid-stride-style accumulation over all ng samples by this block.
  for (int s = 0; s < (ng + Block_Size - 1) / Block_Size; s++) {
    int id = s * blockDim.x + threadIdx.x;
    float a = (id < ng) ? err[id] : 0.0f;
    // sdata[tid] += a*a;
    sdata[tid] += powf(a, 2);
  }
  __syncthreads();
  /* do reduction in shared mem */
  // for(int s=blockDim.x/2; s>32; s>>=1) {
  // if (threadIdx.x < s) sdata[tid] += sdata[tid + s];
  // __syncthreads();
  // }
  // if (tid < 32) {
  // if (blockDim.x >= 64) { sdata[tid] += sdata[tid + 32]; }
  // if (blockDim.x >= 32) { sdata[tid] += sdata[tid + 16]; }
  // if (blockDim.x >= 16) { sdata[tid] += sdata[tid + 8]; }
  // if (blockDim.x >= 8) { sdata[tid] += sdata[tid + 4]; }
  // if (blockDim.x >= 4) { sdata[tid] += sdata[tid + 2]; }
  // if (blockDim.x >= 2) { sdata[tid] += sdata[tid + 1]; }
  // }
  // Full tree reduction with a barrier at every level (no warp-synchronous
  // shortcut, unlike the commented-out variant above).
  for (int s = blockDim.x / 2; s >= 1; s /= 2) {
    if (threadIdx.x < s) sdata[tid] += sdata[tid + s];
    __syncthreads();
  }
  if (tid == 0) {
    *obj = sdata[0];
  }
}
// Host-side L2 objective: sum of squares of the first N entries of array.
// (Removed a leftover debug printf("hhh\n") from the original.)
float cal_objective(float *array, int N) {
  float misfit = 0.0;
  for (int i = 0; i < N; i++) {
    misfit += array[i] * array[i];
  }
  return misfit;
}
// Arithmetic mean of the first N entries of array.
float compCpAve(float *array, int N) {
  float sum = 0.0;
  for (int i = 0; i < N; ++i) {
    sum += array[i];
  }
  return sum / float(N);
}
// CFL stability check for the 4th-order staggered-grid scheme: finds the
// maximum P velocity in h_Cp, prints the resulting Courant number, and
// exits the process when it exceeds 1.
void compCourantNumber(float *h_Cp, int size, float dt, float dz, float dx) {
  float max = h_Cp[0];  // NOTE(review): assumes size >= 1 — confirm callers
  float Courant_number = 0.0;
  for (int i = 0; i < size; i++) {
    if (h_Cp[i] > max) {
      max = h_Cp[i];
    }
  }
  float dh_min = (dz < dx) ? dz : dx;
  // Courant_number = max * dt * sqrtf(powf(1.0 / dz, 2) + powf(1.0 / dx, 2));
  // 1/24 and 9/8 are the magnitudes of the 4th-order staggered-grid FD
  // coefficients; sqrt(2) accounts for the 2-D diagonal.
  Courant_number = max * dt * sqrtf(2.0) * (1.0 / 24.0 + 9.0 / 8.0) / dh_min;
  std::cout << "Courant_number = " << Courant_number << std::endl;
  if (Courant_number > 1.0) exit(1);
}
// Build 1-D C-PML (convolutional perfectly matched layer) absorbing-boundary
// profiles along one axis of length N, with nPml damping cells at each end.
// Outputs (all length N, preallocated by the caller):
//   K, a, b                : profiles at integer grid points
//   K_half, a_half, b_half : profiles at half-shifted (staggered) points
// Inputs: dh grid spacing, f0 dominant source frequency, dt time step,
// CpAve an average P velocity (NOTE: overridden below -- see comment).
// Coefficients follow the standard C-PML recursion:
//   b = exp(-(d/K + alpha) dt),  a = d (b - 1) / (K (d + K alpha)).
void cpmlInit(float *K, float *a, float *b, float *K_half, float *a_half,
              float *b_half, int N, int nPml, float dh, float f0, float dt,
              float CpAve) {
  float *damp, *damp_half, *alpha, *alpha_half;
  float d0_h = 0.0;
  float Rcoef = 0.0008;  // target theoretical reflection coefficient
  float depth_in_pml = 0.0;
  float depth_normalized = 0.0;
  float thickness_PML = 0.0;
  // const float PI = 3.141592653589793238462643383279502884197169;
  const float K_MAX_PML = 2.0;
  const float ALPHA_MAX_PML = 2.0 * PI * (f0 / 2.0);
  const float NPOWER = 8.0;  // polynomial order of the damping profile
  const float c1 = 0.25, c2 = 0.75, c3 = 0.0;  // profile blend weights
  // const float c1 = 0.0, c2 = 1.0, c3 = 0.0;
  thickness_PML = nPml * dh; // changed here
  // NOTE(review): the CpAve argument is intentionally overridden with a
  // fixed 3000 m/s so the damping strength is model independent.
  CpAve = 3000.0; // DL make this model independent
  // d0: damping amplitude that yields reflection coefficient Rcoef
  d0_h = -(NPOWER + 1) * CpAve * log(Rcoef) / (2.0 * thickness_PML);
  damp = (float *)malloc(N * sizeof(float));
  damp_half = (float *)malloc(N * sizeof(float));
  alpha = (float *)malloc(N * sizeof(float));
  alpha_half = (float *)malloc(N * sizeof(float));
  initialArray(damp, N, 0.0);
  initialArray(damp_half, N, 0.0);
  initialArray(K, N, 1.0);
  initialArray(K_half, N, 1.0);
  initialArray(alpha, N, 0.0);
  initialArray(alpha_half, N, 0.0);
  initialArray(a, N, 0.0);
  initialArray(a_half, N, 0.0);
  initialArray(b, N, 0.0);
  initialArray(b_half, N, 0.0);
  for (int i = 0; i < N; i++) {
    // left edge
    depth_in_pml = (nPml - i) * dh;
    if (depth_in_pml >= 0.0) {
      depth_normalized = depth_in_pml / thickness_PML;
      damp[i] =
          d0_h * (c1 * depth_normalized + c2 * pow(depth_normalized, NPOWER) +
                  c3 * pow(depth_normalized, 2 * NPOWER));
      K[i] = 1.0 + (K_MAX_PML - 1.0) * pow(depth_normalized, NPOWER);
      alpha[i] = ALPHA_MAX_PML * (1.0 - depth_normalized);
    }
    if (alpha[i] < 0.0) {
      std::cout << "CPML alpha < 0.0 --" << __LINE__ << std::endl;
      exit(1);
    }
    // half the grid points
    depth_in_pml = (nPml - i - 0.5) * dh;
    if (depth_in_pml >= 0.0) {
      depth_normalized = depth_in_pml / thickness_PML;
      damp_half[i] =
          d0_h * (c1 * depth_normalized + c2 * pow(depth_normalized, NPOWER) +
                  c3 * pow(depth_normalized, 2 * NPOWER));
      K_half[i] = 1.0 + (K_MAX_PML - 1.0) * pow(depth_normalized, NPOWER);
      alpha_half[i] = ALPHA_MAX_PML * (1.0 - depth_normalized);
    }
    if (alpha_half[i] < 0.0) {
      std::cout << "CPML alpha_half < 0.0 --" << __LINE__ << std::endl;
      exit(1);
    }
    // right edge
    depth_in_pml = (nPml - N + i) * dh;
    if (depth_in_pml >= 0.0) {
      depth_normalized = depth_in_pml / thickness_PML;
      damp[i] =
          d0_h * (c1 * depth_normalized + c2 * pow(depth_normalized, NPOWER) +
                  c3 * pow(depth_normalized, 2 * NPOWER));
      K[i] = 1.0 + (K_MAX_PML - 1.0) * pow(depth_normalized, NPOWER);
      alpha[i] = ALPHA_MAX_PML * (1.0 - depth_normalized);
    }
    if (alpha[i] < 0.0) {
      std::cout << "CPML alpha < 0.0 --" << __LINE__ << std::endl;
      exit(1);
    }
    depth_in_pml = (nPml - N + i + 0.5) * dh;
    if (depth_in_pml >= 0.0) {
      depth_normalized = depth_in_pml / thickness_PML;
      damp_half[i] =
          d0_h * (c1 * depth_normalized + c2 * pow(depth_normalized, NPOWER) +
                  c3 * pow(depth_normalized, 2 * NPOWER));
      // NOTE(review): powf here vs. pow everywhere else -- harmless but
      // inconsistent with the other three profile computations.
      K_half[i] = 1.0 + (K_MAX_PML - 1.0) * powf(depth_normalized, NPOWER);
      alpha_half[i] = ALPHA_MAX_PML * (1.0 - depth_normalized);
    }
    if (alpha_half[i] < 0.0) {
      std::cout << "CPML alpha_half < 0.0 --" << __LINE__ << std::endl;
      exit(1);
    }
    // NOTE(review): the two clamps below are unreachable -- negative alpha
    // already triggered exit(1) above.
    if (alpha[i] < 0.0) {
      alpha[i] = 0.0;
    }
    if (alpha_half[i] < 0.0) {
      alpha_half[i] = 0.0;
    }
    // C-PML recursion coefficients for the memory variables
    b[i] = expf(-(damp[i] / K[i] + alpha[i]) * dt);
    b_half[i] = expf(-(damp_half[i] / K_half[i] + alpha_half[i]) * dt);
    if (fabs(damp[i]) > 1.0e-6) {
      a[i] = damp[i] * (b[i] - 1.0) / (K[i] * (damp[i] + K[i] * alpha[i]));
    }
    if (fabs(damp_half[i]) > 1.0e-6) {
      a_half[i] = damp_half[i] * (b_half[i] - 1.0) /
                  (K_half[i] * (damp_half[i] + K_half[i] * alpha_half[i]));
    }
  }
  free(damp);
  free(damp_half);
  free(alpha);
  free(alpha_half);
}
// Dongzhuo Li 05/15/2019
// Archive the wavefield along the four edges of the interior (physical)
// domain into d_bnd at time step indT, for later wavefield reconstruction
// during backward propagation (boundary-saving scheme). d_field(z,x) and
// d_bnd(i,t) are indexing macros defined elsewhere in this file.
// d_bnd layout per time step: [left columns | right columns | top rows |
// bottom rows], each strip nLayerStore layers thick.
__global__ void from_bnd(float *d_field, float *d_bnd, int nz, int nx,
                         int nzBnd, int nxBnd, int len_Bnd_vec, int nLayerStore,
                         int indT, int nPml, int nPad, int nSteps) {
  int idxBnd = threadIdx.x + blockDim.x * blockIdx.x;
  int iRow, jCol;
  // left edge: nLayerStore columns just inside the left PML
  if (idxBnd >= 0 && idxBnd <= nLayerStore * nzBnd - 1) {
    jCol = idxBnd / nzBnd;
    iRow = idxBnd - jCol * nzBnd;
    d_bnd(idxBnd, indT) = d_field((iRow + nPml - 2), (jCol + nPml - 2));
  } else if (idxBnd >= nLayerStore * nzBnd &&
             idxBnd <= 2 * nLayerStore * nzBnd - 1) {
    // right edge: nLayerStore columns just inside the right PML
    jCol = (idxBnd - nLayerStore * nzBnd) / nzBnd;
    iRow = (idxBnd - nLayerStore * nzBnd) - jCol * nzBnd;
    d_bnd(idxBnd, indT) =
        d_field((iRow + nPml - 2), (nx - nPml - jCol - 1 + 2));
  } else if (idxBnd >= 2 * nLayerStore * nzBnd &&
             idxBnd <= nLayerStore * (2 * nzBnd + nxBnd) - 1) {
    // top edge: nLayerStore rows below the top PML
    iRow = (idxBnd - 2 * nLayerStore * nzBnd) / nxBnd;
    jCol = (idxBnd - 2 * nLayerStore * nzBnd) - iRow * nxBnd;
    d_bnd(idxBnd, indT) = d_field((iRow + nPml - 2), (jCol + nPml - 2));
  } else if (idxBnd >= nLayerStore * (2 * nzBnd + nxBnd) &&
             idxBnd <= 2 * nLayerStore * (nzBnd + nxBnd) - 1) {
    // bottom edge: nLayerStore rows above the bottom PML (+ padding nPad)
    iRow = (idxBnd - nLayerStore * (2 * nzBnd + nxBnd)) / nxBnd;
    jCol = (idxBnd - nLayerStore * (2 * nzBnd + nxBnd)) - iRow * nxBnd;
    d_bnd(idxBnd, indT) =
        d_field((nz - nPml - nPad - iRow - 1 + 2), (jCol + nPml - 2));
  } else {
    return;
  }
}
// Dongzhuo Li 05/15/2019
// Inverse of from_bnd(): restore the archived edge strips from d_bnd (time
// step indT) back into the wavefield d_field during backward propagation.
// Index arithmetic mirrors from_bnd() exactly; d_field/d_bnd are indexing
// macros defined elsewhere in this file.
__global__ void to_bnd(float *d_field, float *d_bnd, int nz, int nx, int nzBnd,
                       int nxBnd, int len_Bnd_vec, int nLayerStore, int indT,
                       int nPml, int nPad, int nSteps) {
  int idxBnd = threadIdx.x + blockDim.x * blockIdx.x;
  int iRow, jCol;
  // left edge
  if (idxBnd >= 0 && idxBnd <= nLayerStore * nzBnd - 1) {
    jCol = idxBnd / nzBnd;
    iRow = idxBnd - jCol * nzBnd;
    d_field((iRow + nPml - 2), (jCol + nPml - 2)) = d_bnd(idxBnd, indT);
  } else if (idxBnd >= nLayerStore * nzBnd &&
             idxBnd <= 2 * nLayerStore * nzBnd - 1) {
    // right edge
    jCol = (idxBnd - nLayerStore * nzBnd) / nzBnd;
    iRow = (idxBnd - nLayerStore * nzBnd) - jCol * nzBnd;
    d_field((iRow + nPml - 2), (nx - nPml - jCol - 1 + 2)) =
        d_bnd(idxBnd, indT);
  } else if (idxBnd >= 2 * nLayerStore * nzBnd &&
             idxBnd <= nLayerStore * (2 * nzBnd + nxBnd) - 1) {
    // top edge
    iRow = (idxBnd - 2 * nLayerStore * nzBnd) / nxBnd;
    jCol = (idxBnd - 2 * nLayerStore * nzBnd) - iRow * nxBnd;
    d_field((iRow + nPml - 2), (jCol + nPml - 2)) = d_bnd(idxBnd, indT);
  } else if (idxBnd >= nLayerStore * (2 * nzBnd + nxBnd) &&
             idxBnd <= 2 * nLayerStore * (nzBnd + nxBnd) - 1) {
    // bottom edge
    iRow = (idxBnd - nLayerStore * (2 * nzBnd + nxBnd)) / nxBnd;
    jCol = (idxBnd - nLayerStore * (2 * nzBnd + nxBnd)) - iRow * nxBnd;
    d_field((nz - nPml - nPad - iRow - 1 + 2), (jCol + nPml - 2)) =
        d_bnd(idxBnd, indT);
  } else {
    return;
  }
}
// // Dongzhuo Li 02/24/2019
// __global__ void from_bnd(float *d_field, float *d_bnd, int nz, int nx, int
// nzBnd, \
// int nxBnd, int len_Bnd_vec, int nLayerStore, int indT, int nPml, int nPad,
// int nSteps) {
// int idxBnd = threadIdx.x + blockDim.x*blockIdx.x;
// int iRow,jCol;
// if(idxBnd>=0 && idxBnd<=nLayerStore*nzBnd-1) {
// jCol = idxBnd/nzBnd;
// iRow = idxBnd - jCol*nzBnd;
// d_bnd(idxBnd,indT) = d_field((iRow),(jCol));
// }
// else if(idxBnd>=nLayerStore*nzBnd && idxBnd<=2*nLayerStore*nzBnd-1){
// jCol = (idxBnd-nLayerStore*nzBnd)/nzBnd;
// iRow = (idxBnd-nLayerStore*nzBnd) - jCol*nzBnd;
// d_bnd(idxBnd,indT) = d_field((iRow),(nx-jCol-1));
// }
// else if(idxBnd>=2*nLayerStore*nzBnd &&
// idxBnd<=nLayerStore*(2*nzBnd+nxBnd)-1) {
// iRow = (idxBnd - 2*nLayerStore*nzBnd)/nxBnd;
// jCol = (idxBnd - 2*nLayerStore*nzBnd) - iRow*nxBnd;
// d_bnd(idxBnd,indT) = d_field((iRow),(jCol));
// }
// else if(idxBnd>=nLayerStore*(2*nzBnd+nxBnd) &&
// idxBnd<=2*nLayerStore*(nzBnd+nxBnd)-1) {
// iRow = (idxBnd - nLayerStore*(2*nzBnd+nxBnd))/nxBnd;
// jCol = (idxBnd - nLayerStore*(2*nzBnd+nxBnd)) - iRow*nxBnd;
// d_bnd(idxBnd,indT) = d_field((nz-nPad-iRow-1),(jCol));
// }
// else {
// return;
// }
// // if(idxBnd>=0 && idxBnd<=2*(nzBnd+nxBnd)-1) {
// // d_bnd(idxBnd, indT) = 1.0;
// // } else {
// // return;
// // }
// }
// // Dongzhuo Li 02/24/2019
// __global__ void to_bnd(float *d_field, float *d_bnd, int nz, int nx, int
// nzBnd, \
// int nxBnd, int len_Bnd_vec, int nLayerStore, int indT, int nPml, int nPad,
// int nSteps) {
// int idxBnd = threadIdx.x + blockDim.x*blockIdx.x;
// int iRow,jCol;
// if(idxBnd>=0 && idxBnd<=nLayerStore*nzBnd-1) {
// jCol = idxBnd/nzBnd;
// iRow = idxBnd - jCol*nzBnd;
// d_field((iRow),(jCol)) = d_bnd(idxBnd,indT);
// }
// else if(idxBnd>=nLayerStore*nzBnd && idxBnd<=2*nLayerStore*nzBnd-1){
// jCol = (idxBnd-nLayerStore*nzBnd)/nzBnd;
// iRow = (idxBnd-nLayerStore*nzBnd) - jCol*nzBnd;
// d_field((iRow),(nx-jCol-1)) = d_bnd(idxBnd,indT);
// }
// else if(idxBnd>=2*nLayerStore*nzBnd &&
// idxBnd<=nLayerStore*(2*nzBnd+nxBnd)-1) {
// iRow = (idxBnd - 2*nLayerStore*nzBnd)/nxBnd;
// jCol = (idxBnd - 2*nLayerStore*nzBnd) - iRow*nxBnd;
// d_field((iRow),(jCol)) = d_bnd(idxBnd,indT);
// }
// else if(idxBnd>=nLayerStore*(2*nzBnd+nxBnd) &&
// idxBnd<=2*nLayerStore*(nzBnd+nxBnd)-1) {
// iRow = (idxBnd - nLayerStore*(2*nzBnd+nxBnd))/nxBnd;
// jCol = (idxBnd - nLayerStore*(2*nzBnd+nxBnd)) - iRow*nxBnd;
// d_field((nz-nPad-iRow-1),(jCol)) = d_bnd(idxBnd,indT);
// }
// else {
// return;
// }
// }
// Fill gauss_amp (nz-by-nx, column-major) with a very sharp Gaussian
// centred on the grid midpoint; used to spread a point source over a patch.
__global__ void src_rec_gauss_amp(float *gauss_amp, int nz, int nx) {
  int gidz = blockIdx.x * blockDim.x + threadIdx.x;
  int gidx = blockIdx.y * blockDim.y + threadIdx.y;
  if (gidz < 0 || gidz >= nz || gidx < 0 || gidx >= nx) return;
  // offsets from the centre of the patch
  int off_z = gidz - nz / 2;
  int off_x = gidx - nx / 2;
  gauss_amp[gidz + gidx * nz] =
      expf(-1000.0 * (powf(float(off_z), 2) + powf(float(off_x), 2)));
}
// Add (isFor == true) or subtract (isFor == false, used when stepping the
// source backward) a point source spread over a 9x9 Gaussian patch into the
// normal stress fields szz and sxx, centred at (z_loc, x_loc). Launch with
// a grid covering at least 9x9 threads; gauss_amp holds the 9x9 weights.
// RSXXZZ is a macro defined elsewhere in the project (sxx/szz amplitude
// ratio for the borehole source) -- TODO confirm its definition.
__global__ void add_source(float *d_szz, float *d_sxx, float amp, int nz,
                           bool isFor, int z_loc, int x_loc, float dt,
                           float *gauss_amp) {
  // int id = threadIdx.x + blockDim.x * blockIdx.x;
  int gidz = blockIdx.x * blockDim.x + threadIdx.x;
  int gidx = blockIdx.y * blockDim.y + threadIdx.y;
  // NOTE(review): double-precision pow on constants, recomputed per thread;
  // the 1500^2 scale presumably converts amplitude units -- confirm.
  float scale = pow(1500.0, 2);
  if (isFor) {
    if (gidz >= 0 && gidz < 9 && gidx >= 0 && gidx < 9) {
      int idz = gidz - 9 / 2;  // offset from patch centre
      int idx = gidx - 9 / 2;
      // printf("amp = %f ", amp);
      d_szz[(z_loc + idz) + nz * (x_loc + idx)] +=
          scale * amp * dt * gauss_amp[gidz + gidx * 9];
      // crosswell borehole source (can be modified) assume cp/cs = sqrt(3.0)
      d_sxx[(z_loc + idz) + nz * (x_loc + idx)] +=
          RSXXZZ * scale * amp * dt * gauss_amp[gidz + gidx * 9];
    } else {
      return;
    }
  } else {
    // backward pass: remove the same contribution
    if (gidz >= 0 && gidz < 9 && gidx >= 0 && gidx < 9) {
      int idz = gidz - 9 / 2;
      int idx = gidx - 9 / 2;
      // printf("amp = %f ", amp);
      d_szz[(z_loc + idz) + nz * (x_loc + idx)] -=
          scale * amp * dt * gauss_amp[gidz + gidx * 9];
      d_sxx[(z_loc + idz) + nz * (x_loc + idx)] -=
          RSXXZZ * scale * amp * dt * gauss_amp[gidz + gidx * 9];
    } else {
      return;
    }
  }
}
// Sample the stress fields at every receiver location at time step `it` and
// store the combined value szz + RSXXZZ * sxx into the shot gather d_data
// (trace-major: nSteps samples per trace). iShot is currently unused.
__global__ void recording(float *d_szz, float *d_sxx, int nz, float *d_data,
                          int iShot, int it, int nSteps, int nrec, int *d_z_rec,
                          int *d_x_rec) {
  int iRec = threadIdx.x + blockDim.x * blockIdx.x;
  if (iRec >= nrec) return;
  int field_idx = d_z_rec[iRec] + d_x_rec[iRec] * nz;
  d_data[iRec * nSteps + it] =
      d_szz[field_idx] + RSXXZZ * d_sxx[field_idx];
}
// Inject the data residual at each receiver position as an adjoint source
// into the adjoint stress fields (szz gets the residual, sxx gets it scaled
// by RSXXZZ). d_res is trace-major with nSteps samples per trace.
__global__ void res_injection(float *d_szz_adj, float *d_sxx_adj, int nz,
                              float *d_res, int it, float dt, int nSteps,
                              int nrec, int *d_z_rec, int *d_x_rec) {
  int iRec = threadIdx.x + blockDim.x * blockIdx.x;
  if (iRec >= nrec) return;
  int field_idx = d_z_rec[iRec] + nz * d_x_rec[iRec];
  d_szz_adj[field_idx] += d_res[iRec * nSteps + it];
  d_sxx_adj[field_idx] += RSXXZZ * d_res[iRec * nSteps + it];
}
// Source-wavelet gradient at time step `it`: minus the adjoint field sampled
// at the source location, scaled by dt. Computed by thread 0 only.
__global__ void source_grad(float *d_szz_adj, float *d_sxx_adj, int nz,
                            float *d_StfGrad, int it, float dt, int z_src,
                            int x_src) {
  int tid = threadIdx.x + blockDim.x * blockIdx.x;
  if (tid != 0) return;
  int src_idx = z_src + nz * x_src;
  d_StfGrad[it] =
      -(d_szz_adj[src_idx] + RSXXZZ * d_sxx_adj[src_idx]) * dt;
}
// Dongzhuo Li 01/28/2019
// Trapezoidal band-pass filter applied in place to the spectra d_data_F
// (nrec traces, nf = nSteps/2 + 1 bins each, trace-major). Corner
// frequencies f0 < f1 < f2 < f3 define the pass band: sin ramp on [f0,f1),
// flat on [f1,f2), cos ramp on [f2,f3), zero elsewhere. The amplitude is
// applied squared, i.e. as a zero-phase power-of-two taper.
__global__ void cuda_bp_filter1d(int nSteps, float dt, int nrec,
                                 hipfftComplex *d_data_F, float f0, float f1,
                                 float f2, float f3) {
  int nf = nSteps / 2 + 1;       // number of non-redundant R2C bins
  float df = 1.0 / dt / nSteps;  // frequency-bin spacing (Hz)
  int idf = blockIdx.x * blockDim.x + threadIdx.x;
  int idr = blockIdx.y * blockDim.y + threadIdx.y;
  int ip = idr * nf + idf;
  float freq = idf * df;
  float filter_amp = 1.0;
  // printf("fffffff = %f\n", freq);
  if (idf >= 0 && idf < nf && idr >= 0 && idr < nrec) {
    if (freq >= f0 && freq < f1) {
      filter_amp = sin(PI / 2.0 * (freq - f0) / (f1 - f0));
    } else if (freq >= f1 && freq < f2) {
      filter_amp = 1.0;
    } else if (freq >= f2 && freq < f3) {
      filter_amp = cos(PI / 2.0 * (freq - f2) / (f3 - f2));
    } else {
      filter_amp = 0.0;
    }
    d_data_F[ip].x *= filter_amp * filter_amp;
    d_data_F[ip].y *= filter_amp * filter_amp;
  }
}
// Multiply every trace spectrum (nf bins each, trace-major) by the shared
// per-frequency complex coefficients d_coef, in place.
__global__ void cuda_filter1d(int nf, int nrec, cuFloatComplex *d_data_F,
                              cuFloatComplex *d_coef) {
  int freq_id = blockIdx.x * blockDim.x + threadIdx.x;
  int rec_id = blockIdx.y * blockDim.y + threadIdx.y;
  if (freq_id < 0 || freq_id >= nf || rec_id < 0 || rec_id >= nrec) return;
  int idx = rec_id * nf + freq_id;
  d_data_F[idx] = cuCmulf(d_data_F[idx], d_coef[freq_id]);
}
// Scale every element of an nz-by-nx (column-major) device array by
// `factor`. Despite the name this is a plain multiply; callers pass the
// reciprocal of the FFT length to normalize inverse-FFT output, hence the
// zero-factor guard message.
__global__ void cuda_normalize(int nz, int nx, float *data, float factor) {
  int row = blockIdx.x * blockDim.x + threadIdx.x;
  int col = blockIdx.y * blockDim.y + threadIdx.y;
  if (factor == 0.0) {
    printf("Dividing by zero!\n");
    return;
  }
  if (row < 0 || row >= nz || col < 0 || col >= nx) return;
  data[col * nz + row] *= factor;
}
// windowing in the time axis
// Apply a per-trace cosine-tapered time window to the gather `data`
// (nrec traces of nt samples, trace-major). d_win_start/d_win_end give the
// window limits per receiver in seconds; `ratio` is the taper fraction of
// the window length; d_weights is a per-trace scale factor. The taper
// amplitude is applied squared.
__global__ void cuda_window(int nt, int nrec, float dt, float *d_win_start,
                            float *d_win_end, float *d_weights, float ratio,
                            float *data) {
  int idt = blockIdx.x * blockDim.x + threadIdx.x;
  int idr = blockIdx.y * blockDim.y + threadIdx.y;
  int ip = idr * nt + idt;
  // stupid bug... (I put the if just befor line 614)
  if (idt >= 0 && idt < nt && idr >= 0 && idr < nrec) {
    float window_amp = 1.0;
    float t = idt * dt;
    if (ratio > 0.5) {
      // taper halves would overlap; refuse to window
      printf("Dividing by zero!\n");
      return;
    }
    float t0 = d_win_start[idr];
    float t3 = d_win_end[idr];
    if (t0 == 0.0 && t3 == 0.0) printf("t0 = %f, t3 = %f\n\n", t0, t3);
    float t_max = nt * dt;
    // clamp the window limits into the trace's time range
    if (t0 < 0.0) t0 = 0.0;
    if (t0 > t_max) t0 = t_max;
    if (t3 < 0.0) t3 = 0.0;
    if (t3 > t_max) t3 = t_max;
    float offset = (t3 - t0) * ratio;  // taper length at each end
    if (offset <= 0.0) {
      printf("Window error 1!!\n");
      printf("offset = %f\n", offset);
      return;
    }
    float t1 = t0 + offset;
    float t2 = t3 - offset;
    // sin^2 ramp-in, flat middle, cos^2 ramp-out, zero outside [t0, t3)
    if (t >= t0 && t < t1) {
      window_amp = sin(PI / 2.0 * (t - t0) / (t1 - t0));
    } else if (t >= t1 && t < t2) {
      window_amp = 1.0;
    } else if (t >= t2 && t < t3) {
      window_amp = cos(PI / 2.0 * (t - t2) / (t3 - t2));
    } else {
      window_amp = 0.0;
    }
    data[ip] *= window_amp * window_amp * d_weights[idr];
  } else {
    return;
  }
}
// overloaded window function: without specifying windows and weights
// Whole-trace cosine taper: windows the full range [0, nt*dt] with taper
// length nt*dt*ratio at both ends and implicit unit weights; the taper
// amplitude is applied squared.
__global__ void cuda_window(int nt, int nrec, float dt, float ratio,
                            float *data) {
  int idt = blockIdx.x * blockDim.x + threadIdx.x;
  int idr = blockIdx.y * blockDim.y + threadIdx.y;
  int ip = idr * nt + idt;
  if (idt >= 0 && idt < nt && idr >= 0 && idr < nrec) {
    float window_amp = 1.0;
    float t = idt * dt;
    // if (ratio > 0.5) {
    //   printf("Dividing by zero!\n");
    //   return;
    // }
    float t0 = 0;
    float t3 = nt * dt;
    float offset = nt * dt * ratio;  // taper length at each end
    if (2.0 * offset >= t3 - t0) {
      // tapers would overlap in the middle
      printf("Window error 2!\n");
      return;
    }
    float t1 = t0 + offset;
    float t2 = t3 - offset;
    // sin^2 ramp-in, flat middle, cos^2 ramp-out
    if (t >= t0 && t < t1) {
      window_amp = sin(PI / 2.0 * (t - t0) / (t1 - t0));
    } else if (t >= t1 && t < t2) {
      window_amp = 1.0;
    } else if (t >= t2 && t < t3) {
      window_amp = cos(PI / 2.0 * (t - t2) / (t3 - t2));
    } else {
      window_amp = 0.0;
    }
    data[ip] *= window_amp * window_amp;
  }
}
// Array padding / cropping between an nz-by-nx array and the top-left corner
// of an nz_pad-by-nx_pad array (both column-major device buffers).
//   isEmbed == true  : copy d_data into d_data_pad (embed)
//   isEmbed == false : copy d_data_pad back into d_data (crop)
__global__ void cuda_embed_crop(int nz, int nx, float *d_data, int nz_pad,
                                int nx_pad, float *d_data_pad, bool isEmbed) {
  int row = blockIdx.x * blockDim.x + threadIdx.x;
  int col = blockIdx.y * blockDim.y + threadIdx.y;
  if (row < 0 || row >= nz || col < 0 || col >= nx) return;
  int idx = col * nz + row;
  int idx_pad = col * nz_pad + row;
  if (isEmbed) {
    d_data_pad[idx_pad] = d_data[idx];
  } else {
    d_data[idx] = d_data_pad[idx_pad];
  }
}
// Dongzhuo Li 02/02/2019
// Per-frequency source-correction update (source-signature estimation).
// Each block handles ONE frequency bin (launch as <<<nf, 512>>>): it reduces
// over all receivers the damped least-squares transfer coefficient
//   coef(w) = sum_r conj(cal_r) * obs_r / (sum_r |cal_r|^2 + lambda),
// stores coef into d_coef, multiplies the source spectrum by it, and applies
// it to every calculated trace in place.
// NOTE(review): the shared-memory reduction assumes blockDim.x ==
// Block_Size (512) and a power-of-two block size -- confirm launch config.
__global__ void cuda_spectrum_update(int nf, int nrec,
                                     cuFloatComplex *d_data_obs_F,
                                     cuFloatComplex *d_data_cal_F,
                                     cuFloatComplex *d_source_F,
                                     cuFloatComplex *d_coef) {
  int idr = 0, idf = 0, ip = 0;
  const int Block_Size = 512;
  const float lambda = 1e-6;  // damping term against division by ~zero
  cuFloatComplex c_obs = make_cuFloatComplex(0.0f, 0.0f);
  cuFloatComplex c_cal = make_cuFloatComplex(0.0f, 0.0f);
  cuFloatComplex c_nominator = make_cuFloatComplex(0.0f, 0.0f);
  cuFloatComplex c_denominator = make_cuFloatComplex(0.0f, 0.0f);
  __shared__ cuFloatComplex sh_nominator_F[Block_Size];
  __shared__ cuFloatComplex sh_denominator_F[Block_Size];
  int tid =
      threadIdx.x;  // one thread handles s receivers (with 512 as the interval)
  int bid = blockIdx.x;  // one block handles one frequency
  sh_nominator_F[tid] = make_cuFloatComplex(0.0f, 0.0f);
  sh_denominator_F[tid] = make_cuFloatComplex(0.0f, 0.0f);
  __syncthreads();
  // grid-stride-style accumulation over receivers into shared partials
  for (int s = 0; s < (nrec + Block_Size - 1) / Block_Size; s++) {
    idr = s * blockDim.x + tid;
    idf = bid;
    ip = idr * nf + idf;
    if (idr >= 0 && idr < nrec && idf >= 0 && idf < nf) {
      c_obs = d_data_obs_F[ip];
      c_cal = d_data_cal_F[ip];
      sh_nominator_F[tid] =
          cuCaddf(sh_nominator_F[tid], cuCmulf(cuConjf(c_cal), c_obs));
      sh_denominator_F[tid] =
          cuCaddf(sh_denominator_F[tid], cuCmulf(cuConjf(c_cal), c_cal));
    }
  }
  __syncthreads();
  // do reduction in shared memory
  for (int s = blockDim.x / 2; s >= 1; s /= 2) {
    if (tid < s) {
      sh_nominator_F[tid] =
          cuCaddf(sh_nominator_F[tid], sh_nominator_F[tid + s]);
      sh_denominator_F[tid] =
          cuCaddf(sh_denominator_F[tid], sh_denominator_F[tid + s]);
    }
    __syncthreads();
  }
  // thread 0 forms the coefficient and updates the source spectrum
  if (tid == 0) {
    sh_denominator_F[0].x += lambda;
    // printf("nomi = %f, deno = %f\n", cuCabsf(sh_nominator_F[0]),
    // cuCabsf(sh_denominator_F[0]));
    sh_nominator_F[0] = cuCdivf(sh_nominator_F[0], sh_denominator_F[0]);
    // printf("coef = %f", sh_nominator_F[0].x);
    d_coef[bid] = sh_nominator_F[0];
    d_source_F[bid] = cuCmulf(d_source_F[bid], sh_nominator_F[0]);
  }
  // printf("tid = %d\n", tid);
  __syncthreads();
  // apply the same coefficient to every calculated trace at this frequency
  for (int s = 0; s < (nrec + Block_Size - 1) / Block_Size; s++) {
    idr = s * blockDim.x + tid;
    idf = bid;
    ip = idr * nf + idf;
    if (idr >= 0 && idr < nrec && idf >= 0 && idf < nf) {
      d_data_cal_F[ip] = cuCmulf(d_data_cal_F[ip], sh_nominator_F[0]);
      // d_data_cal_F[ip].x *= cuCabsf(sh_nominator_F[0]);
      // d_data_cal_F[ip].y *= cuCabsf(sh_nominator_F[0]);
      // if (tid == 0) printf("ratio = %f\n", cuCabsf(sh_nominator_F[0]));
    }
  }
  __syncthreads();
}
// Reduce the maximum absolute value of data[0..n) into maxval[0].
// NOTE(review): designed for a SINGLE-block launch (callers use
// <<<1, 512>>>); with more blocks every block would race on maxval[0].
// The tree reduction below also assumes a power-of-two blockDim.x.
__global__ void cuda_find_absmax(int n, float *data, float *maxval) {
  int tid =
      threadIdx.x;  // one thread handles s receivers (with 512 as the interval)
  const int Block_Size = 512;
  __shared__ float sh_data[Block_Size];
  sh_data[tid] = 0.0;
  __syncthreads();
  // each thread strides through the array collecting its own max |value|
  for (int s = 0; s < (n + Block_Size - 1) / Block_Size; s++) {
    int ip = s * blockDim.x + tid;
    if (ip >= 0 && ip < n) {
      if (fabs(data[ip]) > fabs(sh_data[tid])) sh_data[tid] = fabs(data[ip]);
    }
  }
  __syncthreads();
  // do reduction in shared memory
  for (int s = blockDim.x / 2; s >= 1; s /= 2) {
    if (tid < s) {
      sh_data[tid] =
          (sh_data[tid] >= sh_data[tid + s]) ? sh_data[tid] : sh_data[tid + s];
    }
    __syncthreads();
  }
  if (tid == 0) maxval[0] = sh_data[0];
  __syncthreads();
}
// 1D band-pass filtering wrapper code
// Steps: padding, FFT, filtering, IFFT, cropping.
// d_data holds nrec traces of nSteps samples (device memory, trace-major);
// filter = {f0, f1, f2, f3} are the corner frequencies of the cosine taper.
// Fix: the padded buffer and the frequency-domain kernel are now launched
// with a grid sized for nSteps_pad -- the previous `blocks` grid (sized for
// nSteps) left the second half of the padding uninitialized before the FFT
// and could skip the Nyquist bin in the filter.
void bp_filter1d(int nSteps, float dt, int nrec, float *d_data, float *filter) {
  int nSteps_pad = 2 * nSteps;
  int nfft = nSteps_pad / 2 + 1;
  // float df = 1.0/dt/nSteps_pad;
  float *d_data_pad;
  float f0 = filter[0];
  float f1 = filter[1];
  float f2 = filter[2];
  float f3 = filter[3];
  hipfftHandle plan_f, plan_b;
  hipfftComplex *d_data_F;
  dim3 threads(TX, TY);
  dim3 blocks((nSteps + TX - 1) / TX, (nrec + TY - 1) / TY);
  // grid that covers the full padded length (and hence all nfft bins)
  dim3 blocks_pad((nSteps_pad + TX - 1) / TX, (nrec + TY - 1) / TY);
  // zero the whole padded buffer, then embed the data into its first half
  CHECK(hipMalloc((void **)&d_data_pad, nSteps_pad * nrec * sizeof(float)));
  hipLaunchKernelGGL(( intialArrayGPU), dim3(blocks_pad), dim3(threads), 0, 0, d_data_pad, nSteps_pad, nrec, 0.0);
  hipLaunchKernelGGL(( cuda_embed_crop), dim3(blocks), dim3(threads), 0, 0, nSteps, nrec, d_data, nSteps_pad, nrec,
                                      d_data_pad, true);
  // forward FFT, apply the band-pass taper in the frequency domain
  CHECK(hipMalloc((void **)&d_data_F, sizeof(hipfftComplex) * nfft * nrec));
  hipfftPlan1d(&plan_f, nSteps_pad, HIPFFT_R2C, nrec);
  hipfftExecR2C(plan_f, d_data_pad, d_data_F);  // forward FFT
  hipfftDestroy(plan_f);
  hipLaunchKernelGGL(( cuda_bp_filter1d), dim3(blocks_pad), dim3(threads), 0, 0, nSteps_pad, dt, nrec, d_data_F, f0, f1,
                                       f2, f3);
  hipfftPlan1d(&plan_b, nSteps_pad, HIPFFT_C2R, nrec);
  hipfftExecC2R(plan_b, d_data_F, d_data_pad);  // inverse FFT
  hipfftDestroy(plan_b);
  // crop the filtered data back out of the padded buffer
  hipLaunchKernelGGL(( cuda_embed_crop), dim3(blocks), dim3(threads), 0, 0, nSteps, nrec, d_data, nSteps_pad, nrec,
                                      d_data_pad, false);
  // hipFFT transforms are unnormalized: divide by the padded length
  hipLaunchKernelGGL(( cuda_normalize), dim3(blocks), dim3(threads), 0, 0, nSteps, nrec, d_data,
                                     1 / float(nSteps_pad));
  CHECK(hipFree(d_data_F));
  CHECK(hipFree(d_data_pad));
}
// source signature and calculated data update
// Steps: padding, windowing, FFT, per-frequency spectrum update (see
// cuda_spectrum_update), IFFT, cropping, normalization. The per-frequency
// coefficients are written to d_coef so the adjoint pass
// (source_update_adj) can apply the same filter. Returns the peak-amplitude
// ratio |obs|max / |cal|max.
// Fix: the padded observed/calculated buffers are now zeroed with
// blocks_pad -- the previous `blocks` grid only covered the first nSteps
// columns, leaving the padding uninitialized before the FFT. Also restored
// a missing semicolon after the d_source_F allocation.
float source_update(int nSteps, float dt, int nrec, float *d_data_obs,
                    float *d_data_cal, float *d_source,
                    cuFloatComplex *d_coef) {
  int nSteps_pad = 2 * nSteps;
  int nfft = nSteps_pad / 2 + 1;
  float *d_data_obs_pad, *d_data_cal_pad, *d_source_pad;
  hipfftHandle plan_f, plan_b;
  hipfftComplex *d_data_obs_F, *d_data_cal_F, *d_source_F;
  dim3 threads(TX, TY);
  dim3 blocks((nSteps + TX - 1) / TX, (nrec + TY - 1) / TY);
  dim3 blocks_pad((nSteps_pad + TX - 1) / TX, (nrec + TY - 1) / TY);
  // pad data and window data
  CHECK(
      hipMalloc((void **)&d_data_obs_pad, nSteps_pad * nrec * sizeof(float)));
  CHECK(
      hipMalloc((void **)&d_data_cal_pad, nSteps_pad * nrec * sizeof(float)));
  CHECK(hipMalloc((void **)&d_source_pad, nSteps_pad * sizeof(float)));
  // zero the FULL padded buffers (blocks_pad, not blocks)
  hipLaunchKernelGGL(( intialArrayGPU), dim3(blocks_pad), dim3(threads), 0, 0, d_data_obs_pad, nSteps_pad, nrec, 0.0);
  hipLaunchKernelGGL(( intialArrayGPU), dim3(blocks_pad), dim3(threads), 0, 0, d_data_cal_pad, nSteps_pad, nrec, 0.0);
  hipLaunchKernelGGL(( intialArrayGPU), dim3((nSteps_pad + 31) / 32), dim3(32), 0, 0, d_source_pad, nSteps_pad, 1,
                                     0.0);
  hipLaunchKernelGGL(( cuda_embed_crop), dim3(blocks), dim3(threads), 0, 0, nSteps, nrec, d_data_obs, nSteps_pad,
                                      nrec, d_data_obs_pad, true);
  hipLaunchKernelGGL(( cuda_embed_crop), dim3(blocks), dim3(threads), 0, 0, nSteps, nrec, d_data_cal, nSteps_pad,
                                      nrec, d_data_cal_pad, true);
  hipLaunchKernelGGL(( cuda_window), dim3(blocks_pad), dim3(threads), 0, 0, nSteps_pad, nrec, dt, 0.01,
                                   d_data_obs_pad);
  hipLaunchKernelGGL(( cuda_window), dim3(blocks_pad), dim3(threads), 0, 0, nSteps_pad, nrec, dt, 0.01,
                                   d_data_cal_pad);
  hipLaunchKernelGGL(( cuda_embed_crop), dim3((nSteps_pad + 31) / 32), dim3(32), 0, 0,
      nSteps, 1, d_source, nSteps_pad, 1, d_source_pad, true);
  // forward FFTs of observed data, calculated data, and the source wavelet
  CHECK(hipMalloc((void **)&d_data_obs_F, sizeof(hipfftComplex) * nfft * nrec));
  CHECK(hipMalloc((void **)&d_data_cal_F, sizeof(hipfftComplex) * nfft * nrec));
  CHECK(hipMalloc((void **)&d_source_F, sizeof(hipfftComplex) * nfft));
  hipfftPlan1d(&plan_f, nSteps_pad, HIPFFT_R2C, nrec);
  hipfftExecR2C(plan_f, d_data_obs_pad,
                d_data_obs_F);  // forward FFT of observed data
  hipfftExecR2C(plan_f, d_data_cal_pad,
                d_data_cal_F);  // forward FFT of calculated data
  hipfftDestroy(plan_f);
  hipfftPlan1d(&plan_f, nSteps_pad, HIPFFT_R2C, 1);  // source FFT
  hipfftExecR2C(plan_f, d_source_pad, d_source_F);
  hipfftDestroy(plan_f);
  // per-frequency least-squares update: one block per frequency bin
  hipLaunchKernelGGL(( cuda_spectrum_update), dim3(nfft), dim3(512), 0, 0, nfft, nrec, d_data_obs_F, d_data_cal_F,
                                            d_source_F, d_coef);
  // inverse FFTs of the updated calculated data and source
  hipfftPlan1d(&plan_b, nSteps_pad, HIPFFT_C2R, nrec);
  hipfftExecC2R(plan_b, d_data_cal_F, d_data_cal_pad);  // inverse FFT
  hipfftDestroy(plan_b);
  hipfftPlan1d(&plan_b, nSteps_pad, HIPFFT_C2R, 1);
  hipfftExecC2R(plan_b, d_source_F, d_source_pad);  // inverse FFT
  hipfftDestroy(plan_b);
  // crop data
  hipLaunchKernelGGL(( cuda_embed_crop), dim3(blocks), dim3(threads), 0, 0, nSteps, nrec, d_data_cal, nSteps_pad,
                                      nrec, d_data_cal_pad, false);
  hipLaunchKernelGGL(( cuda_embed_crop), dim3((nSteps + 31) / 32), dim3(32), 0, 0, nSteps, 1, d_source, nSteps_pad,
                                      1, d_source_pad, false);
  // normalization (hipFFT is unnormalized; the padded length is nSteps_pad)
  hipLaunchKernelGGL(( cuda_normalize), dim3(blocks), dim3(threads), 0, 0, nSteps, nrec, d_data_cal,
                                     1.0f / float(nSteps_pad));
  hipLaunchKernelGGL(( cuda_normalize), dim3((nSteps + 31) / 32), dim3(32), 0, 0, nSteps, 1, d_source,
                                     1.0f / float(nSteps_pad));
  float amp_ratio = amp_ratio_comp(nSteps * nrec, d_data_obs, d_data_cal);
  CHECK(hipFree(d_data_obs_pad));
  CHECK(hipFree(d_data_cal_pad));
  CHECK(hipFree(d_data_obs_F));
  CHECK(hipFree(d_data_cal_F));
  CHECK(hipFree(d_source_pad));
  CHECK(hipFree(d_source_F));
  return amp_ratio;
}
// source signature and calculated data update (adjoint pass)
// Applies the per-frequency correction coefficients d_coef computed by
// source_update() to the adjoint data: pad, window, FFT, multiply by coef,
// IFFT, crop, then rescale by amp_ratio / nSteps_pad.
// Fix: the padded buffer is now zeroed with blocks_pad (the old `blocks`
// grid only covered the first nSteps columns), and cuda_filter1d is
// launched with a grid covering all nfft = nSteps + 1 bins (the old grid
// could miss the Nyquist bin when nSteps is a multiple of TX).
void source_update_adj(int nSteps, float dt, int nrec, float *d_data,
                       float amp_ratio, cuFloatComplex *d_coef) {
  int nSteps_pad = 2 * nSteps;
  int nfft = nSteps_pad / 2 + 1;
  float *d_data_pad;
  hipfftHandle plan_f, plan_b;
  hipfftComplex *d_data_F;
  dim3 threads(TX, TY);
  dim3 blocks((nSteps + TX - 1) / TX, (nrec + TY - 1) / TY);
  dim3 blocks_pad((nSteps_pad + TX - 1) / TX, (nrec + TY - 1) / TY);
  // pad and window the adjoint data
  CHECK(hipMalloc((void **)&d_data_pad, nSteps_pad * nrec * sizeof(float)));
  hipLaunchKernelGGL(( intialArrayGPU), dim3(blocks_pad), dim3(threads), 0, 0, d_data_pad, nSteps_pad, nrec, 0.0);
  hipLaunchKernelGGL(( cuda_embed_crop), dim3(blocks), dim3(threads), 0, 0, nSteps, nrec, d_data, nSteps_pad, nrec,
                                      d_data_pad, true);
  hipLaunchKernelGGL(( cuda_window), dim3(blocks_pad), dim3(threads), 0, 0, nSteps_pad, nrec, dt, 0.01, d_data_pad);
  CHECK(hipMalloc((void **)&d_data_F, sizeof(hipfftComplex) * nfft * nrec));
  hipfftPlan1d(&plan_f, nSteps_pad, HIPFFT_R2C, nrec);
  hipfftExecR2C(plan_f, d_data_pad, d_data_F);
  hipfftDestroy(plan_f);
  // apply the stored coefficients in the frequency domain (grid covers nfft)
  hipLaunchKernelGGL(( cuda_filter1d), dim3(blocks_pad), dim3(threads), 0, 0, nfft, nrec, d_data_F, d_coef);
  hipfftPlan1d(&plan_b, nSteps_pad, HIPFFT_C2R, nrec);
  hipfftExecC2R(plan_b, d_data_F, d_data_pad);  // inverse FFT
  hipfftDestroy(plan_b);
  // crop data
  hipLaunchKernelGGL(( cuda_embed_crop), dim3(blocks), dim3(threads), 0, 0, nSteps, nrec, d_data, nSteps_pad, nrec,
                                      d_data_pad, false);
  // normalization (in the padded fft, the length is nSteps_pad)
  hipLaunchKernelGGL(( cuda_normalize), dim3(blocks), dim3(threads), 0, 0, nSteps, nrec, d_data,
                                     amp_ratio / float(nSteps_pad));
  CHECK(hipFree(d_data_pad));
  CHECK(hipFree(d_data_F));
}
// Ratio of peak absolute amplitudes |obs|_max / |cal|_max over n samples of
// two device arrays. The maxima are reduced on the GPU with a single-block
// kernel and copied back; returns 0 when the calculated data is identically
// zero (avoids division by zero).
// Fix: the original malloc'ed the host scalars and released them with
// delete[] (mismatched allocator, undefined behavior); plain stack scalars
// remove the heap round-trip entirely.
float amp_ratio_comp(int n, float *d_data_obs, float *d_data_cal) {
  float obs_maxval = 0.0f, cal_maxval = 0.0f;
  float *d_obs_maxval, *d_cal_maxval;
  CHECK(hipMalloc((void **)&d_obs_maxval, sizeof(float)));
  CHECK(hipMalloc((void **)&d_cal_maxval, sizeof(float)));
  hipLaunchKernelGGL(( cuda_find_absmax), dim3(1), dim3(512), 0, 0, n, d_data_obs, d_obs_maxval);
  hipLaunchKernelGGL(( cuda_find_absmax), dim3(1), dim3(512), 0, 0, n, d_data_cal, d_cal_maxval);
  CHECK(hipMemcpy(&obs_maxval, d_obs_maxval, sizeof(float),
                  hipMemcpyDeviceToHost));
  CHECK(hipMemcpy(&cal_maxval, d_cal_maxval, sizeof(float),
                  hipMemcpyDeviceToHost));
  float ratio = 0.0;
  if (cal_maxval != 0.0) {
    ratio = obs_maxval / cal_maxval;
  }
  CHECK(hipFree(d_obs_maxval));
  CHECK(hipFree(d_cal_maxval));
  return ratio;
}
#define d_ave_Mu(x, y) d_ave_Mu[(y) * (nx) + (x)]
#define d_field(z, x) d_field[(x) * (nz) + (z)]
#define d_bnd(x, indT) d_bnd[(indT) * (len_Bnd_vec) + (x)]
#define d_Den(x, y) d_Den[(y) * (nx) + (x)]
#define d_ave_Byc_a(x, y) d_ave_Byc_a[(y) * (nx) + (x)]
#define d_ave_Byc_b(x, y) d_ave_Byc_b[(y) * (nx) + (x)]
#include "utilities.h"
// Read `size` floats from a raw little-endian binary file into h_bin.
// Exits the program when the file cannot be opened OR is shorter than
// expected. Fix: the original captured fread's return value but never
// checked it, silently accepting truncated files.
void fileBinLoad(float *h_bin, int size, std::string fname) {
  FILE *fp = fopen(fname.c_str(), "rb");
  if (fp == NULL) {
    std::cout << "Attempted to read " << fname << std::endl;
    printf("File reading error!\n");
    exit(1);
  }
  size_t sizeRead = fread(h_bin, sizeof(float), size, fp);
  fclose(fp);
  if (sizeRead != (size_t)size) {
    std::cout << "Attempted to read " << fname << std::endl;
    printf("File reading error!\n");
    exit(1);
  }
}
// Dump `size` floats from h_bin to a raw binary file; aborts the program
// when the file cannot be opened for writing.
void fileBinWrite(float *h_bin, int size, std::string fname) {
  FILE *fp = fopen(fname.c_str(), "wb");
  if (fp == NULL) {
    printf("File writing error!\n");
    exit(1);
  }
  fwrite(h_bin, sizeof(float), size, fp);
  fclose(fp);
}
// Dump `size` doubles from h_bin to a raw binary file; aborts the program
// when the file cannot be opened for writing.
void fileBinWriteDouble(double *h_bin, int size, std::string fname) {
  FILE *fp = fopen(fname.c_str(), "wb");
  if (fp == NULL) {
    printf("File writing error!\n");
    exit(1);
  }
  fwrite(h_bin, sizeof(double), size, fp);
  fclose(fp);
}
// Fill the first `size` entries of a host float array with `value`.
void initialArray(float *ip, int size, float value) {
  for (int idx = 0; idx < size; ++idx) {
    ip[idx] = value;
  }
}
// Fill the first `size` entries of a host double array with `value`.
void initialArray(double *ip, int size, double value) {
  for (int idx = 0; idx < size; ++idx) {
    ip[idx] = value;
  }
}
// Device fill: set every element of an nx-by-ny array to `value`.
// (The historical "intial" spelling is kept because call sites use it.)
__global__ void intialArrayGPU(float *ip, int nx, int ny, float value) {
  int col = threadIdx.x + blockDim.x * blockIdx.x;
  int row = threadIdx.y + blockDim.y * blockIdx.y;
  if (col >= nx || row >= ny) return;
  ip[col + row * nx] = value;
}
// Device copy: element-wise assignment ip_out = ip_in over an nx-by-ny grid.
__global__ void assignArrayGPU(float *ip_in, float *ip_out, int nx, int ny) {
  int col = threadIdx.x + blockDim.x * blockIdx.x;
  int row = threadIdx.y + blockDim.y * blockIdx.y;
  if (col >= nx || row >= ny) return;
  int idx = col + row * nx;
  ip_out[idx] = ip_in[idx];
}
// Print an ny-by-nx host array row by row, preceded by the label `s`.
// Debug helper; writes to stdout only.
void displayArray(std::string s, float *ip, int nx, int ny) {
  std::cout << s << ": " << std::endl;
  for (int row = 0; row < ny; ++row) {
    for (int col = 0; col < nx; ++col) {
      printf("%f ", ip[row * nx + col]);
    }
    printf("\n");
  }
  printf("\n\n\n");
}
// Convert velocity/density models to Lame parameters on the device:
//   Mu = rho * Cs^2,  Lambda = rho * (Cp^2 - 2 Cs^2)
// All arrays are nx-by-ny with row stride nx. Prints a warning when the
// model implies a negative Lambda (i.e. Cp < sqrt(2) * Cs).
__global__ void moduliInit(float *d_Cp, float *d_Cs, float *d_Den,
                           float *d_Lambda, float *d_Mu, int nx, int ny) {
  // printf("Hello, world!\n");
  int gidx = threadIdx.x + blockDim.x * blockIdx.x;
  int gidy = threadIdx.y + blockDim.y * blockIdx.y;
  int offset = gidx + gidy * nx;
  if (gidx < nx && gidy < ny) {
    // printf("offset = %d ", offset);
    // printf("gridDim.x = %d ", gridDim.x);
    // printf("blockIdx.y = %d ", blockIdx.y);
    d_Mu[offset] = powf(d_Cs[offset], 2) * d_Den[offset];
    d_Lambda[offset] =
        d_Den[offset] * (powf(d_Cp[offset], 2) - 2 * powf(d_Cs[offset], 2));
    if (d_Lambda[offset] < 0) {
      printf("Lambda is negative!!!");
    }
  }
}
// Inverse of moduliInit: recover velocities from Lame parameters:
//   Cp = sqrt((Lambda + 2 Mu) / rho),  Cs = sqrt(Mu / rho)
// NOTE(review): uses double-precision sqrt (operands promoted), unlike the
// powf used in moduliInit -- numerically harmless but inconsistent.
__global__ void velInit(float *d_Lambda, float *d_Mu, float *d_Den, float *d_Cp,
                        float *d_Cs, int nx, int ny) {
  // printf("Hello, world!\n");
  int gidx = threadIdx.x + blockDim.x * blockIdx.x;
  int gidy = threadIdx.y + blockDim.y * blockIdx.y;
  int offset = gidx + gidy * nx;
  if (gidx < nx && gidy < ny) {
    // printf("offset = %d ", offset);
    // printf("gridDim.x = %d ", gridDim.x);
    // printf("blockIdx.y = %d ", blockIdx.y);
    d_Cp[offset] =
        sqrt((d_Lambda[offset] + 2.0 * d_Mu[offset]) / d_Den[offset]);
    d_Cs[offset] = sqrt((d_Mu[offset]) / d_Den[offset]);
  }
}
// Harmonic 4-point average of the shear modulus Mu onto the staggered grid
// point at (i+1/2, j+1/2). A zero neighbor (fluid cell) forces the average
// to zero. Only interior points (indices 2 .. n-3) are computed; d_Mu and
// d_ave_Mu are indexing macros defined at the top of this file.
__global__ void aveMuInit(float *d_Mu, float *d_ave_Mu, int nx, int ny) {
  int gidx = threadIdx.x + blockDim.x * blockIdx.x;
  int gidy = threadIdx.y + blockDim.y * blockIdx.y;
  float a, b, c, d;
  if (gidx >= 2 && gidx <= nx - 3 && gidy >= 2 && gidy <= ny - 3) {
    a = d_Mu(gidx, gidy);
    b = d_Mu(gidx + 1, gidy);
    c = d_Mu(gidx, gidy + 1);
    d = d_Mu(gidx + 1, gidy + 1);
    if (a == 0.0 || b == 0.0 || c == 0.0 || d == 0.0) {
      // any fluid neighbor makes the staggered point fluid
      d_ave_Mu(gidx, gidy) = 0.0;
    } else {
      // harmonic mean of the four neighbors
      d_ave_Mu(gidx, gidy) = 4.0 / (1.0 / a + 1.0 / b + 1.0 / c + 1.0 / d);
    }
  }
}
// Staggered-grid buoyancy (1/density) averages:
//   ave_Byc_a at (i+1/2, j)  and  ave_Byc_b at (i, j+1/2),
// each the reciprocal of the two-point arithmetic mean of density. Only
// interior points (indices 2 .. n-3) are computed; d_Den/d_ave_Byc_* are
// indexing macros defined at the top of this file.
__global__ void aveBycInit(float *d_Den, float *d_ave_Byc_a, float *d_ave_Byc_b,
                           int nx, int ny) {
  int gidx = threadIdx.x + blockDim.x * blockIdx.x;
  int gidy = threadIdx.y + blockDim.y * blockIdx.y;
  if (gidx >= 2 && gidx <= nx - 3 && gidy >= 2 && gidy <= ny - 3) {
    d_ave_Byc_a(gidx, gidy) = 2.0 / (d_Den(gidx + 1, gidy) + d_Den(gidx, gidy));
    d_ave_Byc_b(gidx, gidy) = 2.0 / (d_Den(gidx, gidy + 1) + d_Den(gidx, gidy));
  } else {
    return;
  }
}
// Element-wise residual d_out = d_in1 - d_in2 over an nx-by-ny array, except
// that the first column (idx == 0, i.e. the first time sample of every
// trace) is deliberately forced to zero so it does not contribute to the
// misfit (see the original note below).
__global__ void gpuMinus(float *d_out, float *d_in1, float *d_in2, int nx,
                         int ny) {
  int idx = threadIdx.x + blockDim.x * blockIdx.x;
  int idy = threadIdx.y + blockDim.y * blockIdx.y;
  // only compute last N-1 time samples for misfits!!!!!!!! DL 02/25/2019
  if (idx < nx && idy < ny && idx > 0) {
    d_out[(idy) * (nx) + (idx)] =
        d_in1[(idy) * (nx) + (idx)] - d_in2[(idy) * (nx) + (idx)];
  } else if (idx == 0 && idy < ny) {
    d_out[(idy) * (nx) + (idx)] = 0.0;
  } else {
    return;
  }
}
// Block-wide sum of squares of err[0..ng-1], written to *obj.
// Each of the 512 threads accumulates a strided partial sum in shared
// memory, then a tree reduction folds the partials and thread 0 writes the
// scalar result.
// NOTE(review): *obj is overwritten by whichever block runs last, so this
// appears to assume a single-block launch (<<<1, 512>>>) — confirm callers.
__global__ void cuda_cal_objective(float *obj, float *err, int ng)
/*< calculate the value of objective function: obj >*/
{
  const int Block_Size = 512;
  __shared__ float sdata[Block_Size];
  int tid = threadIdx.x;
  sdata[tid] = 0.0f;
  // Strided accumulation: thread tid sums err[tid], err[tid+512], ...
  for (int s = 0; s < (ng + Block_Size - 1) / Block_Size; s++) {
    int id = s * blockDim.x + threadIdx.x;
    float a = (id < ng) ? err[id] : 0.0f;
    // sdata[tid] += a*a;
    sdata[tid] += powf(a, 2);
  }
  __syncthreads();
  /* do reduction in shared mem */
  // (earlier unrolled-warp variant kept for reference)
  //  for(int s=blockDim.x/2; s>32; s>>=1) {
  //    if (threadIdx.x < s) sdata[tid] += sdata[tid + s];
  //    __syncthreads();
  //  }
  //  if (tid < 32) {
  //    if (blockDim.x >= 64) { sdata[tid] += sdata[tid + 32]; }
  //    if (blockDim.x >= 32) { sdata[tid] += sdata[tid + 16]; }
  //    if (blockDim.x >= 16) { sdata[tid] += sdata[tid + 8]; }
  //    if (blockDim.x >= 8) { sdata[tid] += sdata[tid + 4]; }
  //    if (blockDim.x >= 4) { sdata[tid] += sdata[tid + 2]; }
  //    if (blockDim.x >= 2) { sdata[tid] += sdata[tid + 1]; }
  //  }
  // Tree reduction; halving only covers all elements when blockDim.x is a
  // power of two (512 here). __syncthreads() is outside the divergent if,
  // so all threads reach it.
  for (int s = blockDim.x / 2; s >= 1; s /= 2) {
    if (threadIdx.x < s) sdata[tid] += sdata[tid + s];
    __syncthreads();
  }
  if (tid == 0) {
    *obj = sdata[0];
  }
}
// Host-side L2 misfit: returns the sum of squared entries of array[0..N-1].
// Fix: removed a leftover debug printf("hhh\n") that polluted stdout on
// every call; the computation itself is unchanged.
// array: input samples (host memory); N: number of samples (N == 0 -> 0.0).
float cal_objective(float *array, int N) {
  float misfit = 0.0;
  for (int i = 0; i < N; i++) {
    misfit += array[i] * array[i];
  }
  return misfit;
}
// Arithmetic mean of an N-element host array (used for the average Cp that
// parameterizes the CPML damping profile).
float compCpAve(float *array, int N) {
  float sum = 0.0;
  for (int i = 0; i < N; ++i) {
    sum += array[i];
  }
  return sum / float(N);
}
// CFL stability check for the staggered-grid scheme: computes a Courant
// number from the maximum P velocity, the time step, and the smaller grid
// spacing, prints it, and aborts the program (exit(1)) when it exceeds 1.
void compCourantNumber(float *h_Cp, int size, float dt, float dz, float dx) {
  float vmax = h_Cp[0];
  for (int i = 1; i < size; ++i) {
    if (h_Cp[i] > vmax) vmax = h_Cp[i];
  }
  float dh_min = (dz < dx) ? dz : dx;
  // sqrt(2) accounts for the 2D diagonal; (1/24 + 9/8) is the l1 norm of
  // the 4th-order staggered finite-difference coefficients.
  float Courant_number =
      vmax * dt * sqrtf(2.0) * (1.0 / 24.0 + 9.0 / 8.0) / dh_min;
  std::cout << "Courant_number = " << Courant_number << std::endl;
  if (Courant_number > 1.0) exit(1);
}
// Build 1D C-PML (convolutional perfectly matched layer) absorbing-boundary
// profiles for one axis of length N with nPml damping cells at each end.
// Outputs (all length N): K/a/b at integer grid points and K_half/a_half/
// b_half at half-shifted points, used by the CPML memory-variable update
// (psi = b*psi + a*dField, with coordinate stretching by 1/K).
// The damping is polynomially graded inside the layer:
//   d(x) = d0 * (c1*x + c2*x^NPOWER + c3*x^(2*NPOWER)),  x in [0,1].
// NOTE(review): the CpAve argument is overwritten with 3000.0 below, so the
// passed-in value is ignored by design ("model independent").
void cpmlInit(float *K, float *a, float *b, float *K_half, float *a_half,
              float *b_half, int N, int nPml, float dh, float f0, float dt,
              float CpAve) {
  float *damp, *damp_half, *alpha, *alpha_half;
  float d0_h = 0.0;
  float Rcoef = 0.0008;  // target theoretical reflection coefficient
  float depth_in_pml = 0.0;
  float depth_normalized = 0.0;
  float thickness_PML = 0.0;
  // const float PI = 3.141592653589793238462643383279502884197169;
  const float K_MAX_PML = 2.0;
  const float ALPHA_MAX_PML = 2.0 * PI * (f0 / 2.0);
  const float NPOWER = 8.0;
  const float c1 = 0.25, c2 = 0.75, c3 = 0.0;
  // const float c1 = 0.0, c2 = 1.0, c3 = 0.0;
  thickness_PML = nPml * dh;  // changed here
  CpAve = 3000.0;             // DL make this model independent
  d0_h = -(NPOWER + 1) * CpAve * log(Rcoef) / (2.0 * thickness_PML);
  damp = (float *)malloc(N * sizeof(float));
  damp_half = (float *)malloc(N * sizeof(float));
  alpha = (float *)malloc(N * sizeof(float));
  alpha_half = (float *)malloc(N * sizeof(float));
  // Defaults outside the PML: no damping, K = 1 (no stretching), a = b = 0.
  initialArray(damp, N, 0.0);
  initialArray(damp_half, N, 0.0);
  initialArray(K, N, 1.0);
  initialArray(K_half, N, 1.0);
  initialArray(alpha, N, 0.0);
  initialArray(alpha_half, N, 0.0);
  initialArray(a, N, 0.0);
  initialArray(a_half, N, 0.0);
  initialArray(b, N, 0.0);
  initialArray(b_half, N, 0.0);
  for (int i = 0; i < N; i++) {
    // left edge
    depth_in_pml = (nPml - i) * dh;
    if (depth_in_pml >= 0.0) {
      depth_normalized = depth_in_pml / thickness_PML;
      damp[i] =
          d0_h * (c1 * depth_normalized + c2 * pow(depth_normalized, NPOWER) +
                  c3 * pow(depth_normalized, 2 * NPOWER));
      K[i] = 1.0 + (K_MAX_PML - 1.0) * pow(depth_normalized, NPOWER);
      alpha[i] = ALPHA_MAX_PML * (1.0 - depth_normalized);
    }
    if (alpha[i] < 0.0) {
      std::cout << "CPML alpha < 0.0 --" << __LINE__ << std::endl;
      exit(1);
    }
    // half the grid points
    depth_in_pml = (nPml - i - 0.5) * dh;
    if (depth_in_pml >= 0.0) {
      depth_normalized = depth_in_pml / thickness_PML;
      damp_half[i] =
          d0_h * (c1 * depth_normalized + c2 * pow(depth_normalized, NPOWER) +
                  c3 * pow(depth_normalized, 2 * NPOWER));
      K_half[i] = 1.0 + (K_MAX_PML - 1.0) * pow(depth_normalized, NPOWER);
      alpha_half[i] = ALPHA_MAX_PML * (1.0 - depth_normalized);
    }
    if (alpha_half[i] < 0.0) {
      std::cout << "CPML alpha_half < 0.0 --" << __LINE__ << std::endl;
      exit(1);
    }
    // right edge (overwrites left-edge values only inside the right layer)
    depth_in_pml = (nPml - N + i) * dh;
    if (depth_in_pml >= 0.0) {
      depth_normalized = depth_in_pml / thickness_PML;
      damp[i] =
          d0_h * (c1 * depth_normalized + c2 * pow(depth_normalized, NPOWER) +
                  c3 * pow(depth_normalized, 2 * NPOWER));
      K[i] = 1.0 + (K_MAX_PML - 1.0) * pow(depth_normalized, NPOWER);
      alpha[i] = ALPHA_MAX_PML * (1.0 - depth_normalized);
    }
    if (alpha[i] < 0.0) {
      std::cout << "CPML alpha < 0.0 --" << __LINE__ << std::endl;
      exit(1);
    }
    depth_in_pml = (nPml - N + i + 0.5) * dh;
    if (depth_in_pml >= 0.0) {
      depth_normalized = depth_in_pml / thickness_PML;
      damp_half[i] =
          d0_h * (c1 * depth_normalized + c2 * pow(depth_normalized, NPOWER) +
                  c3 * pow(depth_normalized, 2 * NPOWER));
      K_half[i] = 1.0 + (K_MAX_PML - 1.0) * powf(depth_normalized, NPOWER);
      alpha_half[i] = ALPHA_MAX_PML * (1.0 - depth_normalized);
    }
    if (alpha_half[i] < 0.0) {
      std::cout << "CPML alpha_half < 0.0 --" << __LINE__ << std::endl;
      exit(1);
    }
    // Clamp (defensive; the exits above already reject negatives).
    if (alpha[i] < 0.0) {
      alpha[i] = 0.0;
    }
    if (alpha_half[i] < 0.0) {
      alpha_half[i] = 0.0;
    }
    // Standard CPML recursion coefficients (Komatitsch & Martin style):
    //   b = exp(-(d/K + alpha) * dt),  a = d*(b-1) / (K*(d + K*alpha)).
    b[i] = expf(-(damp[i] / K[i] + alpha[i]) * dt);
    b_half[i] = expf(-(damp_half[i] / K_half[i] + alpha_half[i]) * dt);
    if (fabs(damp[i]) > 1.0e-6) {
      a[i] = damp[i] * (b[i] - 1.0) / (K[i] * (damp[i] + K[i] * alpha[i]));
    }
    if (fabs(damp_half[i]) > 1.0e-6) {
      a_half[i] = damp_half[i] * (b_half[i] - 1.0) /
                  (K_half[i] * (damp_half[i] + K_half[i] * alpha_half[i]));
    }
  }
  free(damp);
  free(damp_half);
  free(alpha);
  free(alpha_half);
}
// Dongzhuo Li 05/15/2019
// Save nLayerStore-deep strips of the wavefield along the four inner edges
// of the physical domain (offset by nPml with a 2-cell halo) into the
// boundary archive d_bnd at time index indT, for later wavefield
// reconstruction during adjoint back-propagation (inverse of to_bnd).
// One thread per archived sample; the flat index idxBnd selects the strip:
// [left columns | right columns | top rows | bottom rows].
// d_field / d_bnd are 2D indexing macros defined elsewhere in the project.
__global__ void from_bnd(float *d_field, float *d_bnd, int nz, int nx,
                         int nzBnd, int nxBnd, int len_Bnd_vec, int nLayerStore,
                         int indT, int nPml, int nPad, int nSteps) {
  int idxBnd = threadIdx.x + blockDim.x * blockIdx.x;
  int iRow, jCol;
  // strips of columns at the left edge
  if (idxBnd >= 0 && idxBnd <= nLayerStore * nzBnd - 1) {
    jCol = idxBnd / nzBnd;
    iRow = idxBnd - jCol * nzBnd;
    d_bnd(idxBnd, indT) = d_field((iRow + nPml - 2), (jCol + nPml - 2));
  } else if (idxBnd >= nLayerStore * nzBnd &&
             idxBnd <= 2 * nLayerStore * nzBnd - 1) {
    // strips of columns at the right edge
    jCol = (idxBnd - nLayerStore * nzBnd) / nzBnd;
    iRow = (idxBnd - nLayerStore * nzBnd) - jCol * nzBnd;
    d_bnd(idxBnd, indT) =
        d_field((iRow + nPml - 2), (nx - nPml - jCol - 1 + 2));
  } else if (idxBnd >= 2 * nLayerStore * nzBnd &&
             idxBnd <= nLayerStore * (2 * nzBnd + nxBnd) - 1) {
    // strips of rows at the top edge
    iRow = (idxBnd - 2 * nLayerStore * nzBnd) / nxBnd;
    jCol = (idxBnd - 2 * nLayerStore * nzBnd) - iRow * nxBnd;
    d_bnd(idxBnd, indT) = d_field((iRow + nPml - 2), (jCol + nPml - 2));
  } else if (idxBnd >= nLayerStore * (2 * nzBnd + nxBnd) &&
             idxBnd <= 2 * nLayerStore * (nzBnd + nxBnd) - 1) {
    // strips of rows at the bottom edge (above the nPad padding rows)
    iRow = (idxBnd - nLayerStore * (2 * nzBnd + nxBnd)) / nxBnd;
    jCol = (idxBnd - nLayerStore * (2 * nzBnd + nxBnd)) - iRow * nxBnd;
    d_bnd(idxBnd, indT) =
        d_field((nz - nPml - nPad - iRow - 1 + 2), (jCol + nPml - 2));
  } else {
    return;
  }
}
// Dongzhuo Li 05/15/2019
// Restore the boundary strips archived by from_bnd (same layout, same index
// arithmetic) back into the wavefield at time index indT. This is the exact
// mirror of from_bnd: assignments go d_bnd -> d_field instead.
// d_field / d_bnd are 2D indexing macros defined elsewhere in the project.
__global__ void to_bnd(float *d_field, float *d_bnd, int nz, int nx, int nzBnd,
                       int nxBnd, int len_Bnd_vec, int nLayerStore, int indT,
                       int nPml, int nPad, int nSteps) {
  int idxBnd = threadIdx.x + blockDim.x * blockIdx.x;
  int iRow, jCol;
  // left-edge columns
  if (idxBnd >= 0 && idxBnd <= nLayerStore * nzBnd - 1) {
    jCol = idxBnd / nzBnd;
    iRow = idxBnd - jCol * nzBnd;
    d_field((iRow + nPml - 2), (jCol + nPml - 2)) = d_bnd(idxBnd, indT);
  } else if (idxBnd >= nLayerStore * nzBnd &&
             idxBnd <= 2 * nLayerStore * nzBnd - 1) {
    // right-edge columns
    jCol = (idxBnd - nLayerStore * nzBnd) / nzBnd;
    iRow = (idxBnd - nLayerStore * nzBnd) - jCol * nzBnd;
    d_field((iRow + nPml - 2), (nx - nPml - jCol - 1 + 2)) =
        d_bnd(idxBnd, indT);
  } else if (idxBnd >= 2 * nLayerStore * nzBnd &&
             idxBnd <= nLayerStore * (2 * nzBnd + nxBnd) - 1) {
    // top-edge rows
    iRow = (idxBnd - 2 * nLayerStore * nzBnd) / nxBnd;
    jCol = (idxBnd - 2 * nLayerStore * nzBnd) - iRow * nxBnd;
    d_field((iRow + nPml - 2), (jCol + nPml - 2)) = d_bnd(idxBnd, indT);
  } else if (idxBnd >= nLayerStore * (2 * nzBnd + nxBnd) &&
             idxBnd <= 2 * nLayerStore * (nzBnd + nxBnd) - 1) {
    // bottom-edge rows (above the nPad padding rows)
    iRow = (idxBnd - nLayerStore * (2 * nzBnd + nxBnd)) / nxBnd;
    jCol = (idxBnd - nLayerStore * (2 * nzBnd + nxBnd)) - iRow * nxBnd;
    d_field((nz - nPml - nPad - iRow - 1 + 2), (jCol + nPml - 2)) =
        d_bnd(idxBnd, indT);
  } else {
    return;
  }
}
// // Dongzhuo Li 02/24/2019
// __global__ void from_bnd(float *d_field, float *d_bnd, int nz, int nx, int
// nzBnd, \
// int nxBnd, int len_Bnd_vec, int nLayerStore, int indT, int nPml, int nPad,
// int nSteps) {
// int idxBnd = threadIdx.x + blockDim.x*blockIdx.x;
// int iRow,jCol;
// if(idxBnd>=0 && idxBnd<=nLayerStore*nzBnd-1) {
// jCol = idxBnd/nzBnd;
// iRow = idxBnd - jCol*nzBnd;
// d_bnd(idxBnd,indT) = d_field((iRow),(jCol));
// }
// else if(idxBnd>=nLayerStore*nzBnd && idxBnd<=2*nLayerStore*nzBnd-1){
// jCol = (idxBnd-nLayerStore*nzBnd)/nzBnd;
// iRow = (idxBnd-nLayerStore*nzBnd) - jCol*nzBnd;
// d_bnd(idxBnd,indT) = d_field((iRow),(nx-jCol-1));
// }
// else if(idxBnd>=2*nLayerStore*nzBnd &&
// idxBnd<=nLayerStore*(2*nzBnd+nxBnd)-1) {
// iRow = (idxBnd - 2*nLayerStore*nzBnd)/nxBnd;
// jCol = (idxBnd - 2*nLayerStore*nzBnd) - iRow*nxBnd;
// d_bnd(idxBnd,indT) = d_field((iRow),(jCol));
// }
// else if(idxBnd>=nLayerStore*(2*nzBnd+nxBnd) &&
// idxBnd<=2*nLayerStore*(nzBnd+nxBnd)-1) {
// iRow = (idxBnd - nLayerStore*(2*nzBnd+nxBnd))/nxBnd;
// jCol = (idxBnd - nLayerStore*(2*nzBnd+nxBnd)) - iRow*nxBnd;
// d_bnd(idxBnd,indT) = d_field((nz-nPad-iRow-1),(jCol));
// }
// else {
// return;
// }
// // if(idxBnd>=0 && idxBnd<=2*(nzBnd+nxBnd)-1) {
// // d_bnd(idxBnd, indT) = 1.0;
// // } else {
// // return;
// // }
// }
// // Dongzhuo Li 02/24/2019
// __global__ void to_bnd(float *d_field, float *d_bnd, int nz, int nx, int
// nzBnd, \
// int nxBnd, int len_Bnd_vec, int nLayerStore, int indT, int nPml, int nPad,
// int nSteps) {
// int idxBnd = threadIdx.x + blockDim.x*blockIdx.x;
// int iRow,jCol;
// if(idxBnd>=0 && idxBnd<=nLayerStore*nzBnd-1) {
// jCol = idxBnd/nzBnd;
// iRow = idxBnd - jCol*nzBnd;
// d_field((iRow),(jCol)) = d_bnd(idxBnd,indT);
// }
// else if(idxBnd>=nLayerStore*nzBnd && idxBnd<=2*nLayerStore*nzBnd-1){
// jCol = (idxBnd-nLayerStore*nzBnd)/nzBnd;
// iRow = (idxBnd-nLayerStore*nzBnd) - jCol*nzBnd;
// d_field((iRow),(nx-jCol-1)) = d_bnd(idxBnd,indT);
// }
// else if(idxBnd>=2*nLayerStore*nzBnd &&
// idxBnd<=nLayerStore*(2*nzBnd+nxBnd)-1) {
// iRow = (idxBnd - 2*nLayerStore*nzBnd)/nxBnd;
// jCol = (idxBnd - 2*nLayerStore*nzBnd) - iRow*nxBnd;
// d_field((iRow),(jCol)) = d_bnd(idxBnd,indT);
// }
// else if(idxBnd>=nLayerStore*(2*nzBnd+nxBnd) &&
// idxBnd<=2*nLayerStore*(nzBnd+nxBnd)-1) {
// iRow = (idxBnd - nLayerStore*(2*nzBnd+nxBnd))/nxBnd;
// jCol = (idxBnd - nLayerStore*(2*nzBnd+nxBnd)) - iRow*nxBnd;
// d_field((nz-nPad-iRow-1),(jCol)) = d_bnd(idxBnd,indT);
// }
// else {
// return;
// }
// }
// Fill an nz-by-nx (column-major, stride nz) array with a sharply peaked
// Gaussian centred at (nz/2, nx/2); used to spread a point source over a
// small stencil instead of injecting at a single node.
__global__ void src_rec_gauss_amp(float *gauss_amp, int nz, int nx) {
  const int iz = blockIdx.x * blockDim.x + threadIdx.x;
  const int ix = blockIdx.y * blockDim.y + threadIdx.y;
  if (iz < 0 || iz >= nz || ix < 0 || ix >= nx) return;
  const int dz = iz - nz / 2;
  const int dx = ix - nx / 2;
  gauss_amp[iz + ix * nz] =
      expf(-1000.0 * (powf(float(dz), 2) + powf(float(dx), 2)));
}
// Inject (isFor == true) or remove (isFor == false, used when rebuilding the
// forward field in reverse) a moment source over a 9x9 Gaussian stencil
// centred at (z_loc, x_loc). szz receives the full amplitude; sxx receives
// an RSXXZZ-scaled copy (crosswell borehole source, assumes cp/cs =
// sqrt(3.0)). Launch with at least a 9x9 thread footprint.
__global__ void add_source(float *d_szz, float *d_sxx, float amp, int nz,
                           bool isFor, int z_loc, int x_loc, float dt,
                           float *gauss_amp) {
  const int gidz = blockIdx.x * blockDim.x + threadIdx.x;
  const int gidx = blockIdx.y * blockDim.y + threadIdx.y;
  if (gidz < 0 || gidz >= 9 || gidx < 0 || gidx >= 9) return;
  float scale = pow(1500.0, 2);
  // Fold the original's duplicated +/- branches into a sign factor:
  // multiplying by +/-1.0f is exact, so += sgn*x matches += x / -= x.
  const float sgn = isFor ? 1.0f : -1.0f;
  const int idz = gidz - 9 / 2;
  const int idx = gidx - 9 / 2;
  const int ip = (z_loc + idz) + nz * (x_loc + idx);
  d_szz[ip] += sgn * (scale * amp * dt * gauss_amp[gidz + gidx * 9]);
  d_sxx[ip] += sgn * (RSXXZZ * scale * amp * dt * gauss_amp[gidz + gidx * 9]);
}
// Sample the stress field at every receiver for time step it and store it in
// the shot gather (trace iRec, sample it). The recorded value mixes szz and
// sxx with the same RSXXZZ weight used at the source.
__global__ void recording(float *d_szz, float *d_sxx, int nz, float *d_data,
                          int iShot, int it, int nSteps, int nrec, int *d_z_rec,
                          int *d_x_rec) {
  const int iRec = threadIdx.x + blockDim.x * blockIdx.x;
  if (iRec >= nrec) return;
  const int ip = d_z_rec[iRec] + d_x_rec[iRec] * nz;
  d_data[iRec * nSteps + it] = d_szz[ip] + RSXXZZ * d_sxx[ip];
}
// Adjoint-source injection: add the data residual for time step it into the
// adjoint stress fields at each receiver location — szz directly and sxx
// scaled by RSXXZZ, mirroring the recording kernel.
__global__ void res_injection(float *d_szz_adj, float *d_sxx_adj, int nz,
                              float *d_res, int it, float dt, int nSteps,
                              int nrec, int *d_z_rec, int *d_x_rec) {
  const int iRec = threadIdx.x + blockDim.x * blockIdx.x;
  if (iRec >= nrec) return;
  const int ip = d_z_rec[iRec] + nz * d_x_rec[iRec];
  d_szz_adj[ip] += d_res[iRec * nSteps + it];
  d_sxx_adj[ip] += RSXXZZ * d_res[iRec * nSteps + it];
}
// Source-wavelet gradient: a single thread samples the adjoint stress field
// at the source position and stores -(szz + RSXXZZ*sxx)*dt for time step it.
__global__ void source_grad(float *d_szz_adj, float *d_sxx_adj, int nz,
                            float *d_StfGrad, int it, float dt, int z_src,
                            int x_src) {
  const int id = threadIdx.x + blockDim.x * blockIdx.x;
  if (id != 0) return;  // only one thread does the work
  const int ip = z_src + nz * x_src;
  d_StfGrad[it] = -(d_szz_adj[ip] + RSXXZZ * d_sxx_adj[ip]) * dt;
}
// Dongzhuo Li 01/28/2019
// Zero-phase band-pass filter applied in the frequency domain on the R2C
// spectra of nrec traces (nSteps real samples each). The taper ramps up
// with a sine between f0..f1, is flat between f1..f2, ramps down with a
// cosine between f2..f3, and is zero outside; it is applied SQUARED to both
// real and imaginary parts (power of the single-pass taper).
__global__ void cuda_bp_filter1d(int nSteps, float dt, int nrec,
                                 cufftComplex *d_data_F, float f0, float f1,
                                 float f2, float f3) {
  int nf = nSteps / 2 + 1;       // non-redundant R2C frequency bins
  float df = 1.0 / dt / nSteps;  // frequency sampling interval
  int idf = blockIdx.x * blockDim.x + threadIdx.x;
  int idr = blockIdx.y * blockDim.y + threadIdx.y;
  int ip = idr * nf + idf;
  float freq = idf * df;
  float filter_amp = 1.0;
  // printf("fffffff = %f\n", freq);
  if (idf >= 0 && idf < nf && idr >= 0 && idr < nrec) {
    if (freq >= f0 && freq < f1) {
      filter_amp = sin(PI / 2.0 * (freq - f0) / (f1 - f0));
    } else if (freq >= f1 && freq < f2) {
      filter_amp = 1.0;
    } else if (freq >= f2 && freq < f3) {
      filter_amp = cos(PI / 2.0 * (freq - f2) / (f3 - f2));
    } else {
      filter_amp = 0.0;
    }
    d_data_F[ip].x *= filter_amp * filter_amp;
    d_data_F[ip].y *= filter_amp * filter_amp;
  }
}
// Multiply every trace's spectrum by a per-frequency complex coefficient
// (d_coef has nf entries, shared across all nrec traces).
__global__ void cuda_filter1d(int nf, int nrec, cuFloatComplex *d_data_F,
                              cuFloatComplex *d_coef) {
  const int idf = blockIdx.x * blockDim.x + threadIdx.x;
  const int idr = blockIdx.y * blockDim.y + threadIdx.y;
  if (idf < 0 || idf >= nf || idr < 0 || idr >= nrec) return;
  const int ip = idr * nf + idf;
  d_data_F[ip] = cuCmulf(d_data_F[ip], d_coef[idf]);
}
// Scale an nz-by-nx device array in place by `factor`. Despite the name this
// is a plain multiply; factor == 0 is rejected (every thread prints and
// bails) as a guard against a caller passing an uninitialized scale.
__global__ void cuda_normalize(int nz, int nx, float *data, float factor) {
  const int idz = blockIdx.x * blockDim.x + threadIdx.x;
  const int idx = blockIdx.y * blockDim.y + threadIdx.y;
  if (factor == 0.0) {
    printf("Dividing by zero!\n");
    return;
  }
  if (idz < 0 || idz >= nz || idx < 0 || idx >= nx) return;
  data[idx * nz + idz] *= factor;
}
// windowing in the time axis
// Per-trace time windowing: applies a squared sine/cosine taper of width
// ratio*(t3-t0) at each end of the per-receiver window [d_win_start[idr],
// d_win_end[idr]] (clamped to [0, nt*dt]), then scales by the per-receiver
// weight d_weights[idr]. Samples outside the window are zeroed.
// Degenerate windows (offset <= 0) leave the trace untouched and print a
// diagnostic from the device.
__global__ void cuda_window(int nt, int nrec, float dt, float *d_win_start,
                            float *d_win_end, float *d_weights, float ratio,
                            float *data) {
  int idt = blockIdx.x * blockDim.x + threadIdx.x;
  int idr = blockIdx.y * blockDim.y + threadIdx.y;
  int ip = idr * nt + idt;
  // stupid bug... (I put the if just befor line 614)
  if (idt >= 0 && idt < nt && idr >= 0 && idr < nrec) {
    float window_amp = 1.0;
    float t = idt * dt;
    // ratio > 0.5 would make the two tapers overlap
    if (ratio > 0.5) {
      printf("Dividing by zero!\n");
      return;
    }
    float t0 = d_win_start[idr];
    float t3 = d_win_end[idr];
    if (t0 == 0.0 && t3 == 0.0) printf("t0 = %f, t3 = %f\n\n", t0, t3);
    float t_max = nt * dt;
    // clamp the window limits into the trace's time range
    if (t0 < 0.0) t0 = 0.0;
    if (t0 > t_max) t0 = t_max;
    if (t3 < 0.0) t3 = 0.0;
    if (t3 > t_max) t3 = t_max;
    float offset = (t3 - t0) * ratio;  // taper length at each end
    if (offset <= 0.0) {
      printf("Window error 1!!\n");
      printf("offset = %f\n", offset);
      return;
    }
    float t1 = t0 + offset;
    float t2 = t3 - offset;
    // piecewise taper: sin ramp-up, flat, cos ramp-down, zero outside
    if (t >= t0 && t < t1) {
      window_amp = sin(PI / 2.0 * (t - t0) / (t1 - t0));
    } else if (t >= t1 && t < t2) {
      window_amp = 1.0;
    } else if (t >= t2 && t < t3) {
      window_amp = cos(PI / 2.0 * (t - t2) / (t3 - t2));
    } else {
      window_amp = 0.0;
    }
    data[ip] *= window_amp * window_amp * d_weights[idr];
  } else {
    return;
  }
}
// overloaded window function: without specifying windows and weights
// Overload of cuda_window without per-receiver windows/weights: the window
// spans the whole trace [0, nt*dt] with squared sine/cosine tapers of length
// ratio*nt*dt at each end. Rejects tapers that would overlap.
__global__ void cuda_window(int nt, int nrec, float dt, float ratio,
                            float *data) {
  int idt = blockIdx.x * blockDim.x + threadIdx.x;
  int idr = blockIdx.y * blockDim.y + threadIdx.y;
  int ip = idr * nt + idt;
  if (idt >= 0 && idt < nt && idr >= 0 && idr < nrec) {
    float window_amp = 1.0;
    float t = idt * dt;
    // if (ratio > 0.5) {
    //   printf("Dividing by zero!\n");
    //   return;
    // }
    float t0 = 0;
    float t3 = nt * dt;
    float offset = nt * dt * ratio;  // taper length at each end
    if (2.0 * offset >= t3 - t0) {
      printf("Window error 2!\n");
      return;
    }
    float t1 = t0 + offset;
    float t2 = t3 - offset;
    // piecewise taper: sin ramp-up, flat, cos ramp-down, zero outside
    if (t >= t0 && t < t1) {
      window_amp = sin(PI / 2.0 * (t - t0) / (t1 - t0));
    } else if (t >= t1 && t < t2) {
      window_amp = 1.0;
    } else if (t >= t2 && t < t3) {
      window_amp = cos(PI / 2.0 * (t - t2) / (t3 - t2));
    } else {
      window_amp = 0.0;
    }
    data[ip] *= window_amp * window_amp;
  }
}
// Array padding
__global__ void cuda_embed_crop(int nz, int nx, float *d_data, int nz_pad,
int nx_pad, float *d_data_pad, bool isEmbed) {
int idz = blockIdx.x * blockDim.x + threadIdx.x;
int idx = blockIdx.y * blockDim.y + threadIdx.y;
int ip = idx * nz + idz;
int ip_pad = idx * nz_pad + idz;
if (idz >= 0 && idz < nz && idx >= 0 && idx < nx) {
if (isEmbed) {
d_data_pad[ip_pad] = d_data[ip];
} else {
d_data[ip] = d_data_pad[ip_pad];
}
} else {
return;
}
}
// Dongzhuo Li 02/02/2019
// Per-frequency source-matching (Wiener-style) filter:
//   coef(f) = sum_r conj(cal)*obs / (sum_r |cal|^2 + lambda)
// One block per frequency (bid); 512 threads stride over the receivers and
// reduce the numerator/denominator in shared memory. Thread 0 stores the
// coefficient, updates the source spectrum in place, and then all threads
// apply the coefficient to every trace's calculated spectrum.
// Launch as <<<nf, 512>>>.
__global__ void cuda_spectrum_update(int nf, int nrec,
                                     cuFloatComplex *d_data_obs_F,
                                     cuFloatComplex *d_data_cal_F,
                                     cuFloatComplex *d_source_F,
                                     cuFloatComplex *d_coef) {
  int idr = 0, idf = 0, ip = 0;
  const int Block_Size = 512;
  const float lambda = 1e-6;  // damping to avoid division by ~0 energy
  cuFloatComplex c_obs = make_cuFloatComplex(0.0f, 0.0f);
  cuFloatComplex c_cal = make_cuFloatComplex(0.0f, 0.0f);
  cuFloatComplex c_nominator = make_cuFloatComplex(0.0f, 0.0f);
  cuFloatComplex c_denominator = make_cuFloatComplex(0.0f, 0.0f);
  __shared__ cuFloatComplex sh_nominator_F[Block_Size];
  __shared__ cuFloatComplex sh_denominator_F[Block_Size];
  int tid =
      threadIdx.x;  // one thread handles s receivers (with 512 as the interval)
  int bid = blockIdx.x;  // one block handles one frequency
  sh_nominator_F[tid] = make_cuFloatComplex(0.0f, 0.0f);
  sh_denominator_F[tid] = make_cuFloatComplex(0.0f, 0.0f);
  __syncthreads();
  // strided accumulation over receivers
  for (int s = 0; s < (nrec + Block_Size - 1) / Block_Size; s++) {
    idr = s * blockDim.x + tid;
    idf = bid;
    ip = idr * nf + idf;
    if (idr >= 0 && idr < nrec && idf >= 0 && idf < nf) {
      c_obs = d_data_obs_F[ip];
      c_cal = d_data_cal_F[ip];
      sh_nominator_F[tid] =
          cuCaddf(sh_nominator_F[tid], cuCmulf(cuConjf(c_cal), c_obs));
      sh_denominator_F[tid] =
          cuCaddf(sh_denominator_F[tid], cuCmulf(cuConjf(c_cal), c_cal));
    }
  }
  __syncthreads();
  // do reduction in shared memory (blockDim.x must be a power of two)
  for (int s = blockDim.x / 2; s >= 1; s /= 2) {
    if (tid < s) {
      sh_nominator_F[tid] =
          cuCaddf(sh_nominator_F[tid], sh_nominator_F[tid + s]);
      sh_denominator_F[tid] =
          cuCaddf(sh_denominator_F[tid], sh_denominator_F[tid + s]);
    }
    __syncthreads();
  }
  // thread 0 forms the coefficient and updates the source spectrum
  if (tid == 0) {
    sh_denominator_F[0].x += lambda;
    // printf("nomi = %f, deno = %f\n", cuCabsf(sh_nominator_F[0]),
    // cuCabsf(sh_denominator_F[0]));
    sh_nominator_F[0] = cuCdivf(sh_nominator_F[0], sh_denominator_F[0]);
    // printf("coef = %f", sh_nominator_F[0].x);
    d_coef[bid] = sh_nominator_F[0];
    d_source_F[bid] = cuCmulf(d_source_F[bid], sh_nominator_F[0]);
  }
  // printf("tid = %d\n", tid);
  __syncthreads();
  // broadcast: every thread applies coef to its strided set of traces
  for (int s = 0; s < (nrec + Block_Size - 1) / Block_Size; s++) {
    idr = s * blockDim.x + tid;
    idf = bid;
    ip = idr * nf + idf;
    if (idr >= 0 && idr < nrec && idf >= 0 && idf < nf) {
      d_data_cal_F[ip] = cuCmulf(d_data_cal_F[ip], sh_nominator_F[0]);
      // d_data_cal_F[ip].x *= cuCabsf(sh_nominator_F[0]);
      // d_data_cal_F[ip].y *= cuCabsf(sh_nominator_F[0]);
      // if (tid == 0) printf("ratio = %f\n", cuCabsf(sh_nominator_F[0]));
    }
  }
  __syncthreads();
}
// Single-block reduction: writes max_i |data[i]| into maxval[0].
// NOTE(review): call sites use <<<1, 512>>>; the shared array is sized for
// exactly 512 threads, so launching with more than one block or a different
// block size would be incorrect — keep the launch configuration fixed.
__global__ void cuda_find_absmax(int n, float *data, float *maxval) {
  int tid =
      threadIdx.x;  // one thread handles s receivers (with 512 as the interval)
  const int Block_Size = 512;
  __shared__ float sh_data[Block_Size];
  sh_data[tid] = 0.0;
  __syncthreads();
  // strided scan: each thread keeps the largest |value| it has seen
  for (int s = 0; s < (n + Block_Size - 1) / Block_Size; s++) {
    int ip = s * blockDim.x + tid;
    if (ip >= 0 && ip < n) {
      if (fabs(data[ip]) > fabs(sh_data[tid])) sh_data[tid] = fabs(data[ip]);
    }
  }
  __syncthreads();
  // do reduction in shared memory (max instead of sum)
  for (int s = blockDim.x / 2; s >= 1; s /= 2) {
    if (tid < s) {
      sh_data[tid] =
          (sh_data[tid] >= sh_data[tid + s]) ? sh_data[tid] : sh_data[tid + s];
    }
    __syncthreads();
  }
  if (tid == 0) maxval[0] = sh_data[0];
  __syncthreads();
}
// 1D band-pass filtering wrapper code
// Steps: padding, FFT, filtering, IFFT, cropping
// Host wrapper for zero-phase 1D band-pass filtering of nrec traces:
// zero-pad each trace to 2*nSteps, forward R2C FFT, apply the cosine-tapered
// band-pass (filter = [f0, f1, f2, f3] Hz), inverse C2R FFT, crop back, and
// divide by the padded length (cuFFT transforms are unnormalized).
// NOTE(review): the x-grid is sized for nSteps threads but the filter kernel
// operates on nf = nSteps + 1 frequency bins; when nSteps is a multiple of
// TX the last (Nyquist) bin may not be covered — confirm intent.
void bp_filter1d(int nSteps, float dt, int nrec, float *d_data, float *filter) {
  int nSteps_pad = 2 * nSteps;
  int nfft = nSteps_pad / 2 + 1;
  // float df = 1.0/dt/nSteps_pad;
  float *d_data_pad;
  float f0 = filter[0];
  float f1 = filter[1];
  float f2 = filter[2];
  float f3 = filter[3];
  cufftHandle plan_f, plan_b;
  cufftComplex *d_data_F;
  dim3 threads(TX, TY);
  dim3 blocks((nSteps + TX - 1) / TX, (nrec + TY - 1) / TY);
  // float *h_test = new float[nSteps_pad];
  // pad data (zero-fill then embed the traces at the front)
  CHECK(cudaMalloc((void **)&d_data_pad, nSteps_pad * nrec * sizeof(float)));
  intialArrayGPU<<<blocks, threads>>>(d_data_pad, nSteps_pad, nrec, 0.0);
  cuda_embed_crop<<<blocks, threads>>>(nSteps, nrec, d_data, nSteps_pad, nrec,
                                       d_data_pad, true);
  // CHECK(cudaMemcpy(h_test, d_data_pad, nSteps * sizeof(float),
  // cudaMemcpyDeviceToHost)); displayArray("h_test", h_test, nSteps_pad, 1);
  // filtering in the frequency domain
  CHECK(cudaMalloc((void **)&d_data_F, sizeof(cufftComplex) * nfft * nrec));
  cufftPlan1d(&plan_f, nSteps_pad, CUFFT_R2C, nrec);
  cufftExecR2C(plan_f, d_data_pad, d_data_F);  // forward FFT
  cufftDestroy(plan_f);
  cuda_bp_filter1d<<<blocks, threads>>>(nSteps_pad, dt, nrec, d_data_F, f0, f1,
                                        f2, f3);
  cufftPlan1d(&plan_b, nSteps_pad, CUFFT_C2R, nrec);
  cufftExecC2R(plan_b, d_data_F, d_data_pad);  // inverse FFT
  cufftDestroy(plan_b);
  // CHECK(cudaMemcpy(h_test, d_data_pad, nSteps * sizeof(float),
  // cudaMemcpyDeviceToHost)); displayArray("h_test", h_test, nSteps_pad, 1);
  // crop data back to the original length
  cuda_embed_crop<<<blocks, threads>>>(nSteps, nrec, d_data, nSteps_pad, nrec,
                                       d_data_pad, false);
  // normalization (in the padded fft, the length is nSteps_pad)
  cuda_normalize<<<blocks, threads>>>(nSteps, nrec, d_data,
                                      1 / float(nSteps_pad));
  CHECK(cudaFree(d_data_F));
  CHECK(cudaFree(d_data_pad));
}
// source signature and calculated data update
// Steps: padding, FFT, compute spectrum, filtering, IFFT, cropping
// Source-signature estimation by spectral matching: zero-pad and window the
// observed/calculated gathers and the source wavelet, FFT all three, let
// cuda_spectrum_update compute a per-frequency matching coefficient
// (stored in d_coef) and apply it to the source and calculated spectra,
// then inverse-FFT, crop, and renormalize by the padded length. Updates
// d_data_cal and d_source in place; returns the peak-amplitude ratio
// max|obs| / max|cal| (after the update) for use by the adjoint pass.
float source_update(int nSteps, float dt, int nrec, float *d_data_obs,
                    float *d_data_cal, float *d_source,
                    cuFloatComplex *d_coef) {
  int nSteps_pad = 2 * nSteps;
  int nfft = nSteps_pad / 2 + 1;
  float *d_data_obs_pad, *d_data_cal_pad, *d_source_pad;
  cufftHandle plan_f, plan_b;
  cufftComplex *d_data_obs_F, *d_data_cal_F, *d_source_F;
  dim3 threads(TX, TY);
  dim3 blocks((nSteps + TX - 1) / TX, (nrec + TY - 1) / TY);
  dim3 blocks_pad((nSteps_pad + TX - 1) / TX, (nrec + TY - 1) / TY);
  // pad data and window data (1% end tapers suppress FFT wrap-around)
  CHECK(
      cudaMalloc((void **)&d_data_obs_pad, nSteps_pad * nrec * sizeof(float)));
  CHECK(
      cudaMalloc((void **)&d_data_cal_pad, nSteps_pad * nrec * sizeof(float)));
  CHECK(cudaMalloc((void **)&d_source_pad, nSteps_pad * sizeof(float)));
  intialArrayGPU<<<blocks, threads>>>(d_data_obs_pad, nSteps_pad, nrec, 0.0);
  intialArrayGPU<<<blocks, threads>>>(d_data_cal_pad, nSteps_pad, nrec, 0.0);
  intialArrayGPU<<<(nSteps_pad + 31) / 32, 32>>>(d_source_pad, nSteps_pad, 1,
                                                 0.0);
  cuda_embed_crop<<<blocks, threads>>>(nSteps, nrec, d_data_obs, nSteps_pad,
                                       nrec, d_data_obs_pad, true);
  cuda_embed_crop<<<blocks, threads>>>(nSteps, nrec, d_data_cal, nSteps_pad,
                                       nrec, d_data_cal_pad, true);
  cuda_window<<<blocks_pad, threads>>>(nSteps_pad, nrec, dt, 0.01,
                                       d_data_obs_pad);
  cuda_window<<<blocks_pad, threads>>>(nSteps_pad, nrec, dt, 0.01,
                                       d_data_cal_pad);
  cuda_embed_crop<<<(nSteps_pad + 31) / 32, 32>>>(
      nSteps, 1, d_source, nSteps_pad, 1, d_source_pad, true);
  // CHECK(cudaMemcpy(h_test, d_data_pad, nSteps * sizeof(float),
  // cudaMemcpyDeviceToHost)); displayArray("h_test", h_test, nSteps_pad, 1);
  // forward FFTs of both gathers and the source wavelet
  CHECK(cudaMalloc((void **)&d_data_obs_F, sizeof(cufftComplex) * nfft * nrec));
  CHECK(cudaMalloc((void **)&d_data_cal_F, sizeof(cufftComplex) * nfft * nrec));
  // NOTE(review): no ';' after this CHECK(...) — relies on the CHECK macro
  // expanding to a complete statement; confirm the macro definition.
  CHECK(cudaMalloc((void **)&d_source_F, sizeof(cufftComplex) * nfft))
  cufftPlan1d(&plan_f, nSteps_pad, CUFFT_R2C, nrec);
  cufftExecR2C(plan_f, d_data_obs_pad,
               d_data_obs_F);  // forward FFT of observed data
  cufftExecR2C(plan_f, d_data_cal_pad,
               d_data_cal_F);  // forward FFT of calculated data
  cufftDestroy(plan_f);
  cufftPlan1d(&plan_f, nSteps_pad, CUFFT_R2C, 1);  // source FFT
  cufftExecR2C(plan_f, d_source_pad, d_source_F);
  cufftDestroy(plan_f);
  // cuda_bp_filter1d<<<blocks,threads>>>(nSteps_pad, dt, nrec, d_data_F, f0,
  // f1, f2, f3);
  // per-frequency matching filter; one block per frequency bin
  cuda_spectrum_update<<<nfft, 512>>>(nfft, nrec, d_data_obs_F, d_data_cal_F,
                                      d_source_F, d_coef);
  cufftPlan1d(&plan_b, nSteps_pad, CUFFT_C2R, nrec);
  cufftExecC2R(plan_b, d_data_cal_F, d_data_cal_pad);  // inverse FFT
  cufftDestroy(plan_b);
  cufftPlan1d(&plan_b, nSteps_pad, CUFFT_C2R, 1);
  cufftExecC2R(plan_b, d_source_F, d_source_pad);  // inverse FFT
  cufftDestroy(plan_b);
  // CHECK(cudaMemcpy(h_test, d_data_pad, nSteps * sizeof(float),
  // cudaMemcpyDeviceToHost)); displayArray("h_test", h_test, nSteps_pad, 1);
  // crop data back to the unpadded length
  cuda_embed_crop<<<blocks, threads>>>(nSteps, nrec, d_data_cal, nSteps_pad,
                                       nrec, d_data_cal_pad, false);
  cuda_embed_crop<<<(nSteps + 31) / 32, 32>>>(nSteps, 1, d_source, nSteps_pad,
                                              1, d_source_pad, false);
  // normalization (in the padded fft, the length is nSteps_pad)
  // printf("amp = %f\n", amp_ratio);
  cuda_normalize<<<blocks, threads>>>(nSteps, nrec, d_data_cal,
                                      1.0f / float(nSteps_pad));
  cuda_normalize<<<(nSteps + 31) / 32, 32>>>(nSteps, 1, d_source,
                                             1.0f / float(nSteps_pad));
  float amp_ratio = amp_ratio_comp(nSteps * nrec, d_data_obs, d_data_cal);
  // cuda_normalize<<<blocks,threads>>>(nSteps, nrec, d_data_cal, amp_ratio);
  // cuda_normalize<<<(nSteps+31)/32, 32>>>(nSteps, 1, d_source,
  // amp_ratio/float(nSteps_pad));
  // // update amplitude
  // cuda_find_absmax<<<1, 512>>>(nSteps*nrec, d_data_obs, d_obs_maxval);
  // cuda_find_absmax<<<1, 512>>>(nSteps*nrec, d_data_cal, d_cal_maxval);
  // CHECK(cudaMemcpy(obs_maxval, d_obs_maxval, sizeof(float),
  // cudaMemcpyDeviceToHost)); CHECK(cudaMemcpy(cal_maxval, d_cal_maxval,
  // sizeof(float), cudaMemcpyDeviceToHost));
  // cuda_normalize<<<blocks,threads>>>(nSteps, nrec,
  // d_data_cal, 1.0/amp_ratio); printf("Shot gather amplitude ratio = %f\n",
  // obs_maxval[0]/cal_maxval[0]);
  CHECK(cudaFree(d_data_obs_pad));
  CHECK(cudaFree(d_data_cal_pad));
  CHECK(cudaFree(d_data_obs_F));
  CHECK(cudaFree(d_data_cal_F));
  CHECK(cudaFree(d_source_pad));
  CHECK(cudaFree(d_source_F));
  return amp_ratio;
}
// source signature and calculated data update
// Steps: padding, FFT, compute spectrum, filtering, IFFT, cropping
// Adjoint counterpart of source_update: applies the previously computed
// per-frequency coefficients d_coef to the adjoint data (pad, window, FFT,
// multiply by coef, inverse FFT, crop) and rescales by amp_ratio over the
// padded FFT length. Updates d_data in place.
void source_update_adj(int nSteps, float dt, int nrec, float *d_data,
                       float amp_ratio, cuFloatComplex *d_coef) {
  int nSteps_pad = 2 * nSteps;
  int nfft = nSteps_pad / 2 + 1;
  float *d_data_pad;
  cufftHandle plan_f, plan_b;
  cufftComplex *d_data_F;
  dim3 threads(TX, TY);
  dim3 blocks((nSteps + TX - 1) / TX, (nrec + TY - 1) / TY);
  dim3 blocks_pad((nSteps_pad + TX - 1) / TX, (nrec + TY - 1) / TY);
  // cuda_normalize<<<blocks,threads>>>(nSteps, nrec, d_data, amp_ratio);
  // pad data (zero-fill, embed, 1% end taper)
  CHECK(cudaMalloc((void **)&d_data_pad, nSteps_pad * nrec * sizeof(float)));
  intialArrayGPU<<<blocks, threads>>>(d_data_pad, nSteps_pad, nrec, 0.0);
  cuda_embed_crop<<<blocks, threads>>>(nSteps, nrec, d_data, nSteps_pad, nrec,
                                       d_data_pad, true);
  cuda_window<<<blocks_pad, threads>>>(nSteps_pad, nrec, dt, 0.01, d_data_pad);
  CHECK(cudaMalloc((void **)&d_data_F, sizeof(cufftComplex) * nfft * nrec));
  cufftPlan1d(&plan_f, nSteps_pad, CUFFT_R2C, nrec);
  cufftExecR2C(plan_f, d_data_pad, d_data_F);
  cufftDestroy(plan_f);
  // update data: multiply each trace's spectrum by the stored coefficients
  cuda_filter1d<<<blocks, threads>>>(nfft, nrec, d_data_F, d_coef);
  cufftPlan1d(&plan_b, nSteps_pad, CUFFT_C2R, nrec);
  cufftExecC2R(plan_b, d_data_F, d_data_pad);  // inverse FFT
  cufftDestroy(plan_b);
  // crop data back to the unpadded length
  cuda_embed_crop<<<blocks, threads>>>(nSteps, nrec, d_data, nSteps_pad, nrec,
                                       d_data_pad, false);
  // normalization (in the padded fft, the length is nSteps_pad)
  // printf("amp_adj = %f\n", amp_ratio);
  cuda_normalize<<<blocks, threads>>>(nSteps, nrec, d_data,
                                      amp_ratio / float(nSteps_pad));
  CHECK(cudaFree(d_data_pad));
  CHECK(cudaFree(d_data_F));
}
// Ratio of peak absolute amplitudes between observed and calculated device
// gathers (n samples each): max|obs| / max|cal|, or 0 when the calculated
// gather is identically zero. Used to rescale the calculated data / source.
//
// Fix: the original allocated the two host scalars with malloc() but
// released them with delete[] — a malloc/delete[] mismatch is undefined
// behavior. The temporaries are now plain stack variables, which removes
// the heap round-trip (and the bug) entirely.
float amp_ratio_comp(int n, float *d_data_obs, float *d_data_cal) {
  float obs_maxval = 0.0f;
  float cal_maxval = 0.0f;
  float *d_obs_maxval, *d_cal_maxval;
  CHECK(cudaMalloc((void **)&d_obs_maxval, sizeof(float)));
  CHECK(cudaMalloc((void **)&d_cal_maxval, sizeof(float)));
  // Single-block reductions; cuda_find_absmax is written for <<<1, 512>>>.
  cuda_find_absmax<<<1, 512>>>(n, d_data_obs, d_obs_maxval);
  cuda_find_absmax<<<1, 512>>>(n, d_data_cal, d_cal_maxval);
  CHECK(cudaMemcpy(&obs_maxval, d_obs_maxval, sizeof(float),
                   cudaMemcpyDeviceToHost));
  CHECK(cudaMemcpy(&cal_maxval, d_cal_maxval, sizeof(float),
                   cudaMemcpyDeviceToHost));
  // printf("Shot gather amplitude ratio = %f\n", obs_maxval/cal_maxval);
  float ratio = 0.0;
  if (cal_maxval != 0.0) {
    ratio = obs_maxval / cal_maxval;
  }
  CHECK(cudaFree(d_obs_maxval));
  CHECK(cudaFree(d_cal_maxval));
  return ratio;
}
7e7cde03c9e201a08f4011524310f62396c62706.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
// Created by luozhiwang (luozw1994@outlook.com)
// Date: 2020/3/16
#include "utils.h"
//static void HandleError(hipError_t err, const char *file, int line ) {
// if (err != hipSuccess) {
// printf( "%s in %s at line %d\n", hipGetErrorString( err ),
// file, line );
// exit( EXIT_FAILURE );
// }
//}
//#define HANDLE_ERROR( err ) (HandleError( err, __FILE__, __LINE__ ))
#define DIVUP(m,n) ((m) / (n) + ((m) % (n) > 0))
int const threadsPerBlock = sizeof(unsigned long long) * 8;
// Intersection-over-union of two boxes laid out as [x1, y1, x2, y2, ...]
// with inclusive pixel coordinates (hence the +1 in widths and heights).
__device__ inline float devIoU(float const * const a, float const * const b) {
  const float ix1 = max(a[0], b[0]);
  const float iy1 = max(a[1], b[1]);
  const float ix2 = min(a[2], b[2]);
  const float iy2 = min(a[3], b[3]);
  const float iw = max(ix2 - ix1 + 1, 0.f);
  const float ih = max(iy2 - iy1 + 1, 0.f);
  const float inter = iw * ih;
  const float areaA = (a[2] - a[0] + 1) * (a[3] - a[1] + 1);
  const float areaB = (b[2] - b[0] + 1) * (b[3] - b[1] + 1);
  return inter / (areaA + areaB - inter);
}
// Non-maximum suppression bitmask kernel. Boxes are tiled in groups of
// threadsPerBlock (= 64, the bit width of unsigned long long); block (x, y)
// compares the boxes of row-tile y against the boxes of column-tile x.
// Each thread owns one row box and sets bit i of a 64-bit word for every
// column box i whose IoU with it exceeds nms_overlap_thresh; the word is
// stored at dev_mask[box * col_blocks + col_tile]. Boxes are assumed
// pre-sorted by score, so only later boxes (start offset on the diagonal
// tile) are marked. Launch with dynamic shared memory of
// threadsPerBlock * bd * sizeof(float) bytes; bd is the per-box stride
// (default 5: x1, y1, x2, y2, score).
__global__ void nms_kernel(const int n_boxes, const float nms_overlap_thresh,
                           const float *dev_boxes, unsigned long long *dev_mask, int bd=5) {
  const int row_start = blockIdx.y;
  const int col_start = blockIdx.x;
  const int box_dim = bd;
  // clamp tile sizes at the ragged end of the box list
  const int row_size =
        min(n_boxes - row_start * threadsPerBlock, threadsPerBlock);
  const int col_size =
        min(n_boxes - col_start * threadsPerBlock, threadsPerBlock);
  // stage the column tile's boxes in shared memory (first 5 fields only)
  extern __shared__ float block_boxes[];
  if (threadIdx.x < col_size) {
    block_boxes[threadIdx.x * box_dim + 0] =
        dev_boxes[(threadsPerBlock * col_start + threadIdx.x) * box_dim + 0];
    block_boxes[threadIdx.x * box_dim + 1] =
        dev_boxes[(threadsPerBlock * col_start + threadIdx.x) * box_dim + 1];
    block_boxes[threadIdx.x * box_dim + 2] =
        dev_boxes[(threadsPerBlock * col_start + threadIdx.x) * box_dim + 2];
    block_boxes[threadIdx.x * box_dim + 3] =
        dev_boxes[(threadsPerBlock * col_start + threadIdx.x) * box_dim + 3];
    block_boxes[threadIdx.x * box_dim + 4] =
        dev_boxes[(threadsPerBlock * col_start + threadIdx.x) * box_dim + 4];
  }
  __syncthreads();
  if (threadIdx.x < row_size) {
    const int cur_box_idx = threadsPerBlock * row_start + threadIdx.x;
    const float *cur_box = dev_boxes + cur_box_idx * box_dim;
    int i = 0;
    unsigned long long t = 0;
    int start = 0;
    // on the diagonal tile only compare against strictly later boxes
    if (row_start == col_start) {
      start = threadIdx.x + 1;
    }
    for (i = start; i < col_size; i++) {
      if (devIoU(cur_box, block_boxes + i * box_dim) > nms_overlap_thresh) {
        t |= 1ULL << i;
      }
    }
    const int col_blocks = DIVUP(n_boxes, threadsPerBlock);
    dev_mask[cur_box_idx * col_blocks + col_start] = t;
  }
}
void _set_device(int device_id) {
int current_device;
CHECK(hipGetDevice(¤t_device));
if (current_device == device_id) {
return;
}
// The call to hipSetDevice must come before any calls to Get, which
// may perform initialization using the GPU.
CHECK(hipSetDevice(device_id));
}
void _nms(int* keep_out, int* num_out, const float* boxes_host, int boxes_num,
int boxes_dim, float nms_overlap_thresh, int device_id) {
_set_device(device_id);
float* boxes_dev = NULL;
unsigned long long* mask_dev = NULL;
const int col_blocks = DIVUP(boxes_num, threadsPerBlock);
CHECK(hipMalloc(&boxes_dev,
boxes_num * boxes_dim * sizeof(float)));
CHECK(hipMemcpy(boxes_dev,
boxes_host,
boxes_num * boxes_dim * sizeof(float),
hipMemcpyHostToDevice));
CHECK(hipMalloc(&mask_dev,
boxes_num * col_blocks * sizeof(unsigned long long)));
dim3 blocks(DIVUP(boxes_num, threadsPerBlock),
DIVUP(boxes_num, threadsPerBlock));
dim3 threads(threadsPerBlock);
hipLaunchKernelGGL(( nms_kernel), dim3(blocks), dim3(threads), threadsPerBlock * boxes_dim * sizeof(float), 0, boxes_num,
nms_overlap_thresh,
boxes_dev,
mask_dev,
boxes_dim);
std::vector<unsigned long long> mask_host(boxes_num * col_blocks);
CHECK(hipMemcpy(&mask_host[0],
mask_dev,
sizeof(unsigned long long) * boxes_num * col_blocks,
hipMemcpyDeviceToHost));
std::vector<unsigned long long> remv(col_blocks);
memset(&remv[0], 0, sizeof(unsigned long long) * col_blocks);
int num_to_keep = 0;
for (int i = 0; i < boxes_num; i++) {
int nblock = i / threadsPerBlock;
int inblock = i % threadsPerBlock;
if (!(remv[nblock] & (1ULL << inblock))) {
keep_out[num_to_keep++] = i;
unsigned long long *p = &mask_host[0] + i * col_blocks;
for (int j = nblock; j < col_blocks; j++) {
remv[j] |= p[j];
}
}
}
*num_out = num_to_keep;
CHECK(hipFree(boxes_dev));
CHECK(hipFree(mask_dev));
}
std::vector<int> nms(std::vector<common::Bbox> bboxes, float threshold) {
if (bboxes.empty()) {
return std::vector<int>();
}
// 1.score
auto *bboxes_1d = new float[bboxes.size() * 5];
for (int i = 0; i < bboxes.size(); ++i) {
bboxes_1d[i * 5] = bboxes[i].xmin;
bboxes_1d[i * 5 + 1] = bboxes[i].ymin;
bboxes_1d[i * 5 + 2] = bboxes[i].xmax;
bboxes_1d[i * 5 + 3] = bboxes[i].ymax;
bboxes_1d[i * 5 + 4] = bboxes[i].score;
}
// 2.device malloc cpy
int *keep_output = new int[bboxes.size()];
int *num_out = new int;
_nms(keep_output, num_out, bboxes_1d, bboxes.size(), 5, threshold, 0);
std::vector<int> keep_idx;
keep_idx.insert(keep_idx.begin(), keep_output, keep_output + *num_out);
delete[]bboxes_1d;
delete[]keep_output;
delete num_out;
return keep_idx;
}
| 7e7cde03c9e201a08f4011524310f62396c62706.cu | // Created by luozhiwang (luozw1994@outlook.com)
// Date: 2020/3/16
#include "utils.h"
//static void HandleError(cudaError_t err, const char *file, int line ) {
// if (err != cudaSuccess) {
// printf( "%s in %s at line %d\n", cudaGetErrorString( err ),
// file, line );
// exit( EXIT_FAILURE );
// }
//}
//#define HANDLE_ERROR( err ) (HandleError( err, __FILE__, __LINE__ ))
#define DIVUP(m,n) ((m) / (n) + ((m) % (n) > 0))
int const threadsPerBlock = sizeof(unsigned long long) * 8;
__device__ inline float devIoU(float const * const a, float const * const b) {
float left = max(a[0], b[0]), right = min(a[2], b[2]);
float top = max(a[1], b[1]), bottom = min(a[3], b[3]);
float width = max(right - left + 1, 0.f), height = max(bottom - top + 1, 0.f);
float interS = width * height;
float Sa = (a[2] - a[0] + 1) * (a[3] - a[1] + 1);
float Sb = (b[2] - b[0] + 1) * (b[3] - b[1] + 1);
return interS / (Sa + Sb - interS);
}
__global__ void nms_kernel(const int n_boxes, const float nms_overlap_thresh,
const float *dev_boxes, unsigned long long *dev_mask, int bd=5) {
const int row_start = blockIdx.y;
const int col_start = blockIdx.x;
const int box_dim = bd;
const int row_size =
min(n_boxes - row_start * threadsPerBlock, threadsPerBlock);
const int col_size =
min(n_boxes - col_start * threadsPerBlock, threadsPerBlock);
extern __shared__ float block_boxes[];
if (threadIdx.x < col_size) {
block_boxes[threadIdx.x * box_dim + 0] =
dev_boxes[(threadsPerBlock * col_start + threadIdx.x) * box_dim + 0];
block_boxes[threadIdx.x * box_dim + 1] =
dev_boxes[(threadsPerBlock * col_start + threadIdx.x) * box_dim + 1];
block_boxes[threadIdx.x * box_dim + 2] =
dev_boxes[(threadsPerBlock * col_start + threadIdx.x) * box_dim + 2];
block_boxes[threadIdx.x * box_dim + 3] =
dev_boxes[(threadsPerBlock * col_start + threadIdx.x) * box_dim + 3];
block_boxes[threadIdx.x * box_dim + 4] =
dev_boxes[(threadsPerBlock * col_start + threadIdx.x) * box_dim + 4];
}
__syncthreads();
if (threadIdx.x < row_size) {
const int cur_box_idx = threadsPerBlock * row_start + threadIdx.x;
const float *cur_box = dev_boxes + cur_box_idx * box_dim;
int i = 0;
unsigned long long t = 0;
int start = 0;
if (row_start == col_start) {
start = threadIdx.x + 1;
}
for (i = start; i < col_size; i++) {
if (devIoU(cur_box, block_boxes + i * box_dim) > nms_overlap_thresh) {
t |= 1ULL << i;
}
}
const int col_blocks = DIVUP(n_boxes, threadsPerBlock);
dev_mask[cur_box_idx * col_blocks + col_start] = t;
}
}
void _set_device(int device_id) {
int current_device;
CHECK(cudaGetDevice(¤t_device));
if (current_device == device_id) {
return;
}
// The call to cudaSetDevice must come before any calls to Get, which
// may perform initialization using the GPU.
CHECK(cudaSetDevice(device_id));
}
void _nms(int* keep_out, int* num_out, const float* boxes_host, int boxes_num,
int boxes_dim, float nms_overlap_thresh, int device_id) {
_set_device(device_id);
float* boxes_dev = NULL;
unsigned long long* mask_dev = NULL;
const int col_blocks = DIVUP(boxes_num, threadsPerBlock);
CHECK(cudaMalloc(&boxes_dev,
boxes_num * boxes_dim * sizeof(float)));
CHECK(cudaMemcpy(boxes_dev,
boxes_host,
boxes_num * boxes_dim * sizeof(float),
cudaMemcpyHostToDevice));
CHECK(cudaMalloc(&mask_dev,
boxes_num * col_blocks * sizeof(unsigned long long)));
dim3 blocks(DIVUP(boxes_num, threadsPerBlock),
DIVUP(boxes_num, threadsPerBlock));
dim3 threads(threadsPerBlock);
nms_kernel<<<blocks, threads, threadsPerBlock * boxes_dim * sizeof(float)>>>(boxes_num,
nms_overlap_thresh,
boxes_dev,
mask_dev,
boxes_dim);
std::vector<unsigned long long> mask_host(boxes_num * col_blocks);
CHECK(cudaMemcpy(&mask_host[0],
mask_dev,
sizeof(unsigned long long) * boxes_num * col_blocks,
cudaMemcpyDeviceToHost));
std::vector<unsigned long long> remv(col_blocks);
memset(&remv[0], 0, sizeof(unsigned long long) * col_blocks);
int num_to_keep = 0;
for (int i = 0; i < boxes_num; i++) {
int nblock = i / threadsPerBlock;
int inblock = i % threadsPerBlock;
if (!(remv[nblock] & (1ULL << inblock))) {
keep_out[num_to_keep++] = i;
unsigned long long *p = &mask_host[0] + i * col_blocks;
for (int j = nblock; j < col_blocks; j++) {
remv[j] |= p[j];
}
}
}
*num_out = num_to_keep;
CHECK(cudaFree(boxes_dev));
CHECK(cudaFree(mask_dev));
}
std::vector<int> nms(std::vector<common::Bbox> bboxes, float threshold) {
if (bboxes.empty()) {
return std::vector<int>();
}
// 1.之前需要按照score排序
auto *bboxes_1d = new float[bboxes.size() * 5];
for (int i = 0; i < bboxes.size(); ++i) {
bboxes_1d[i * 5] = bboxes[i].xmin;
bboxes_1d[i * 5 + 1] = bboxes[i].ymin;
bboxes_1d[i * 5 + 2] = bboxes[i].xmax;
bboxes_1d[i * 5 + 3] = bboxes[i].ymax;
bboxes_1d[i * 5 + 4] = bboxes[i].score;
}
// 2.device malloc cpy
int *keep_output = new int[bboxes.size()];
int *num_out = new int;
_nms(keep_output, num_out, bboxes_1d, bboxes.size(), 5, threshold, 0);
std::vector<int> keep_idx;
keep_idx.insert(keep_idx.begin(), keep_output, keep_output + *num_out);
delete[]bboxes_1d;
delete[]keep_output;
delete num_out;
return keep_idx;
}
|
68552b77bdf862226b697f5d05dde64cf680a226.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
#include "includes.h"
extern "C"
extern "C"
extern "C"
extern "C"
extern "C"
extern "C"
extern "C"
extern "C"
extern "C"
extern "C"
extern "C"
extern "C"
extern "C"
extern "C"
extern "C"
extern "C"
extern "C"
extern "C"
extern "C"
extern "C"
extern "C"
extern "C"
extern "C"
extern "C"
extern "C"
/*
* Perfom a reduction from data of length 'size' to result, where length of result will be 'number of blocks'.
*/
extern "C"
__global__ void multScalar(int n, float *a, float b, float *result)
{
int i = blockIdx.x * blockDim.x + threadIdx.x;
if (i<n)
{
result[i] = a[i] * b;
}
} | 68552b77bdf862226b697f5d05dde64cf680a226.cu | #include "includes.h"
extern "C"
extern "C"
extern "C"
extern "C"
extern "C"
extern "C"
extern "C"
extern "C"
extern "C"
extern "C"
extern "C"
extern "C"
extern "C"
extern "C"
extern "C"
extern "C"
extern "C"
extern "C"
extern "C"
extern "C"
extern "C"
extern "C"
extern "C"
extern "C"
extern "C"
/*
* Perfom a reduction from data of length 'size' to result, where length of result will be 'number of blocks'.
*/
extern "C"
__global__ void multScalar(int n, float *a, float b, float *result)
{
int i = blockIdx.x * blockDim.x + threadIdx.x;
if (i<n)
{
result[i] = a[i] * b;
}
} |
25a6db917ffa8d0f707bd602698c83f75a836e5e.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
#include <tune_quda.h>
#include <clover_field.h>
#include <launch_kernel.cuh>
#include <jitify_helper.cuh>
#include <kernels/clover_invert.cuh>
namespace quda {
using namespace clover;
#ifdef GPU_CLOVER_DIRAC
template <typename Float, typename Arg>
class CloverInvert : TunableLocalParity {
Arg arg;
const CloverField &meta; // used for meta data only
private:
bool tuneGridDim() const { return true; }
public:
CloverInvert(Arg &arg, const CloverField &meta) : arg(arg), meta(meta) {
writeAuxString("stride=%d,prec=%lu,trlog=%s,twist=%s", arg.clover.stride, sizeof(Float),
arg.computeTraceLog ? "true" : "false", arg.twist ? "true" : "false");
if (meta.Location() == QUDA_CUDA_FIELD_LOCATION) {
#ifdef JITIFY
create_jitify_program("kernels/clover_invert.cuh");
#endif
}
}
virtual ~CloverInvert() { ; }
void apply(const hipStream_t &stream) {
TuneParam tp = tuneLaunch(*this, getTuning(), getVerbosity());
arg.result_h[0] = make_double2(0.,0.);
if (meta.Location() == QUDA_CUDA_FIELD_LOCATION) {
#ifdef JITIFY
using namespace jitify::reflection;
jitify_error = program->kernel("quda::cloverInvertKernel")
.instantiate((int)tp.block.x, Type<Float>(), Type<Arg>(), arg.computeTraceLog, arg.twist)
.configure(tp.grid, tp.block, tp.shared_bytes, stream)
.launch(arg);
#else
if (arg.computeTraceLog) {
if (arg.twist) {
errorQuda("Not instantiated");
} else {
LAUNCH_KERNEL_LOCAL_PARITY(cloverInvertKernel, (*this), tp, stream, arg, Float, Arg, true, false);
}
} else {
if (arg.twist) {
hipLaunchKernelGGL(( cloverInvertKernel<1,Float,Arg,false,true>) , dim3(tp.grid),dim3(tp.block),tp.shared_bytes,stream, arg);
} else {
hipLaunchKernelGGL(( cloverInvertKernel<1,Float,Arg,false,false>) , dim3(tp.grid),dim3(tp.block),tp.shared_bytes,stream, arg);
}
}
#endif
} else {
if (arg.computeTraceLog) {
if (arg.twist) {
cloverInvert<Float, Arg, true, true>(arg);
} else {
cloverInvert<Float, Arg, true, false>(arg);
}
} else {
if (arg.twist) {
cloverInvert<Float, Arg, false, true>(arg);
} else {
cloverInvert<Float, Arg, false, false>(arg);
}
}
}
}
TuneKey tuneKey() const {
return TuneKey(meta.VolString(), typeid(*this).name(), aux);
}
long long flops() const { return 0; }
long long bytes() const { return 2*arg.clover.volumeCB*(arg.inverse.Bytes() + arg.clover.Bytes()); }
void preTune() { if (arg.clover.clover == arg.inverse.clover) arg.inverse.save(); }
void postTune() { if (arg.clover.clover == arg.inverse.clover) arg.inverse.load(); }
};
template <typename Float>
void cloverInvert(CloverField &clover, bool computeTraceLog) {
CloverInvertArg<Float> arg(clover, computeTraceLog);
CloverInvert<Float,CloverInvertArg<Float>> invert(arg, clover);
invert.apply(0);
if (arg.computeTraceLog) {
qudaDeviceSynchronize();
comm_allreduce_array((double*)arg.result_h, 2);
clover.TrLog()[0] = arg.result_h[0].x;
clover.TrLog()[1] = arg.result_h[0].y;
}
}
#endif
// this is the function that is actually called, from here on down we instantiate all required templates
void cloverInvert(CloverField &clover, bool computeTraceLog) {
#ifdef GPU_CLOVER_DIRAC
if (clover.Precision() == QUDA_HALF_PRECISION && clover.Order() > 4)
errorQuda("Half precision not supported for order %d", clover.Order());
if (clover.Precision() == QUDA_DOUBLE_PRECISION) {
cloverInvert<double>(clover, computeTraceLog);
} else if (clover.Precision() == QUDA_SINGLE_PRECISION) {
cloverInvert<float>(clover, computeTraceLog);
} else {
errorQuda("Precision %d not supported", clover.Precision());
}
#else
errorQuda("Clover has not been built");
#endif
}
} // namespace quda
| 25a6db917ffa8d0f707bd602698c83f75a836e5e.cu | #include <tune_quda.h>
#include <clover_field.h>
#include <launch_kernel.cuh>
#include <jitify_helper.cuh>
#include <kernels/clover_invert.cuh>
namespace quda {
using namespace clover;
#ifdef GPU_CLOVER_DIRAC
template <typename Float, typename Arg>
class CloverInvert : TunableLocalParity {
Arg arg;
const CloverField &meta; // used for meta data only
private:
bool tuneGridDim() const { return true; }
public:
CloverInvert(Arg &arg, const CloverField &meta) : arg(arg), meta(meta) {
writeAuxString("stride=%d,prec=%lu,trlog=%s,twist=%s", arg.clover.stride, sizeof(Float),
arg.computeTraceLog ? "true" : "false", arg.twist ? "true" : "false");
if (meta.Location() == QUDA_CUDA_FIELD_LOCATION) {
#ifdef JITIFY
create_jitify_program("kernels/clover_invert.cuh");
#endif
}
}
virtual ~CloverInvert() { ; }
void apply(const cudaStream_t &stream) {
TuneParam tp = tuneLaunch(*this, getTuning(), getVerbosity());
arg.result_h[0] = make_double2(0.,0.);
if (meta.Location() == QUDA_CUDA_FIELD_LOCATION) {
#ifdef JITIFY
using namespace jitify::reflection;
jitify_error = program->kernel("quda::cloverInvertKernel")
.instantiate((int)tp.block.x, Type<Float>(), Type<Arg>(), arg.computeTraceLog, arg.twist)
.configure(tp.grid, tp.block, tp.shared_bytes, stream)
.launch(arg);
#else
if (arg.computeTraceLog) {
if (arg.twist) {
errorQuda("Not instantiated");
} else {
LAUNCH_KERNEL_LOCAL_PARITY(cloverInvertKernel, (*this), tp, stream, arg, Float, Arg, true, false);
}
} else {
if (arg.twist) {
cloverInvertKernel<1,Float,Arg,false,true> <<<tp.grid,tp.block,tp.shared_bytes,stream>>>(arg);
} else {
cloverInvertKernel<1,Float,Arg,false,false> <<<tp.grid,tp.block,tp.shared_bytes,stream>>>(arg);
}
}
#endif
} else {
if (arg.computeTraceLog) {
if (arg.twist) {
cloverInvert<Float, Arg, true, true>(arg);
} else {
cloverInvert<Float, Arg, true, false>(arg);
}
} else {
if (arg.twist) {
cloverInvert<Float, Arg, false, true>(arg);
} else {
cloverInvert<Float, Arg, false, false>(arg);
}
}
}
}
TuneKey tuneKey() const {
return TuneKey(meta.VolString(), typeid(*this).name(), aux);
}
long long flops() const { return 0; }
long long bytes() const { return 2*arg.clover.volumeCB*(arg.inverse.Bytes() + arg.clover.Bytes()); }
void preTune() { if (arg.clover.clover == arg.inverse.clover) arg.inverse.save(); }
void postTune() { if (arg.clover.clover == arg.inverse.clover) arg.inverse.load(); }
};
template <typename Float>
void cloverInvert(CloverField &clover, bool computeTraceLog) {
CloverInvertArg<Float> arg(clover, computeTraceLog);
CloverInvert<Float,CloverInvertArg<Float>> invert(arg, clover);
invert.apply(0);
if (arg.computeTraceLog) {
qudaDeviceSynchronize();
comm_allreduce_array((double*)arg.result_h, 2);
clover.TrLog()[0] = arg.result_h[0].x;
clover.TrLog()[1] = arg.result_h[0].y;
}
}
#endif
// this is the function that is actually called, from here on down we instantiate all required templates
void cloverInvert(CloverField &clover, bool computeTraceLog) {
#ifdef GPU_CLOVER_DIRAC
if (clover.Precision() == QUDA_HALF_PRECISION && clover.Order() > 4)
errorQuda("Half precision not supported for order %d", clover.Order());
if (clover.Precision() == QUDA_DOUBLE_PRECISION) {
cloverInvert<double>(clover, computeTraceLog);
} else if (clover.Precision() == QUDA_SINGLE_PRECISION) {
cloverInvert<float>(clover, computeTraceLog);
} else {
errorQuda("Precision %d not supported", clover.Precision());
}
#else
errorQuda("Clover has not been built");
#endif
}
} // namespace quda
|
5775b37d61e85e0fa7143509adee1bdbbfbce399.hip | // !!! This is a file automatically generated by hipify!!!
/*
* Copyright (c) 2011, Alex Krizhevsky (akrizhevsky@gmail.com)
* All rights reserved.
*
* Redistribution and use in source and binary forms, with or without modification,
* are permitted provided that the following conditions are met:
*
* - Redistributions of source code must retain the above copyright notice,
* this list of conditions and the following disclaimer.
*
* - Redistributions in binary form must reproduce the above copyright notice,
* this list of conditions and the following disclaimer in the documentation
* and/or other materials provided with the distribution.
*
* THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
* AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
* IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
* ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE
* LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
* DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
* LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND
* ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING
* NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE,
* EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
*/
#include <Python.h>
#include <arrayobject.h>
#include <assert.h>
#include <helper_cuda.h>
#include <rocblas.h>
#include <time.h>
#include <vector>
#include <matrix.h>
#include <queue.h>
#include <worker.cuh>
#include <util.cuh>
#include <cost.cuh>
#include <pyconvnet.cuh>
#include <convnet.cuh>
using namespace std;
static ConvNet* model = NULL;
static PyMethodDef _ConvNetMethods[] = { { "initModel", initModel, METH_VARARGS },
{ "startBatch", startBatch, METH_VARARGS },
{ "finishBatch", finishBatch, METH_VARARGS },
{ "checkGradients", checkGradients, METH_VARARGS },
{ "startMultiviewTest", startMultiviewTest, METH_VARARGS },
{ "startFeatureWriter", startFeatureWriter, METH_VARARGS },
{ "syncWithHost", syncWithHost, METH_VARARGS },
{ NULL, NULL }
};
#if defined(_WIN64) || defined(_WIN32)
extern "C" __declspec(dllexport) void initpyconvnet() {
(void) Py_InitModule("pyconvnet", _ConvNetMethods);
import_array();
}
#else
void INITNAME() {
(void) Py_InitModule(QUOTEME(MODELNAME), _ConvNetMethods);
import_array();
}
#endif
PyObject* initModel(PyObject *self, PyObject *args) {
assert(model == NULL);
PyListObject* pyLayerParams;
int pyMinibatchSize;
int pyDeviceID;
if (!PyArg_ParseTuple(args, "O!ii",
&PyList_Type, &pyLayerParams,
&pyMinibatchSize,
&pyDeviceID)) {
return NULL;
}
model = new ConvNet(pyLayerParams,
pyMinibatchSize,
pyDeviceID);
model->start();
return Py_BuildValue("i", 0);
}
/*
* Starts training/testing on the given batch (asynchronous -- returns immediately).
*/
PyObject* startBatch(PyObject *self, PyObject *args) {
assert(model != NULL);
PyListObject* data;
int test = 0;
if (!PyArg_ParseTuple(args, "O!|i",
&PyList_Type, &data,
&test)) {
return NULL;
}
MatrixV& mvec = *getMatrixV((PyObject*)data);
TrainingWorker* wr = new TrainingWorker(*model, *new CPUData(mvec), test);
model->getWorkerQueue().enqueue(wr);
return Py_BuildValue("i", 0);
}
/*
* Starts testing on the given batch (asynchronous -- returns immediately).
*/
PyObject* startMultiviewTest(PyObject *self, PyObject *args) {
assert(model != NULL);
PyListObject* data;
int numViews, logregIdx;
if (!PyArg_ParseTuple(args, "O!ii",
&PyList_Type, &data,
&numViews,
&logregIdx)) {
return NULL;
}
MatrixV& mvec = *getMatrixV((PyObject*)data);
MultiviewTestWorker* wr = new MultiviewTestWorker(*model, *new CPUData(mvec), numViews, logregIdx);
model->getWorkerQueue().enqueue(wr);
return Py_BuildValue("i", 0);
}
PyObject* startFeatureWriter(PyObject *self, PyObject *args) {
assert(model != NULL);
PyListObject* data;
int layerIdx;
if (!PyArg_ParseTuple(args, "O!i",
&PyList_Type, &data,
&layerIdx)) {
return NULL;
}
MatrixV& mvec = *getMatrixV((PyObject*)data);
Matrix& ftrs = *mvec.back();
mvec.pop_back();
FeatureWorker* wr = new FeatureWorker(*model, *new CPUData(mvec), ftrs, layerIdx);
model->getWorkerQueue().enqueue(wr);
return Py_BuildValue("i", 0);
}
/*
* Waits for the trainer to finish training on the batch given to startBatch.
*/
PyObject* finishBatch(PyObject *self, PyObject *args) {
assert(model != NULL);
WorkResult* res = model->getResultQueue().dequeue();
assert(res != NULL);
assert(res->getResultType() == WorkResult::BATCH_DONE);
Cost& cost = res->getResults();
PyObject* dict = PyDict_New();
CostMap& costMap = cost.getCostMap();
for (CostMap::const_iterator it = costMap.begin(); it != costMap.end(); ++it) {
PyObject* v = PyList_New(0);
for (vector<double>::const_iterator iv = it->second->begin(); iv != it->second->end(); ++iv) {
PyObject* f = PyFloat_FromDouble(*iv);
PyList_Append(v, f);
}
PyDict_SetItemString(dict, it->first.c_str(), v);
}
PyObject* retVal = Py_BuildValue("Ni", dict, cost.getNumCases());
delete res; // Deletes cost too
return retVal;
}
PyObject* checkGradients(PyObject *self, PyObject *args) {
assert(model != NULL);
PyListObject* data;
if (!PyArg_ParseTuple(args, "O!",
&PyList_Type, &data)) {
return NULL;
}
MatrixV& mvec = *getMatrixV((PyObject*)data);
GradCheckWorker* wr = new GradCheckWorker(*model, *new CPUData(mvec));
model->getWorkerQueue().enqueue(wr);
WorkResult* res = model->getResultQueue().dequeue();
assert(res != NULL);
assert(res->getResultType() == WorkResult::BATCH_DONE);
delete res;
return Py_BuildValue("i", 0);
}
/*
* Copies weight matrices from GPU to system memory.
*/
PyObject* syncWithHost(PyObject *self, PyObject *args) {
assert(model != NULL);
SyncWorker* wr = new SyncWorker(*model);
model->getWorkerQueue().enqueue(wr);
WorkResult* res = model->getResultQueue().dequeue();
assert(res != NULL);
assert(res->getResultType() == WorkResult::SYNC_DONE);
delete res;
return Py_BuildValue("i", 0);
}
| 5775b37d61e85e0fa7143509adee1bdbbfbce399.cu | /*
* Copyright (c) 2011, Alex Krizhevsky (akrizhevsky@gmail.com)
* All rights reserved.
*
* Redistribution and use in source and binary forms, with or without modification,
* are permitted provided that the following conditions are met:
*
* - Redistributions of source code must retain the above copyright notice,
* this list of conditions and the following disclaimer.
*
* - Redistributions in binary form must reproduce the above copyright notice,
* this list of conditions and the following disclaimer in the documentation
* and/or other materials provided with the distribution.
*
* THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
* AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
* IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
* ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE
* LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
* DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
* LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND
* ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING
* NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE,
* EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
*/
#include <Python.h>
#include <arrayobject.h>
#include <assert.h>
#include <helper_cuda.h>
#include <cublas.h>
#include <time.h>
#include <vector>
#include <matrix.h>
#include <queue.h>
#include <worker.cuh>
#include <util.cuh>
#include <cost.cuh>
#include <pyconvnet.cuh>
#include <convnet.cuh>
using namespace std;
static ConvNet* model = NULL;
static PyMethodDef _ConvNetMethods[] = { { "initModel", initModel, METH_VARARGS },
{ "startBatch", startBatch, METH_VARARGS },
{ "finishBatch", finishBatch, METH_VARARGS },
{ "checkGradients", checkGradients, METH_VARARGS },
{ "startMultiviewTest", startMultiviewTest, METH_VARARGS },
{ "startFeatureWriter", startFeatureWriter, METH_VARARGS },
{ "syncWithHost", syncWithHost, METH_VARARGS },
{ NULL, NULL }
};
#if defined(_WIN64) || defined(_WIN32)
extern "C" __declspec(dllexport) void initpyconvnet() {
(void) Py_InitModule("pyconvnet", _ConvNetMethods);
import_array();
}
#else
void INITNAME() {
(void) Py_InitModule(QUOTEME(MODELNAME), _ConvNetMethods);
import_array();
}
#endif
PyObject* initModel(PyObject *self, PyObject *args) {
assert(model == NULL);
PyListObject* pyLayerParams;
int pyMinibatchSize;
int pyDeviceID;
if (!PyArg_ParseTuple(args, "O!ii",
&PyList_Type, &pyLayerParams,
&pyMinibatchSize,
&pyDeviceID)) {
return NULL;
}
model = new ConvNet(pyLayerParams,
pyMinibatchSize,
pyDeviceID);
model->start();
return Py_BuildValue("i", 0);
}
/*
* Starts training/testing on the given batch (asynchronous -- returns immediately).
*/
PyObject* startBatch(PyObject *self, PyObject *args) {
assert(model != NULL);
PyListObject* data;
int test = 0;
if (!PyArg_ParseTuple(args, "O!|i",
&PyList_Type, &data,
&test)) {
return NULL;
}
MatrixV& mvec = *getMatrixV((PyObject*)data);
TrainingWorker* wr = new TrainingWorker(*model, *new CPUData(mvec), test);
model->getWorkerQueue().enqueue(wr);
return Py_BuildValue("i", 0);
}
/*
* Starts testing on the given batch (asynchronous -- returns immediately).
*/
PyObject* startMultiviewTest(PyObject *self, PyObject *args) {
assert(model != NULL);
PyListObject* data;
int numViews, logregIdx;
if (!PyArg_ParseTuple(args, "O!ii",
&PyList_Type, &data,
&numViews,
&logregIdx)) {
return NULL;
}
MatrixV& mvec = *getMatrixV((PyObject*)data);
MultiviewTestWorker* wr = new MultiviewTestWorker(*model, *new CPUData(mvec), numViews, logregIdx);
model->getWorkerQueue().enqueue(wr);
return Py_BuildValue("i", 0);
}
PyObject* startFeatureWriter(PyObject *self, PyObject *args) {
assert(model != NULL);
PyListObject* data;
int layerIdx;
if (!PyArg_ParseTuple(args, "O!i",
&PyList_Type, &data,
&layerIdx)) {
return NULL;
}
MatrixV& mvec = *getMatrixV((PyObject*)data);
Matrix& ftrs = *mvec.back();
mvec.pop_back();
FeatureWorker* wr = new FeatureWorker(*model, *new CPUData(mvec), ftrs, layerIdx);
model->getWorkerQueue().enqueue(wr);
return Py_BuildValue("i", 0);
}
/*
* Waits for the trainer to finish training on the batch given to startBatch.
*/
PyObject* finishBatch(PyObject *self, PyObject *args) {
assert(model != NULL);
WorkResult* res = model->getResultQueue().dequeue();
assert(res != NULL);
assert(res->getResultType() == WorkResult::BATCH_DONE);
Cost& cost = res->getResults();
PyObject* dict = PyDict_New();
CostMap& costMap = cost.getCostMap();
for (CostMap::const_iterator it = costMap.begin(); it != costMap.end(); ++it) {
PyObject* v = PyList_New(0);
for (vector<double>::const_iterator iv = it->second->begin(); iv != it->second->end(); ++iv) {
PyObject* f = PyFloat_FromDouble(*iv);
PyList_Append(v, f);
}
PyDict_SetItemString(dict, it->first.c_str(), v);
}
PyObject* retVal = Py_BuildValue("Ni", dict, cost.getNumCases());
delete res; // Deletes cost too
return retVal;
}
PyObject* checkGradients(PyObject *self, PyObject *args) {
assert(model != NULL);
PyListObject* data;
if (!PyArg_ParseTuple(args, "O!",
&PyList_Type, &data)) {
return NULL;
}
MatrixV& mvec = *getMatrixV((PyObject*)data);
GradCheckWorker* wr = new GradCheckWorker(*model, *new CPUData(mvec));
model->getWorkerQueue().enqueue(wr);
WorkResult* res = model->getResultQueue().dequeue();
assert(res != NULL);
assert(res->getResultType() == WorkResult::BATCH_DONE);
delete res;
return Py_BuildValue("i", 0);
}
/*
* Copies weight matrices from GPU to system memory.
*/
PyObject* syncWithHost(PyObject *self, PyObject *args) {
assert(model != NULL);
SyncWorker* wr = new SyncWorker(*model);
model->getWorkerQueue().enqueue(wr);
WorkResult* res = model->getResultQueue().dequeue();
assert(res != NULL);
assert(res->getResultType() == WorkResult::SYNC_DONE);
delete res;
return Py_BuildValue("i", 0);
}
|
3c15a1f51b69d8f1e1f4b05c347831d44002d45f.hip | // !!! This is a file automatically generated by hipify!!!
/*
#include "hip/hip_runtime.h"
#include "device_launch_parameters.h"
#include <stdio.h>
#include "../../cuda_by_example/common/book.h"
#include "../../cuda_by_example/common/cpu_bitmap.h"
#define DIM 1000
struct hipComplex {
float r;
float i;
__device__ hipComplex(float a, float b) : r(a), i(b) {}
__device__ float magnitude2(void) {
return r*r + i*i;
}
__device__ hipComplex operator*(const hipComplex& a) {
return hipComplex(r*a.r - i*a.i, i*a.r + r*a.i);
}
__device__ hipComplex operator+(const hipComplex& a) {
return hipComplex(r + a.r, i + a.i);
}
};
__device__ int julia(int x, int y) {
const float scale = 1.5;
float jx = scale*(float)(DIM / 2 - x) / (DIM / 2);
float jy = scale*(float)(DIM / 2 - y) / (DIM / 2);
hipComplex c(-0.8, 0.156);
hipComplex a(jx, jy);
int i = 0;
for (i = 0; i < 200; i++) {
a = a*a + c;
if (a.magnitude2() > 1000)
return 0;
}
return 1;
}
__global__ void kernel(unsigned char *ptr) {
int x = blockIdx.x;
int y = blockIdx.y;
int offset = x + y*gridDim.x;
int juliaValue = julia(x, y);
ptr[offset * 4 + 0] = 255 * juliaValue;
ptr[offset * 4 + 1] = 0;
ptr[offset * 4 + 2] = 0;
ptr[offset * 4 + 3] = 255;
}
int main()
{
CPUBitmap bitmap(DIM, DIM);
unsigned char *dev_bitmap;
HANDLE_ERROR(hipMalloc((void**)&dev_bitmap, bitmap.image_size()));
dim3 grid(DIM, DIM);
kernel<<<grid, 1>>>(dev_bitmap);
HANDLE_ERROR(hipMemcpy(bitmap.get_ptr(), dev_bitmap, bitmap.image_size(), hipMemcpyDeviceToHost));
HANDLE_ERROR(hipFree(dev_bitmap));
bitmap.display_and_exit();
}
*/
/*
#include "hip/hip_runtime.h"
#include "device_launch_parameters.h"
#include <stdio.h>
#include "hip/hip_runtime.h"
#include "../../cuda_by_example/common/book.h"
#include "../../cuda_by_example/common/cpu_anim.h"
#define DIM 1024
#define PI 3.1415926535897932f
#define MAX_TEMP 1.0f
#define MIN_TEMP 0.0001f
#define SPEED 0.25f
// these exist on the GPU side
texture<float> texConstSrc;
texture<float> texIn;
texture<float> texOut;
// this kernel takes in a 2-d array of floats
// it updates the value-of-interest by a scaled value based
// on itself and its nearest neighbors
__global__ void blend_kernel(float *dst,
bool dstOut) {
// map from threadIdx/BlockIdx to pixel position
int x = threadIdx.x + blockIdx.x * blockDim.x;
int y = threadIdx.y + blockIdx.y * blockDim.y;
int offset = x + y * blockDim.x * gridDim.x;
int left = offset - 1;
int right = offset + 1;
if (x == 0) left++;
if (x == DIM - 1) right--;
int top = offset - DIM;
int bottom = offset + DIM;
if (y == 0) top += DIM;
if (y == DIM - 1) bottom -= DIM;
float t, l, c, r, b;
if (dstOut) {
t = tex1Dfetch(texIn, top);
l = tex1Dfetch(texIn, left);
c = tex1Dfetch(texIn, offset);
r = tex1Dfetch(texIn, right);
b = tex1Dfetch(texIn, bottom);
}
else {
t = tex1Dfetch(texOut, top);
l = tex1Dfetch(texOut, left);
c = tex1Dfetch(texOut, offset);
r = tex1Dfetch(texOut, right);
b = tex1Dfetch(texOut, bottom);
}
dst[offset] = c + SPEED * (t + b + r + l - 4 * c);
}
// NOTE - texOffsetConstSrc could either be passed as a
// parameter to this function, or passed in __constant__ memory
// if we declared it as a global above, it would be
// a parameter here:
// __global__ void copy_const_kernel( float *iptr,
// size_t texOffset )
__global__ void copy_const_kernel(float *iptr) {
// map from threadIdx/BlockIdx to pixel position
int x = threadIdx.x + blockIdx.x * blockDim.x;
int y = threadIdx.y + blockIdx.y * blockDim.y;
int offset = x + y * blockDim.x * gridDim.x;
float c = tex1Dfetch(texConstSrc, offset);
if (c != 0)
iptr[offset] = c;
}
// globals needed by the update routine
struct DataBlock {
unsigned char *output_bitmap;
float *dev_inSrc;
float *dev_outSrc;
float *dev_constSrc;
CPUAnimBitmap *bitmap;
hipEvent_t start, stop;
float totalTime;
float frames;
};
void anim_gpu(DataBlock *d, int ticks) {
HANDLE_ERROR(hipEventRecord(d->start, 0));
dim3 blocks(DIM / 16, DIM / 16);
dim3 threads(16, 16);
CPUAnimBitmap *bitmap = d->bitmap;
// since tex is global and bound, we have to use a flag to
// select which is in/out per iteration
volatile bool dstOut = true;
for (int i = 0; i<90; i++) {
float *in, *out;
if (dstOut) {
in = d->dev_inSrc;
out = d->dev_outSrc;
}
else {
out = d->dev_inSrc;
in = d->dev_outSrc;
}
copy_const_kernel << <blocks, threads >> >(in);
blend_kernel << <blocks, threads >> >(out, dstOut);
dstOut = !dstOut;
}
float_to_color << <blocks, threads >> >(d->output_bitmap,
d->dev_inSrc);
HANDLE_ERROR(hipMemcpy(bitmap->get_ptr(),
d->output_bitmap,
bitmap->image_size(),
hipMemcpyDeviceToHost));
HANDLE_ERROR(hipEventRecord(d->stop, 0));
HANDLE_ERROR(hipEventSynchronize(d->stop));
float elapsedTime;
HANDLE_ERROR(hipEventElapsedTime(&elapsedTime,
d->start, d->stop));
d->totalTime += elapsedTime;
++d->frames;
printf("Average Time per frame: %3.1f ms\n",
d->totalTime / d->frames);
}
// clean up memory allocated on the GPU
void anim_exit(DataBlock *d) {
hipUnbindTexture(texIn);
hipUnbindTexture(texOut);
hipUnbindTexture(texConstSrc);
HANDLE_ERROR(hipFree(d->dev_inSrc));
HANDLE_ERROR(hipFree(d->dev_outSrc));
HANDLE_ERROR(hipFree(d->dev_constSrc));
HANDLE_ERROR(hipEventDestroy(d->start));
HANDLE_ERROR(hipEventDestroy(d->stop));
}
int main(void) {
DataBlock data;
CPUAnimBitmap bitmap(DIM, DIM, &data);
data.bitmap = &bitmap;
data.totalTime = 0;
data.frames = 0;
HANDLE_ERROR(hipEventCreate(&data.start));
HANDLE_ERROR(hipEventCreate(&data.stop));
int imageSize = bitmap.image_size();
HANDLE_ERROR(hipMalloc((void**)&data.output_bitmap,
imageSize));
// assume float == 4 chars in size (ie rgba)
HANDLE_ERROR(hipMalloc((void**)&data.dev_inSrc,
imageSize));
HANDLE_ERROR(hipMalloc((void**)&data.dev_outSrc,
imageSize));
HANDLE_ERROR(hipMalloc((void**)&data.dev_constSrc,
imageSize));
HANDLE_ERROR(hipBindTexture(NULL, texConstSrc,
data.dev_constSrc,
imageSize));
HANDLE_ERROR(hipBindTexture(NULL, texIn,
data.dev_inSrc,
imageSize));
HANDLE_ERROR(hipBindTexture(NULL, texOut,
data.dev_outSrc,
imageSize));
// intialize the constant data
float *temp = (float*)malloc(imageSize);
for (int i = 0; i<DIM*DIM; i++) {
temp[i] = 0;
int x = i % DIM;
int y = i / DIM;
if ((x>300) && (x<600) && (y>310) && (y<601))
temp[i] = MAX_TEMP;
}
temp[DIM * 100 + 100] = (MAX_TEMP + MIN_TEMP) / 2;
temp[DIM * 700 + 100] = MIN_TEMP;
temp[DIM * 300 + 300] = MIN_TEMP;
temp[DIM * 200 + 700] = MIN_TEMP;
for (int y = 800; y<900; y++) {
for (int x = 400; x<500; x++) {
temp[x + y*DIM] = MIN_TEMP;
}
}
HANDLE_ERROR(hipMemcpy(data.dev_constSrc, temp,
imageSize,
hipMemcpyHostToDevice));
// initialize the input data
for (int y = 800; y<DIM; y++) {
for (int x = 0; x<200; x++) {
temp[x + y*DIM] = MAX_TEMP;
}
}
HANDLE_ERROR(hipMemcpy(data.dev_inSrc, temp,
imageSize,
hipMemcpyHostToDevice));
free(temp);
bitmap.anim_and_exit((void(*)(void*, int))anim_gpu,
(void(*)(void*))anim_exit);
}
*/
#include "hip/hip_runtime.h"
#include "device_launch_parameters.h"
#include <stdio.h>
#include <stdlib.h>
#include "../../cuda_by_example/common/book.h"
#include "../../cuda_by_example/common/cpu_bitmap.h"
#include "hip/hip_runtime.h"
#include "cuda_gl_interop.h"
#include <math.h>
PFNGLBINDBUFFERARBPROC glBindBuffer = NULL;
PFNGLDELETEBUFFERSARBPROC glDeleteBuffers = NULL;
PFNGLGENBUFFERSARBPROC glGenBuffers = NULL;
PFNGLBUFFERDATAARBPROC glBufferData = NULL;
#define DIM 512
GLuint bufferObj;
cudaGraphicsResource *resource;
// based on ripple code, but uses uchar4 which is the type of data
// graphic inter op uses. see screenshot - basic2.png
__global__ void kernel(uchar4 *ptr) {
// map from threadIdx/BlockIdx to pixel position
int x = threadIdx.x + blockIdx.x * blockDim.x;
int y = threadIdx.y + blockIdx.y * blockDim.y;
int offset = x + y * blockDim.x * gridDim.x;
// now calculate the value at that position
float fx = x / (float)DIM - 0.5f;
float fy = y / (float)DIM - 0.5f;
unsigned char green = 128 + 127 *
sin(abs(fx * 100) - abs(fy * 100));
// accessing uchar4 vs unsigned char*
ptr[offset].x = 0;
ptr[offset].y = green;
ptr[offset].z = 0;
ptr[offset].w = 255;
}
static void key_func(unsigned char key, int x, int y) {
switch (key) {
case 27:
// clean up OpenGL and CUDA
HANDLE_ERROR(hipGraphicsUnregisterResource(resource));
glBindBuffer(GL_PIXEL_UNPACK_BUFFER_ARB, 0);
glDeleteBuffers(1, &bufferObj);
exit(0);
}
}
static void draw_func(void) {
// we pass zero as the last parameter, because out bufferObj is now
// the source, and the field switches from being a pointer to a
// bitmap to now mean an offset into a bitmap object
glDrawPixels(DIM, DIM, GL_RGBA, GL_UNSIGNED_BYTE, 0);
glutSwapBuffers();
}
int main(int argc, char **argv) {
hipDeviceProp_t prop;
int dev;
memset(&prop, 0, sizeof(hipDeviceProp_t));
prop.major = 1;
prop.minor = 0;
HANDLE_ERROR(hipChooseDevice(&dev, &prop));
// tell CUDA which dev we will be using for graphic interop
// from the programming guide: Interoperability with OpenGL
// requires that the CUDA device be specified by
// hipGLSetGLDevice() before any other runtime calls.
HANDLE_ERROR(hipGLSetGLDevice(dev));
// these GLUT calls need to be made before the other OpenGL
// calls, else we get a seg fault
glutInit(&argc, argv);
glutInitDisplayMode(GLUT_DOUBLE | GLUT_RGBA);
glutInitWindowSize(DIM, DIM);
glutCreateWindow("bitmap");
glBindBuffer = (PFNGLBINDBUFFERARBPROC)GET_PROC_ADDRESS("glBindBuffer");
glDeleteBuffers = (PFNGLDELETEBUFFERSARBPROC)GET_PROC_ADDRESS("glDeleteBuffers");
glGenBuffers = (PFNGLGENBUFFERSARBPROC)GET_PROC_ADDRESS("glGenBuffers");
glBufferData = (PFNGLBUFFERDATAARBPROC)GET_PROC_ADDRESS("glBufferData");
// the first three are standard OpenGL, the 4th is the CUDA reg
// of the bitmap these calls exist starting in OpenGL 1.5
glGenBuffers(1, &bufferObj);
glBindBuffer(GL_PIXEL_UNPACK_BUFFER_ARB, bufferObj);
glBufferData(GL_PIXEL_UNPACK_BUFFER_ARB, DIM * DIM * 4,
NULL, GL_DYNAMIC_DRAW_ARB);
HANDLE_ERROR(
hipGraphicsGLRegisterBuffer(&resource,
bufferObj,
hipGraphicsMapFlagsNone));
// do work with the memory dst being on the GPU, gotten via mapping
HANDLE_ERROR(hipGraphicsMapResources(1, &resource, NULL));
uchar4* devPtr;
size_t size;
HANDLE_ERROR(
hipGraphicsResourceGetMappedPointer((void**)&devPtr,
&size,
resource));
dim3 grids(DIM / 16, DIM / 16);
dim3 threads(16, 16);
hipLaunchKernelGGL(( kernel) , dim3(grids), dim3(threads) , 0, 0, devPtr);
HANDLE_ERROR(hipGraphicsUnmapResources(1, &resource, NULL));
// set up GLUT and kick off main loop
glutKeyboardFunc(key_func);
glutDisplayFunc(draw_func);
glutMainLoop();
}
| 3c15a1f51b69d8f1e1f4b05c347831d44002d45f.cu | /*
#include "cuda_runtime.h"
#include "device_launch_parameters.h"
#include <stdio.h>
#include "../../cuda_by_example/common/book.h"
#include "../../cuda_by_example/common/cpu_bitmap.h"
#define DIM 1000
struct cuComplex {
float r;
float i;
__device__ cuComplex(float a, float b) : r(a), i(b) {}
__device__ float magnitude2(void) {
return r*r + i*i;
}
__device__ cuComplex operator*(const cuComplex& a) {
return cuComplex(r*a.r - i*a.i, i*a.r + r*a.i);
}
__device__ cuComplex operator+(const cuComplex& a) {
return cuComplex(r + a.r, i + a.i);
}
};
__device__ int julia(int x, int y) {
const float scale = 1.5;
float jx = scale*(float)(DIM / 2 - x) / (DIM / 2);
float jy = scale*(float)(DIM / 2 - y) / (DIM / 2);
cuComplex c(-0.8, 0.156);
cuComplex a(jx, jy);
int i = 0;
for (i = 0; i < 200; i++) {
a = a*a + c;
if (a.magnitude2() > 1000)
return 0;
}
return 1;
}
__global__ void kernel(unsigned char *ptr) {
int x = blockIdx.x;
int y = blockIdx.y;
int offset = x + y*gridDim.x;
int juliaValue = julia(x, y);
ptr[offset * 4 + 0] = 255 * juliaValue;
ptr[offset * 4 + 1] = 0;
ptr[offset * 4 + 2] = 0;
ptr[offset * 4 + 3] = 255;
}
int main()
{
CPUBitmap bitmap(DIM, DIM);
unsigned char *dev_bitmap;
HANDLE_ERROR(cudaMalloc((void**)&dev_bitmap, bitmap.image_size()));
dim3 grid(DIM, DIM);
kernel<<<grid, 1>>>(dev_bitmap);
HANDLE_ERROR(cudaMemcpy(bitmap.get_ptr(), dev_bitmap, bitmap.image_size(), cudaMemcpyDeviceToHost));
HANDLE_ERROR(cudaFree(dev_bitmap));
bitmap.display_and_exit();
}
*/
/*
#include "cuda_runtime.h"
#include "device_launch_parameters.h"
#include <stdio.h>
#include "cuda.h"
#include "../../cuda_by_example/common/book.h"
#include "../../cuda_by_example/common/cpu_anim.h"
#define DIM 1024
#define PI 3.1415926535897932f
#define MAX_TEMP 1.0f
#define MIN_TEMP 0.0001f
#define SPEED 0.25f
// these exist on the GPU side
texture<float> texConstSrc;
texture<float> texIn;
texture<float> texOut;
// this kernel takes in a 2-d array of floats
// it updates the value-of-interest by a scaled value based
// on itself and its nearest neighbors
__global__ void blend_kernel(float *dst,
bool dstOut) {
// map from threadIdx/BlockIdx to pixel position
int x = threadIdx.x + blockIdx.x * blockDim.x;
int y = threadIdx.y + blockIdx.y * blockDim.y;
int offset = x + y * blockDim.x * gridDim.x;
int left = offset - 1;
int right = offset + 1;
if (x == 0) left++;
if (x == DIM - 1) right--;
int top = offset - DIM;
int bottom = offset + DIM;
if (y == 0) top += DIM;
if (y == DIM - 1) bottom -= DIM;
float t, l, c, r, b;
if (dstOut) {
t = tex1Dfetch(texIn, top);
l = tex1Dfetch(texIn, left);
c = tex1Dfetch(texIn, offset);
r = tex1Dfetch(texIn, right);
b = tex1Dfetch(texIn, bottom);
}
else {
t = tex1Dfetch(texOut, top);
l = tex1Dfetch(texOut, left);
c = tex1Dfetch(texOut, offset);
r = tex1Dfetch(texOut, right);
b = tex1Dfetch(texOut, bottom);
}
dst[offset] = c + SPEED * (t + b + r + l - 4 * c);
}
// NOTE - texOffsetConstSrc could either be passed as a
// parameter to this function, or passed in __constant__ memory
// if we declared it as a global above, it would be
// a parameter here:
// __global__ void copy_const_kernel( float *iptr,
// size_t texOffset )
__global__ void copy_const_kernel(float *iptr) {
// map from threadIdx/BlockIdx to pixel position
int x = threadIdx.x + blockIdx.x * blockDim.x;
int y = threadIdx.y + blockIdx.y * blockDim.y;
int offset = x + y * blockDim.x * gridDim.x;
float c = tex1Dfetch(texConstSrc, offset);
if (c != 0)
iptr[offset] = c;
}
// globals needed by the update routine
struct DataBlock {
unsigned char *output_bitmap;
float *dev_inSrc;
float *dev_outSrc;
float *dev_constSrc;
CPUAnimBitmap *bitmap;
cudaEvent_t start, stop;
float totalTime;
float frames;
};
void anim_gpu(DataBlock *d, int ticks) {
HANDLE_ERROR(cudaEventRecord(d->start, 0));
dim3 blocks(DIM / 16, DIM / 16);
dim3 threads(16, 16);
CPUAnimBitmap *bitmap = d->bitmap;
// since tex is global and bound, we have to use a flag to
// select which is in/out per iteration
volatile bool dstOut = true;
for (int i = 0; i<90; i++) {
float *in, *out;
if (dstOut) {
in = d->dev_inSrc;
out = d->dev_outSrc;
}
else {
out = d->dev_inSrc;
in = d->dev_outSrc;
}
copy_const_kernel << <blocks, threads >> >(in);
blend_kernel << <blocks, threads >> >(out, dstOut);
dstOut = !dstOut;
}
float_to_color << <blocks, threads >> >(d->output_bitmap,
d->dev_inSrc);
HANDLE_ERROR(cudaMemcpy(bitmap->get_ptr(),
d->output_bitmap,
bitmap->image_size(),
cudaMemcpyDeviceToHost));
HANDLE_ERROR(cudaEventRecord(d->stop, 0));
HANDLE_ERROR(cudaEventSynchronize(d->stop));
float elapsedTime;
HANDLE_ERROR(cudaEventElapsedTime(&elapsedTime,
d->start, d->stop));
d->totalTime += elapsedTime;
++d->frames;
printf("Average Time per frame: %3.1f ms\n",
d->totalTime / d->frames);
}
// clean up memory allocated on the GPU
void anim_exit(DataBlock *d) {
cudaUnbindTexture(texIn);
cudaUnbindTexture(texOut);
cudaUnbindTexture(texConstSrc);
HANDLE_ERROR(cudaFree(d->dev_inSrc));
HANDLE_ERROR(cudaFree(d->dev_outSrc));
HANDLE_ERROR(cudaFree(d->dev_constSrc));
HANDLE_ERROR(cudaEventDestroy(d->start));
HANDLE_ERROR(cudaEventDestroy(d->stop));
}
int main(void) {
DataBlock data;
CPUAnimBitmap bitmap(DIM, DIM, &data);
data.bitmap = &bitmap;
data.totalTime = 0;
data.frames = 0;
HANDLE_ERROR(cudaEventCreate(&data.start));
HANDLE_ERROR(cudaEventCreate(&data.stop));
int imageSize = bitmap.image_size();
HANDLE_ERROR(cudaMalloc((void**)&data.output_bitmap,
imageSize));
// assume float == 4 chars in size (ie rgba)
HANDLE_ERROR(cudaMalloc((void**)&data.dev_inSrc,
imageSize));
HANDLE_ERROR(cudaMalloc((void**)&data.dev_outSrc,
imageSize));
HANDLE_ERROR(cudaMalloc((void**)&data.dev_constSrc,
imageSize));
HANDLE_ERROR(cudaBindTexture(NULL, texConstSrc,
data.dev_constSrc,
imageSize));
HANDLE_ERROR(cudaBindTexture(NULL, texIn,
data.dev_inSrc,
imageSize));
HANDLE_ERROR(cudaBindTexture(NULL, texOut,
data.dev_outSrc,
imageSize));
// intialize the constant data
float *temp = (float*)malloc(imageSize);
for (int i = 0; i<DIM*DIM; i++) {
temp[i] = 0;
int x = i % DIM;
int y = i / DIM;
if ((x>300) && (x<600) && (y>310) && (y<601))
temp[i] = MAX_TEMP;
}
temp[DIM * 100 + 100] = (MAX_TEMP + MIN_TEMP) / 2;
temp[DIM * 700 + 100] = MIN_TEMP;
temp[DIM * 300 + 300] = MIN_TEMP;
temp[DIM * 200 + 700] = MIN_TEMP;
for (int y = 800; y<900; y++) {
for (int x = 400; x<500; x++) {
temp[x + y*DIM] = MIN_TEMP;
}
}
HANDLE_ERROR(cudaMemcpy(data.dev_constSrc, temp,
imageSize,
cudaMemcpyHostToDevice));
// initialize the input data
for (int y = 800; y<DIM; y++) {
for (int x = 0; x<200; x++) {
temp[x + y*DIM] = MAX_TEMP;
}
}
HANDLE_ERROR(cudaMemcpy(data.dev_inSrc, temp,
imageSize,
cudaMemcpyHostToDevice));
free(temp);
bitmap.anim_and_exit((void(*)(void*, int))anim_gpu,
(void(*)(void*))anim_exit);
}
*/
#include "cuda_runtime.h"
#include "device_launch_parameters.h"
#include <stdio.h>
#include <stdlib.h>
#include "../../cuda_by_example/common/book.h"
#include "../../cuda_by_example/common/cpu_bitmap.h"
#include "cuda.h"
#include "cuda_gl_interop.h"
#include <math.h>
PFNGLBINDBUFFERARBPROC glBindBuffer = NULL;
PFNGLDELETEBUFFERSARBPROC glDeleteBuffers = NULL;
PFNGLGENBUFFERSARBPROC glGenBuffers = NULL;
PFNGLBUFFERDATAARBPROC glBufferData = NULL;
#define DIM 512
GLuint bufferObj;
cudaGraphicsResource *resource;
// based on ripple code, but uses uchar4 which is the type of data
// graphic inter op uses. see screenshot - basic2.png
__global__ void kernel(uchar4 *ptr) {
// map from threadIdx/BlockIdx to pixel position
int x = threadIdx.x + blockIdx.x * blockDim.x;
int y = threadIdx.y + blockIdx.y * blockDim.y;
int offset = x + y * blockDim.x * gridDim.x;
// now calculate the value at that position
float fx = x / (float)DIM - 0.5f;
float fy = y / (float)DIM - 0.5f;
unsigned char green = 128 + 127 *
sin(abs(fx * 100) - abs(fy * 100));
// accessing uchar4 vs unsigned char*
ptr[offset].x = 0;
ptr[offset].y = green;
ptr[offset].z = 0;
ptr[offset].w = 255;
}
static void key_func(unsigned char key, int x, int y) {
switch (key) {
case 27:
// clean up OpenGL and CUDA
HANDLE_ERROR(cudaGraphicsUnregisterResource(resource));
glBindBuffer(GL_PIXEL_UNPACK_BUFFER_ARB, 0);
glDeleteBuffers(1, &bufferObj);
exit(0);
}
}
static void draw_func(void) {
// we pass zero as the last parameter, because out bufferObj is now
// the source, and the field switches from being a pointer to a
// bitmap to now mean an offset into a bitmap object
glDrawPixels(DIM, DIM, GL_RGBA, GL_UNSIGNED_BYTE, 0);
glutSwapBuffers();
}
int main(int argc, char **argv) {
cudaDeviceProp prop;
int dev;
memset(&prop, 0, sizeof(cudaDeviceProp));
prop.major = 1;
prop.minor = 0;
HANDLE_ERROR(cudaChooseDevice(&dev, &prop));
// tell CUDA which dev we will be using for graphic interop
// from the programming guide: Interoperability with OpenGL
// requires that the CUDA device be specified by
// cudaGLSetGLDevice() before any other runtime calls.
HANDLE_ERROR(cudaGLSetGLDevice(dev));
// these GLUT calls need to be made before the other OpenGL
// calls, else we get a seg fault
glutInit(&argc, argv);
glutInitDisplayMode(GLUT_DOUBLE | GLUT_RGBA);
glutInitWindowSize(DIM, DIM);
glutCreateWindow("bitmap");
glBindBuffer = (PFNGLBINDBUFFERARBPROC)GET_PROC_ADDRESS("glBindBuffer");
glDeleteBuffers = (PFNGLDELETEBUFFERSARBPROC)GET_PROC_ADDRESS("glDeleteBuffers");
glGenBuffers = (PFNGLGENBUFFERSARBPROC)GET_PROC_ADDRESS("glGenBuffers");
glBufferData = (PFNGLBUFFERDATAARBPROC)GET_PROC_ADDRESS("glBufferData");
// the first three are standard OpenGL, the 4th is the CUDA reg
// of the bitmap these calls exist starting in OpenGL 1.5
glGenBuffers(1, &bufferObj);
glBindBuffer(GL_PIXEL_UNPACK_BUFFER_ARB, bufferObj);
glBufferData(GL_PIXEL_UNPACK_BUFFER_ARB, DIM * DIM * 4,
NULL, GL_DYNAMIC_DRAW_ARB);
HANDLE_ERROR(
cudaGraphicsGLRegisterBuffer(&resource,
bufferObj,
cudaGraphicsMapFlagsNone));
// do work with the memory dst being on the GPU, gotten via mapping
HANDLE_ERROR(cudaGraphicsMapResources(1, &resource, NULL));
uchar4* devPtr;
size_t size;
HANDLE_ERROR(
cudaGraphicsResourceGetMappedPointer((void**)&devPtr,
&size,
resource));
dim3 grids(DIM / 16, DIM / 16);
dim3 threads(16, 16);
kernel <<<grids, threads >>>(devPtr);
HANDLE_ERROR(cudaGraphicsUnmapResources(1, &resource, NULL));
// set up GLUT and kick off main loop
glutKeyboardFunc(key_func);
glutDisplayFunc(draw_func);
glutMainLoop();
}
|
60c70410387cd00e0d084a4911728f5bdc63b8c0.hip | // !!! This is a file automatically generated by hipify!!!
#include <stdbool.h>
#include <stdio.h>
#include <string.h>
#include <getopt.h>
#include <hiprand/hiprand_kernel.h>
#include <stdlib.h>
#include <hip/hip_runtime.h>
#include <sys/time.h>
#include "kernelCalculateHistogram.cu"
#include<chrono>
#include<iostream>
using namespace std;
using namespace std::chrono;
int blocks_[20][2] = {{8,8},{16,16},{24,24},{32,32},{1,64},{1,128},{1,192},{1,256},{1,320},{1,384},{1,448},{1,512},{1,576},{1,640},{1,704},{1,768},{1,832},{1,896},{1,960},{1,1024}};
int matrices_[7][2] = {{240,240},{496,496},{784,784},{1016,1016},{1232,1232},{1680,1680},{2024,2024}};
int main(int argc, char **argv) {
hipSetDevice(0);
char* p;int matrix_len=strtol(argv[1], &p, 10);
for(int matrix_looper=0;matrix_looper<matrix_len;matrix_looper++){
for(int block_looper=0;block_looper<20;block_looper++){
int XSIZE=matrices_[matrix_looper][0],YSIZE=matrices_[matrix_looper][1],BLOCKX=blocks_[block_looper][0],BLOCKY=blocks_[block_looper][1];
unsigned int *histogram = NULL;
hipMalloc(&histogram, XSIZE*YSIZE);
unsigned char *rawPixels = NULL;
hipMalloc(&rawPixels, XSIZE*YSIZE);
long chunkSize = XSIZE*YSIZE;
long totalPixels = 1;
int iXSIZE= XSIZE;
int iYSIZE= YSIZE;
while(iXSIZE%BLOCKX!=0)
{
iXSIZE++;
}
while(iYSIZE%BLOCKY!=0)
{
iYSIZE++;
}
dim3 gridBlock(iXSIZE/BLOCKX, iYSIZE/BLOCKY);
dim3 threadBlock(BLOCKX, BLOCKY);
hipFree(0);hipLaunchKernelGGL((
kernelCalculateHistogram), dim3(gridBlock),dim3(threadBlock), 0, 0, histogram,rawPixels,chunkSize,totalPixels);
hipDeviceSynchronize();
for (int loop_counter = 0; loop_counter < 10; ++loop_counter) {hipLaunchKernelGGL((
kernelCalculateHistogram), dim3(gridBlock),dim3(threadBlock), 0, 0, histogram,rawPixels,chunkSize,totalPixels);
}
auto start = steady_clock::now();
for (int loop_counter = 0; loop_counter < 1000; loop_counter++) {hipLaunchKernelGGL((
kernelCalculateHistogram), dim3(gridBlock),dim3(threadBlock), 0, 0, histogram,rawPixels,chunkSize,totalPixels);
}
auto end = steady_clock::now();
auto usecs = duration_cast<duration<float, microseconds::period> >(end - start);
cout <<'['<<usecs.count()<<','<<'('<<BLOCKX<<','<<BLOCKY<<')' << ','<<'('<<XSIZE<<','<<YSIZE<<')'<<']' << endl;
}
}} | 60c70410387cd00e0d084a4911728f5bdc63b8c0.cu | #include <stdbool.h>
#include <stdio.h>
#include <string.h>
#include <getopt.h>
#include <curand_kernel.h>
#include <stdlib.h>
#include <cuda.h>
#include <sys/time.h>
#include "kernelCalculateHistogram.cu"
#include<chrono>
#include<iostream>
using namespace std;
using namespace std::chrono;
int blocks_[20][2] = {{8,8},{16,16},{24,24},{32,32},{1,64},{1,128},{1,192},{1,256},{1,320},{1,384},{1,448},{1,512},{1,576},{1,640},{1,704},{1,768},{1,832},{1,896},{1,960},{1,1024}};
int matrices_[7][2] = {{240,240},{496,496},{784,784},{1016,1016},{1232,1232},{1680,1680},{2024,2024}};
int main(int argc, char **argv) {
cudaSetDevice(0);
char* p;int matrix_len=strtol(argv[1], &p, 10);
for(int matrix_looper=0;matrix_looper<matrix_len;matrix_looper++){
for(int block_looper=0;block_looper<20;block_looper++){
int XSIZE=matrices_[matrix_looper][0],YSIZE=matrices_[matrix_looper][1],BLOCKX=blocks_[block_looper][0],BLOCKY=blocks_[block_looper][1];
unsigned int *histogram = NULL;
cudaMalloc(&histogram, XSIZE*YSIZE);
unsigned char *rawPixels = NULL;
cudaMalloc(&rawPixels, XSIZE*YSIZE);
long chunkSize = XSIZE*YSIZE;
long totalPixels = 1;
int iXSIZE= XSIZE;
int iYSIZE= YSIZE;
while(iXSIZE%BLOCKX!=0)
{
iXSIZE++;
}
while(iYSIZE%BLOCKY!=0)
{
iYSIZE++;
}
dim3 gridBlock(iXSIZE/BLOCKX, iYSIZE/BLOCKY);
dim3 threadBlock(BLOCKX, BLOCKY);
cudaFree(0);
kernelCalculateHistogram<<<gridBlock,threadBlock>>>(histogram,rawPixels,chunkSize,totalPixels);
cudaDeviceSynchronize();
for (int loop_counter = 0; loop_counter < 10; ++loop_counter) {
kernelCalculateHistogram<<<gridBlock,threadBlock>>>(histogram,rawPixels,chunkSize,totalPixels);
}
auto start = steady_clock::now();
for (int loop_counter = 0; loop_counter < 1000; loop_counter++) {
kernelCalculateHistogram<<<gridBlock,threadBlock>>>(histogram,rawPixels,chunkSize,totalPixels);
}
auto end = steady_clock::now();
auto usecs = duration_cast<duration<float, microseconds::period> >(end - start);
cout <<'['<<usecs.count()<<','<<'('<<BLOCKX<<','<<BLOCKY<<')' << ','<<'('<<XSIZE<<','<<YSIZE<<')'<<']' << endl;
}
}} |
d17cc1867df356931a50e7504127567a245b86f1.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
// @file weakloss_gpu.cu
// @brief Weak Loss block implementation (gpu)
// @author Ivn Gonzlez Daz
#include "weakloss.hpp"
#include "../datacu.hpp"
#include <assert.h>
#include <float.h>
#include <stdio.h>
#include <sm_35_atomic_functions.h>
//#include "../mexutils.h"
#include <stdio.h>
#include <string.h>
#include <unistd.h>
extern __device__ double atomicAdd(double* address, double val);
#define RBorders 0.5
/* ---------------------------------------------------------------- */
/* weakloss */
/* ---------------------------------------------------------------- */
/*Kernel that limits the max value of Lambda*/
template<typename T> __global__ void
limLambda_kernel
(T* lambda,
const T*labels,
const T*A,
float maxLambda,
const int channels,
const int numIm,
const int pooledVolume)
{
int pooledIndex = threadIdx.x + blockIdx.x * blockDim.x;
if (pooledIndex < pooledVolume) {
lambda[pooledIndex]=max(lambda[pooledIndex],T(0));
lambda[pooledIndex]=min(lambda[pooledIndex],T(maxLambda));
}
}
/*Kernel that counts the total number of valid pixels*/
template<typename T> __global__ void
contValid_kernel
(const T* pcoords,
int* validPixels,
int* validOuterPixels,
const int pooledVolume,
const int height,
const int width,
const int channels,
const int numIm)
{
int pooledIndex = threadIdx.x + blockIdx.x * blockDim.x;
pcoords+=2*pooledIndex*width*height;
//Set the arrays
if (pooledIndex < pooledVolume) {
validPixels[pooledIndex]=0;
validOuterPixels[pooledIndex]=0;
for (int y = 0; y < height; ++y) {
for (int x = 0; x < width; ++x) {
//If they are valid
if(pcoords[x * height + y]>=0)
validPixels[pooledIndex]++;
if(pcoords[x * height + y]>=RBorders)
validOuterPixels[pooledIndex]++;
}
}
}
}
/*Weak loss forward kernel*/
template<typename T> __global__ void
weakloss_kernel
(T* LCost,
T* DCost,
const T* data,
const T* pcoords,
const T* labels,
const T* lambda,
T* nlambda,
const T* A,
const T* b,
const T*beta,
int* validPixels,
int* validOuterPixels,
T mu,
int iter,
const bool *done,
const int pooledVolume,
const int height,
const int width,
const int channels,
const int numIm)
{
int pooledIndex = threadIdx.x + blockIdx.x * blockDim.x;
if (pooledIndex < pooledVolume) {
//Get the locations of the kernel
//Image
int pim=pooledIndex/(height*width);
//Pixel
int px=pooledIndex-pim*height*width;
T validPx=(T)validPixels[pim];
T validOuterPx=(T)validOuterPixels[pim];
T validInnerPx=validPx-validOuterPx;
//Set the arrays on their initial locations
data += pim*channels*width*height + px ;
pcoords += pim*2*width*height + px ;
labels += pim*channels;
lambda += pim*channels*2;
nlambda += pim*channels*2;
LCost +=pim;
DCost +=pim;
done +=pim;
//If the location is valid
if(*pcoords>=0 && *done==0){
const T* tdata;
T *P=(T*)malloc(channels*sizeof(T));
int l;
//Get maximum values to limit the inputs
T maxLambda=0;
T maxValue=0;
T weight=0;
tdata=data;
for (int z = 0; z < channels; ++z) {
l=(int)labels[z];
maxLambda=max(*tdata+A[2*l]*lambda[2*z]+A[2*l+1]*lambda[2*z+1],maxLambda);
maxValue=max(maxValue,*tdata);
//maxLambda=maxValue;
tdata+=width*height;
if(l>0)
weight+=1-beta[z];
else
weight+=beta[z];
}
weight=weight*2/channels;
//Channels loop
T sumData=0;
T normalizer=0;
T ndata;
tdata=data;
for (int z = 0; z < channels; z++) {
l=(int)labels[z];
//Non-spatial case: count over all pixels
if(l<3){
P[z]=exp(*tdata+A[2*l]*lambda[2*z]+A[2*l+1]*lambda[2*z+1]-maxLambda);
}
//Spatial case, we just consider as possible pixels in the lesion boundary
else{
//We consider as valid pixels on the boundaries
if(*pcoords>=RBorders)
P[z]=exp(*tdata+A[2*l]*lambda[2*z]-maxLambda);
//In the lesion center
else
P[z]=exp(*tdata+A[2*l+1]*lambda[2*z+1]-maxLambda);
}
normalizer+=P[z];
sumData+=exp(*tdata-maxValue);
//Advance one channel
tdata+=width*height;
}
//Update Dual Cost
if(normalizer>0){
atomicAdd(DCost,-weight*(log(normalizer)+maxLambda));
}
else{
atomicAdd(DCost,-weight*(log(T(0.000001))));
}
//Normalization and Lagrangian update
tdata=data;
T inc_LCost=0;
T inc_DCost=0;
for (int z = 0; z < channels; ++z){
l=(int)labels[z];
if(normalizer>0)
P[z]=P[z]/normalizer;
else
P[z]=0;
if(sumData>0)
ndata=exp(*tdata-maxValue)/sumData;
else
ndata=0;
//Update Lambda
if(l<3){
T inc=b[2*l]-A[2*l]*P[z];
atomicAdd(nlambda+2*z,mu*inc/validPx);
inc=b[2*l+1]-A[2*l+1]*P[z];
atomicAdd(nlambda+2*z+1,mu*inc/validPx);
inc_DCost+=lambda[2*z]*b[2*l]+lambda[2*z+1]*b[2*l+1];
}
//Spatially constrained case
else{
if(*pcoords>=RBorders){
T inc=b[2*l]-A[2*l]*P[z];
inc_DCost+=lambda[2*z]*b[2*l];
atomicAdd(nlambda+2*z,mu*inc/validOuterPx);
}
else{
T inc=b[2*l+1]-A[2*l+1]*P[z];
inc_DCost+=lambda[2*z+1]*b[2*l+1];
atomicAdd(nlambda+2*z+1,mu*inc/validInnerPx);
}
}
//Update Lagrangian Cost
ndata=max(ndata,T(0.000001));
if(P[z]>0)
inc_LCost+=weight*P[z]*log(P[z]/ndata);
//Advance one channel
tdata+=width*height;
}
atomicAdd(DCost,inc_DCost);
atomicAdd(LCost,inc_LCost);
free(P);
}
}
}
/*Weak loss backward kernel*/
template <typename T> __global__ void
weakloss_backward_kernel(T* derData,
const T* data,
const T* pcoords,
const T* labels,
T* lambda,
const T* derOutput,
const T* A,
const T* b,
const T* beta,
const float limLambda,
const int pooledVolume,
const int height,
const int width,
const int channels,
const int numIm)
{
int pooledIndex = threadIdx.x + blockIdx.x * blockDim.x;
if (pooledIndex < pooledVolume) {
//Get the locations of the kernel
//Image
int pim=pooledIndex/(height*width);
//Pixel
int px=pooledIndex-pim*height*width;
//Set the arrays on their initial locations
data += pim*channels*width*height + px ;
pcoords += pim*2*width*height + px ;
labels += pim*channels;
lambda += pim*channels*2;
derData += pim*channels*width*height + px ;
//If the location is valid
if(*pcoords>=0){
int l;
const T* tdata;
T *P=(T*)malloc(channels*sizeof(T));
//Get maximum values to limit the inputs
T maxLambda=0;
T maxValue=0;
T weight=0;
tdata=data;
for (int z = 0; z < channels; ++z) {
l=(int)labels[z];
maxLambda=max(*tdata+A[2*l]*lambda[2*z]+A[2*l+1]*lambda[2*z+1],maxLambda);
maxValue=max(maxValue,*tdata);
tdata+=width*height;
if(l>0)
weight+=1-beta[z];
else
weight+=beta[z];
}
weight=2*weight/channels;
//weight=weight/T(width*height);
//Channels loop
T sumData=0;
T normalizer=0;
T ndata;
tdata=data;
for (int z = 0; z < channels; ++z) {
l=(int)labels[z];
//Regular case: count over all pixels
if(l<3){
P[z]=exp(*tdata+A[2*l]*lambda[2*z]+A[2*l+1]*lambda[2*z+1]-maxLambda);
}
//Spatially constrained case: consideronly pixels in the boundary
else{
if(*pcoords>=RBorders)
P[z]=exp(*tdata+A[2*l]*lambda[2*z]-maxLambda);
else
P[z]=exp(*tdata+A[2*l+1]*lambda[2*z+1]-maxLambda);
}
normalizer+=P[z];
sumData+=exp(*tdata-maxValue);
//Advance one channel
tdata+=width*height;
}
tdata=data;
for (int z = 0; z < channels; ++z){
if(normalizer>0)
P[z]=P[z]/normalizer;
else{
P[z]=T(0);
}
if(sumData>0)
ndata=exp(*tdata-maxValue)/sumData;
else{
ndata=T(0);
}
*derData=weight*derOutput[0]*(ndata-P[z]);
derData+=width*height;
tdata+=width*height;
}
free(P);
}
}
}
/* ---------------------------------------------------------------- */
/* Interface */
/* ---------------------------------------------------------------- */
namespace vl { namespace impl {
template <typename type>
struct weakloss<vl::VLDT_GPU, type>
{
static vl::ErrorCode
forward(type * LCost,
type const* data,
type const* pcoords,
type const* labels,
type * lambda,
type const* A,
type const* b,
type const* beta,
float maxLambda,
size_t height, size_t width, size_t channels, size_t numIm)
{
int iter;
int K=2*channels,Niter=200,*validPixels,*validOuterPixels;
type *DCost,*DCost_GPU,*DCost_ant,*LCost_GPU,*BCost;
type *nlambda,*best_lambda;//,*cpu_labels;
type mu=10.0,TDCost,TDCost_ant=-1e10;
bool *done;
int pooledVolume;
hipMalloc(&nlambda, K*numIm*sizeof(type));
hipMalloc(&validPixels, numIm*sizeof(int));
hipMalloc(&validOuterPixels, numIm*sizeof(int));
hipMalloc(&DCost_GPU, numIm*sizeof(type));
hipMalloc(&LCost_GPU, numIm*sizeof(type));
hipMalloc(&done, numIm*sizeof(bool));
hipMemset(done,false,numIm*sizeof(bool));
DCost=(type*)malloc(numIm*sizeof(type));
DCost_ant=(type*)malloc(numIm*sizeof(type));
BCost=(type*)malloc(numIm*sizeof(type));
best_lambda=(type*)malloc(K*numIm*sizeof(type));
//Count number of valid data
pooledVolume=(int)numIm;
hipLaunchKernelGGL(( contValid_kernel<type>)
, dim3(divideAndRoundUp(pooledVolume, VL_CUDA_NUM_THREADS)), dim3(VL_CUDA_NUM_THREADS) , 0, 0,
pcoords,validPixels,validOuterPixels,numIm, height, width, channels, numIm);
//Limit lambda values
pooledVolume=numIm*K;
hipLaunchKernelGGL(( limLambda_kernel<type>)
, dim3(divideAndRoundUp(pooledVolume, VL_CUDA_NUM_THREADS)), dim3(VL_CUDA_NUM_THREADS) , 0, 0,
lambda,labels,A,maxLambda,channels,numIm,numIm*K);
hipMemcpy(best_lambda, lambda, K*numIm*sizeof(type),hipMemcpyDeviceToHost);
//The loop of iterations has to be here as it is operated serially
//Niter=25;
for(iter=0;iter<Niter;iter++){
//hipMemcpy(DCost_GPU, DCost, numIm*sizeof(type), hipMemcpyHostToDevice);
hipMemcpy(nlambda, lambda, K*numIm*sizeof(type),hipMemcpyDeviceToDevice);
//we reset the partial costs
hipMemset(LCost_GPU,0,numIm*sizeof(type));
hipMemset(DCost_GPU,0,numIm*sizeof(type));
pooledVolume = numIm*height*width;
//Forward step
hipLaunchKernelGGL(( weakloss_kernel<type>)
, dim3(divideAndRoundUp(pooledVolume, VL_CUDA_NUM_THREADS)), dim3(VL_CUDA_NUM_THREADS) , 0, 0,
LCost_GPU, DCost_GPU, data, pcoords,
labels,lambda,nlambda, A,b,beta,validPixels,validOuterPixels,mu,iter,done,
pooledVolume,
height, width, channels, numIm);
pooledVolume=numIm*K;
//Limit the values of lambda
hipLaunchKernelGGL(( limLambda_kernel<type>)
, dim3(divideAndRoundUp(pooledVolume, VL_CUDA_NUM_THREADS)), dim3(VL_CUDA_NUM_THREADS) , 0, 0,
nlambda,labels,A,maxLambda,channels,numIm,numIm*K);
hipMemcpy(DCost, DCost_GPU, numIm*sizeof(type), hipMemcpyDeviceToHost);
TDCost=0;
bool worse=false;
//Documents loop to update lambdas
for (int d=0;d<numIm;d++){
type mejora;
bool done_CPU;
hipMemcpy(&done_CPU,&(done[d]), 1*sizeof(bool), hipMemcpyDeviceToHost);
if(iter==0){
mejora=100.0;
TDCost+=DCost[d];
}
else if(done_CPU==true){
mejora=0;
TDCost+=DCost_ant[d];
}
else{
mejora=100.0*(DCost[d]-DCost_ant[d])/abs(DCost_ant[d]);
TDCost+=DCost[d];
}
//Lambda updates
//If we worse the results
if(mejora<-0.1){
done_CPU=false;
//We set the previous best lambda
hipMemcpy(lambda+K*d,best_lambda+K*d, K*sizeof(type),hipMemcpyHostToDevice);
worse=true;
}
else if(mejora<=0.01 ){
done_CPU=true;
}
//if we are improving
else{
//Copy the best option (only in the respective locations)
hipMemcpy(best_lambda+K*d, lambda+K*d, K*sizeof(type),hipMemcpyDeviceToHost);
//Copy nlambda to lambda
hipMemcpy(lambda+K*d, nlambda+K*d, K*sizeof(type),hipMemcpyDeviceToDevice);
DCost_ant[d]=DCost[d];
hipMemcpy(&(BCost[d]), &(LCost_GPU[d]), 1*sizeof(type), hipMemcpyDeviceToHost);
done_CPU=false;
}
hipMemcpy(&(done[d]), &done_CPU, 1*sizeof(bool), hipMemcpyHostToDevice);
}
if(worse && mu>1e-2){
mu=mu*0.5;
//printf("iter %d cost %f mu %f\n",iter,TDCost,mu);
}
else{
type mejora=100.0*(TDCost-TDCost_ant)/abs(TDCost_ant);
// printf("iter %d cost %f mejora %f mu %f\n",iter,TDCost,mejora,mu);
//After 3 iters without improving
if(mejora<0.001 && iter>4){
//printf("iter %d cost %f mejora %f mu %f\n",iter,TDCost,mejora,mu);
break;
}
TDCost_ant=TDCost;
}
}
type TBCost=0;
for (int d=0;d<numIm;d++){
TBCost+=BCost[d];
}
hipMemcpy(LCost, &TBCost, 1*sizeof(type), hipMemcpyHostToDevice);
hipMemcpy(lambda, best_lambda, K*numIm*sizeof(type),hipMemcpyHostToDevice);
hipFree(nlambda);
hipFree(validPixels);
hipFree(validOuterPixels);
hipFree(DCost_GPU);
free(best_lambda);
free(DCost);
hipError_t status = hipPeekAtLastError() ;
return (status == hipSuccess) ? vl::VLE_Success : vl::VLE_Cuda ;
}
static vl::ErrorCode
backward(type* derData,
type const* data,
type const* pcoords,
type const* labels,
type * lambda,
type const* derOutput,
type const* A,
type const* b,
type const* beta,
float maxLambda,
size_t height, size_t width, size_t channels, size_t numIm)
{
int pooledVolume = numIm*height*width;
//Backward kernel
hipLaunchKernelGGL(( weakloss_backward_kernel<type>)
, dim3(divideAndRoundUp(pooledVolume, VL_CUDA_NUM_THREADS)), dim3(VL_CUDA_NUM_THREADS) , 0, 0,
derData, data, pcoords, labels, lambda, derOutput, A, b, beta,maxLambda,
pooledVolume,
height, width, channels, numIm);
hipError_t status = hipPeekAtLastError() ;
return (status == hipSuccess) ? vl::VLE_Success : vl::VLE_Cuda ;
}
} ; // weakloss
} } ; // namespace vl::impl
// Instantiations
template struct vl::impl::weakloss<vl::VLDT_GPU, float> ;
#ifdef ENABLE_DOUBLE
template struct vl::impl::weakloss<vl::VLDT_GPU, double> ;
#endif
| d17cc1867df356931a50e7504127567a245b86f1.cu | // @file weakloss_gpu.cu
// @brief Weak Loss block implementation (gpu)
// @author Iván González Díaz
#include "weakloss.hpp"
#include "../datacu.hpp"
#include <assert.h>
#include <float.h>
#include <stdio.h>
#include <sm_35_atomic_functions.h>
//#include "../mexutils.h"
#include <stdio.h>
#include <string.h>
#include <unistd.h>
extern __device__ double atomicAdd(double* address, double val);
#define RBorders 0.5
/* ---------------------------------------------------------------- */
/* weakloss */
/* ---------------------------------------------------------------- */
/*Kernel that limits the max value of Lambda*/
template<typename T> __global__ void
limLambda_kernel
(T* lambda,
const T*labels,
const T*A,
float maxLambda,
const int channels,
const int numIm,
const int pooledVolume)
{
int pooledIndex = threadIdx.x + blockIdx.x * blockDim.x;
if (pooledIndex < pooledVolume) {
lambda[pooledIndex]=max(lambda[pooledIndex],T(0));
lambda[pooledIndex]=min(lambda[pooledIndex],T(maxLambda));
}
}
/*Kernel that counts the total number of valid pixels*/
template<typename T> __global__ void
contValid_kernel
(const T* pcoords,
int* validPixels,
int* validOuterPixels,
const int pooledVolume,
const int height,
const int width,
const int channels,
const int numIm)
{
int pooledIndex = threadIdx.x + blockIdx.x * blockDim.x;
pcoords+=2*pooledIndex*width*height;
//Set the arrays
if (pooledIndex < pooledVolume) {
validPixels[pooledIndex]=0;
validOuterPixels[pooledIndex]=0;
for (int y = 0; y < height; ++y) {
for (int x = 0; x < width; ++x) {
//If they are valid
if(pcoords[x * height + y]>=0)
validPixels[pooledIndex]++;
if(pcoords[x * height + y]>=RBorders)
validOuterPixels[pooledIndex]++;
}
}
}
}
/*Weak loss forward kernel*/
template<typename T> __global__ void
weakloss_kernel
(T* LCost,
T* DCost,
const T* data,
const T* pcoords,
const T* labels,
const T* lambda,
T* nlambda,
const T* A,
const T* b,
const T*beta,
int* validPixels,
int* validOuterPixels,
T mu,
int iter,
const bool *done,
const int pooledVolume,
const int height,
const int width,
const int channels,
const int numIm)
{
int pooledIndex = threadIdx.x + blockIdx.x * blockDim.x;
if (pooledIndex < pooledVolume) {
//Get the locations of the kernel
//Image
int pim=pooledIndex/(height*width);
//Pixel
int px=pooledIndex-pim*height*width;
T validPx=(T)validPixels[pim];
T validOuterPx=(T)validOuterPixels[pim];
T validInnerPx=validPx-validOuterPx;
//Set the arrays on their initial locations
data += pim*channels*width*height + px ;
pcoords += pim*2*width*height + px ;
labels += pim*channels;
lambda += pim*channels*2;
nlambda += pim*channels*2;
LCost +=pim;
DCost +=pim;
done +=pim;
//If the location is valid
if(*pcoords>=0 && *done==0){
const T* tdata;
T *P=(T*)malloc(channels*sizeof(T));
int l;
//Get maximum values to limit the inputs
T maxLambda=0;
T maxValue=0;
T weight=0;
tdata=data;
for (int z = 0; z < channels; ++z) {
l=(int)labels[z];
maxLambda=max(*tdata+A[2*l]*lambda[2*z]+A[2*l+1]*lambda[2*z+1],maxLambda);
maxValue=max(maxValue,*tdata);
//maxLambda=maxValue;
tdata+=width*height;
if(l>0)
weight+=1-beta[z];
else
weight+=beta[z];
}
weight=weight*2/channels;
//Channels loop
T sumData=0;
T normalizer=0;
T ndata;
tdata=data;
for (int z = 0; z < channels; z++) {
l=(int)labels[z];
//Non-spatial case: count over all pixels
if(l<3){
P[z]=exp(*tdata+A[2*l]*lambda[2*z]+A[2*l+1]*lambda[2*z+1]-maxLambda);
}
//Spatial case, we just consider as possible pixels in the lesion boundary
else{
//We consider as valid pixels on the boundaries
if(*pcoords>=RBorders)
P[z]=exp(*tdata+A[2*l]*lambda[2*z]-maxLambda);
//In the lesion center
else
P[z]=exp(*tdata+A[2*l+1]*lambda[2*z+1]-maxLambda);
}
normalizer+=P[z];
sumData+=exp(*tdata-maxValue);
//Advance one channel
tdata+=width*height;
}
//Update Dual Cost
if(normalizer>0){
atomicAdd(DCost,-weight*(log(normalizer)+maxLambda));
}
else{
atomicAdd(DCost,-weight*(log(T(0.000001))));
}
//Normalization and Lagrangian update
tdata=data;
T inc_LCost=0;
T inc_DCost=0;
for (int z = 0; z < channels; ++z){
l=(int)labels[z];
if(normalizer>0)
P[z]=P[z]/normalizer;
else
P[z]=0;
if(sumData>0)
ndata=exp(*tdata-maxValue)/sumData;
else
ndata=0;
//Update Lambda
if(l<3){
T inc=b[2*l]-A[2*l]*P[z];
atomicAdd(nlambda+2*z,mu*inc/validPx);
inc=b[2*l+1]-A[2*l+1]*P[z];
atomicAdd(nlambda+2*z+1,mu*inc/validPx);
inc_DCost+=lambda[2*z]*b[2*l]+lambda[2*z+1]*b[2*l+1];
}
//Spatially constrained case
else{
if(*pcoords>=RBorders){
T inc=b[2*l]-A[2*l]*P[z];
inc_DCost+=lambda[2*z]*b[2*l];
atomicAdd(nlambda+2*z,mu*inc/validOuterPx);
}
else{
T inc=b[2*l+1]-A[2*l+1]*P[z];
inc_DCost+=lambda[2*z+1]*b[2*l+1];
atomicAdd(nlambda+2*z+1,mu*inc/validInnerPx);
}
}
//Update Lagrangian Cost
ndata=max(ndata,T(0.000001));
if(P[z]>0)
inc_LCost+=weight*P[z]*log(P[z]/ndata);
//Advance one channel
tdata+=width*height;
}
atomicAdd(DCost,inc_DCost);
atomicAdd(LCost,inc_LCost);
free(P);
}
}
}
/*Weak loss backward kernel*/
template <typename T> __global__ void
weakloss_backward_kernel(T* derData,
const T* data,
const T* pcoords,
const T* labels,
T* lambda,
const T* derOutput,
const T* A,
const T* b,
const T* beta,
const float limLambda,
const int pooledVolume,
const int height,
const int width,
const int channels,
const int numIm)
{
int pooledIndex = threadIdx.x + blockIdx.x * blockDim.x;
if (pooledIndex < pooledVolume) {
//Get the locations of the kernel
//Image
int pim=pooledIndex/(height*width);
//Pixel
int px=pooledIndex-pim*height*width;
//Set the arrays on their initial locations
data += pim*channels*width*height + px ;
pcoords += pim*2*width*height + px ;
labels += pim*channels;
lambda += pim*channels*2;
derData += pim*channels*width*height + px ;
//If the location is valid
if(*pcoords>=0){
int l;
const T* tdata;
T *P=(T*)malloc(channels*sizeof(T));
//Get maximum values to limit the inputs
T maxLambda=0;
T maxValue=0;
T weight=0;
tdata=data;
for (int z = 0; z < channels; ++z) {
l=(int)labels[z];
maxLambda=max(*tdata+A[2*l]*lambda[2*z]+A[2*l+1]*lambda[2*z+1],maxLambda);
maxValue=max(maxValue,*tdata);
tdata+=width*height;
if(l>0)
weight+=1-beta[z];
else
weight+=beta[z];
}
weight=2*weight/channels;
//weight=weight/T(width*height);
//Channels loop
T sumData=0;
T normalizer=0;
T ndata;
tdata=data;
for (int z = 0; z < channels; ++z) {
l=(int)labels[z];
//Regular case: count over all pixels
if(l<3){
P[z]=exp(*tdata+A[2*l]*lambda[2*z]+A[2*l+1]*lambda[2*z+1]-maxLambda);
}
//Spatially constrained case: consideronly pixels in the boundary
else{
if(*pcoords>=RBorders)
P[z]=exp(*tdata+A[2*l]*lambda[2*z]-maxLambda);
else
P[z]=exp(*tdata+A[2*l+1]*lambda[2*z+1]-maxLambda);
}
normalizer+=P[z];
sumData+=exp(*tdata-maxValue);
//Advance one channel
tdata+=width*height;
}
tdata=data;
for (int z = 0; z < channels; ++z){
if(normalizer>0)
P[z]=P[z]/normalizer;
else{
P[z]=T(0);
}
if(sumData>0)
ndata=exp(*tdata-maxValue)/sumData;
else{
ndata=T(0);
}
*derData=weight*derOutput[0]*(ndata-P[z]);
derData+=width*height;
tdata+=width*height;
}
free(P);
}
}
}
/* ---------------------------------------------------------------- */
/* Interface */
/* ---------------------------------------------------------------- */
namespace vl { namespace impl {
template <typename type>
struct weakloss<vl::VLDT_GPU, type>
{
static vl::ErrorCode
forward(type * LCost,
type const* data,
type const* pcoords,
type const* labels,
type * lambda,
type const* A,
type const* b,
type const* beta,
float maxLambda,
size_t height, size_t width, size_t channels, size_t numIm)
{
int iter;
int K=2*channels,Niter=200,*validPixels,*validOuterPixels;
type *DCost,*DCost_GPU,*DCost_ant,*LCost_GPU,*BCost;
type *nlambda,*best_lambda;//,*cpu_labels;
type mu=10.0,TDCost,TDCost_ant=-1e10;
bool *done;
int pooledVolume;
cudaMalloc(&nlambda, K*numIm*sizeof(type));
cudaMalloc(&validPixels, numIm*sizeof(int));
cudaMalloc(&validOuterPixels, numIm*sizeof(int));
cudaMalloc(&DCost_GPU, numIm*sizeof(type));
cudaMalloc(&LCost_GPU, numIm*sizeof(type));
cudaMalloc(&done, numIm*sizeof(bool));
cudaMemset(done,false,numIm*sizeof(bool));
DCost=(type*)malloc(numIm*sizeof(type));
DCost_ant=(type*)malloc(numIm*sizeof(type));
BCost=(type*)malloc(numIm*sizeof(type));
best_lambda=(type*)malloc(K*numIm*sizeof(type));
//Count number of valid data
pooledVolume=(int)numIm;
contValid_kernel<type>
<<< divideAndRoundUp(pooledVolume, VL_CUDA_NUM_THREADS), VL_CUDA_NUM_THREADS >>>
(pcoords,validPixels,validOuterPixels,numIm, height, width, channels, numIm);
//Limit lambda values
pooledVolume=numIm*K;
limLambda_kernel<type>
<<< divideAndRoundUp(pooledVolume, VL_CUDA_NUM_THREADS), VL_CUDA_NUM_THREADS >>>
(lambda,labels,A,maxLambda,channels,numIm,numIm*K);
cudaMemcpy(best_lambda, lambda, K*numIm*sizeof(type),cudaMemcpyDeviceToHost);
//The loop of iterations has to be here as it is operated serially
//Niter=25;
for(iter=0;iter<Niter;iter++){
//cudaMemcpy(DCost_GPU, DCost, numIm*sizeof(type), cudaMemcpyHostToDevice);
cudaMemcpy(nlambda, lambda, K*numIm*sizeof(type),cudaMemcpyDeviceToDevice);
//we reset the partial costs
cudaMemset(LCost_GPU,0,numIm*sizeof(type));
cudaMemset(DCost_GPU,0,numIm*sizeof(type));
pooledVolume = numIm*height*width;
//Forward step
weakloss_kernel<type>
<<< divideAndRoundUp(pooledVolume, VL_CUDA_NUM_THREADS), VL_CUDA_NUM_THREADS >>>
(LCost_GPU, DCost_GPU, data, pcoords,
labels,lambda,nlambda, A,b,beta,validPixels,validOuterPixels,mu,iter,done,
pooledVolume,
height, width, channels, numIm);
pooledVolume=numIm*K;
//Limit the values of lambda
limLambda_kernel<type>
<<< divideAndRoundUp(pooledVolume, VL_CUDA_NUM_THREADS), VL_CUDA_NUM_THREADS >>>
(nlambda,labels,A,maxLambda,channels,numIm,numIm*K);
cudaMemcpy(DCost, DCost_GPU, numIm*sizeof(type), cudaMemcpyDeviceToHost);
TDCost=0;
bool worse=false;
//Documents loop to update lambdas
for (int d=0;d<numIm;d++){
type mejora;
bool done_CPU;
cudaMemcpy(&done_CPU,&(done[d]), 1*sizeof(bool), cudaMemcpyDeviceToHost);
if(iter==0){
mejora=100.0;
TDCost+=DCost[d];
}
else if(done_CPU==true){
mejora=0;
TDCost+=DCost_ant[d];
}
else{
mejora=100.0*(DCost[d]-DCost_ant[d])/abs(DCost_ant[d]);
TDCost+=DCost[d];
}
//Lambda updates
//If we worse the results
if(mejora<-0.1){
done_CPU=false;
//We set the previous best lambda
cudaMemcpy(lambda+K*d,best_lambda+K*d, K*sizeof(type),cudaMemcpyHostToDevice);
worse=true;
}
else if(mejora<=0.01 ){
done_CPU=true;
}
//if we are improving
else{
//Copy the best option (only in the respective locations)
cudaMemcpy(best_lambda+K*d, lambda+K*d, K*sizeof(type),cudaMemcpyDeviceToHost);
//Copy nlambda to lambda
cudaMemcpy(lambda+K*d, nlambda+K*d, K*sizeof(type),cudaMemcpyDeviceToDevice);
DCost_ant[d]=DCost[d];
cudaMemcpy(&(BCost[d]), &(LCost_GPU[d]), 1*sizeof(type), cudaMemcpyDeviceToHost);
done_CPU=false;
}
cudaMemcpy(&(done[d]), &done_CPU, 1*sizeof(bool), cudaMemcpyHostToDevice);
}
if(worse && mu>1e-2){
mu=mu*0.5;
//printf("iter %d cost %f mu %f\n",iter,TDCost,mu);
}
else{
type mejora=100.0*(TDCost-TDCost_ant)/abs(TDCost_ant);
// printf("iter %d cost %f mejora %f mu %f\n",iter,TDCost,mejora,mu);
//After 3 iters without improving
if(mejora<0.001 && iter>4){
//printf("iter %d cost %f mejora %f mu %f\n",iter,TDCost,mejora,mu);
break;
}
TDCost_ant=TDCost;
}
}
type TBCost=0;
for (int d=0;d<numIm;d++){
TBCost+=BCost[d];
}
cudaMemcpy(LCost, &TBCost, 1*sizeof(type), cudaMemcpyHostToDevice);
cudaMemcpy(lambda, best_lambda, K*numIm*sizeof(type),cudaMemcpyHostToDevice);
cudaFree(nlambda);
cudaFree(validPixels);
cudaFree(validOuterPixels);
cudaFree(DCost_GPU);
free(best_lambda);
free(DCost);
cudaError_t status = cudaPeekAtLastError() ;
return (status == cudaSuccess) ? vl::VLE_Success : vl::VLE_Cuda ;
}
static vl::ErrorCode
backward(type* derData,
type const* data,
type const* pcoords,
type const* labels,
type * lambda,
type const* derOutput,
type const* A,
type const* b,
type const* beta,
float maxLambda,
size_t height, size_t width, size_t channels, size_t numIm)
{
int pooledVolume = numIm*height*width;
//Backward kernel
weakloss_backward_kernel<type>
<<< divideAndRoundUp(pooledVolume, VL_CUDA_NUM_THREADS), VL_CUDA_NUM_THREADS >>>
(derData, data, pcoords, labels, lambda, derOutput, A, b, beta,maxLambda,
pooledVolume,
height, width, channels, numIm);
cudaError_t status = cudaPeekAtLastError() ;
return (status == cudaSuccess) ? vl::VLE_Success : vl::VLE_Cuda ;
}
} ; // weakloss
} } ; // namespace vl::impl
// Instantiations
template struct vl::impl::weakloss<vl::VLDT_GPU, float> ;
#ifdef ENABLE_DOUBLE
template struct vl::impl::weakloss<vl::VLDT_GPU, double> ;
#endif
|
d8b0c0d405adc548bc916a15b65fa5779cf37c28.hip | // !!! This is a file automatically generated by hipify!!!
#if __CUDACC_VER_MAJOR__ >= 8
#include "scope/scope.hpp"
#include "args.hpp"
#define NAME "Comm_UM_Prefetch_GPUToHost"
auto Comm_UM_Prefetch_GPUToHost = [](benchmark::State &state, const int numa_id,
const int cuda_id) {
const auto bytes = 1ULL << static_cast<size_t>(state.range(0));
numa::bind_node(numa_id);
if (PRINT_IF_ERROR(scope::cuda_reset_device(cuda_id))) {
state.SkipWithError(NAME " failed to reset device");
return;
}
if (PRINT_IF_ERROR(hipSetDevice(cuda_id))) {
state.SkipWithError(NAME " failed to set CUDA device");
return;
}
char *ptr = nullptr;
if (PRINT_IF_ERROR(hipMallocManaged(&ptr, bytes))) {
state.SkipWithError(NAME " failed to perform hipMallocManaged");
return;
}
defer(hipFree(ptr));
if (PRINT_IF_ERROR(hipMemset(ptr, 0, bytes))) {
state.SkipWithError(NAME " failed to perform hipMemset");
return;
}
hipEvent_t start, stop;
if (PRINT_IF_ERROR(hipEventCreate(&start))) {
state.SkipWithError(NAME " failed to create start event");
return;
}
defer(hipEventDestroy(start));
if (PRINT_IF_ERROR(hipEventCreate(&stop))) {
state.SkipWithError(NAME " failed to create end event");
return;
}
defer(hipEventDestroy(stop));
for (auto _ : state) {
if (PRINT_IF_ERROR(hipMemPrefetchAsync(ptr, bytes, cuda_id))) {
state.SkipWithError(NAME " failed to prefetch to src");
return;
}
if (PRINT_IF_ERROR(hipDeviceSynchronize())) {
state.SkipWithError(NAME " failed to synchronize");
return;
}
hipEventRecord(start);
if (PRINT_IF_ERROR(hipMemPrefetchAsync(ptr, bytes, hipCpuDeviceId))) {
state.SkipWithError(NAME " failed to move data to dst");
return;
}
hipEventRecord(stop);
hipEventSynchronize(stop);
float millis = 0;
if (PRINT_IF_ERROR(hipEventElapsedTime(&millis, start, stop))) {
state.SkipWithError(NAME " failed to get elapsed time");
break;
}
state.SetIterationTime(millis / 1000);
}
state.SetBytesProcessed(int64_t(state.iterations()) * int64_t(bytes));
state.counters["bytes"] = bytes;
state.counters["cuda_id"] = cuda_id;
state.counters["numa_id"] = numa_id;
// reset to run on any node
numa::bind_node(-1);
};
static void registerer() {
for (int cuda_id : scope::system::cuda_devices()) {
for (auto numa_id : numa::mems()) {
std::string name = std::string(NAME) + "/" + std::to_string(numa_id) +
"/" + std::to_string(cuda_id);
benchmark::RegisterBenchmark(name.c_str(), Comm_UM_Prefetch_GPUToHost,
numa_id, cuda_id)
->SMALL_ARGS()
->UseManualTime();
}
}
}
SCOPE_AFTER_INIT(registerer, NAME);
#endif // __CUDACC_VER_MAJOR__ >= 8 | d8b0c0d405adc548bc916a15b65fa5779cf37c28.cu | #if __CUDACC_VER_MAJOR__ >= 8
#include "scope/scope.hpp"
#include "args.hpp"
#define NAME "Comm_UM_Prefetch_GPUToHost"
auto Comm_UM_Prefetch_GPUToHost = [](benchmark::State &state, const int numa_id,
const int cuda_id) {
const auto bytes = 1ULL << static_cast<size_t>(state.range(0));
numa::bind_node(numa_id);
if (PRINT_IF_ERROR(scope::cuda_reset_device(cuda_id))) {
state.SkipWithError(NAME " failed to reset device");
return;
}
if (PRINT_IF_ERROR(cudaSetDevice(cuda_id))) {
state.SkipWithError(NAME " failed to set CUDA device");
return;
}
char *ptr = nullptr;
if (PRINT_IF_ERROR(cudaMallocManaged(&ptr, bytes))) {
state.SkipWithError(NAME " failed to perform cudaMallocManaged");
return;
}
defer(cudaFree(ptr));
if (PRINT_IF_ERROR(cudaMemset(ptr, 0, bytes))) {
state.SkipWithError(NAME " failed to perform cudaMemset");
return;
}
cudaEvent_t start, stop;
if (PRINT_IF_ERROR(cudaEventCreate(&start))) {
state.SkipWithError(NAME " failed to create start event");
return;
}
defer(cudaEventDestroy(start));
if (PRINT_IF_ERROR(cudaEventCreate(&stop))) {
state.SkipWithError(NAME " failed to create end event");
return;
}
defer(cudaEventDestroy(stop));
for (auto _ : state) {
if (PRINT_IF_ERROR(cudaMemPrefetchAsync(ptr, bytes, cuda_id))) {
state.SkipWithError(NAME " failed to prefetch to src");
return;
}
if (PRINT_IF_ERROR(cudaDeviceSynchronize())) {
state.SkipWithError(NAME " failed to synchronize");
return;
}
cudaEventRecord(start);
if (PRINT_IF_ERROR(cudaMemPrefetchAsync(ptr, bytes, cudaCpuDeviceId))) {
state.SkipWithError(NAME " failed to move data to dst");
return;
}
cudaEventRecord(stop);
cudaEventSynchronize(stop);
float millis = 0;
if (PRINT_IF_ERROR(cudaEventElapsedTime(&millis, start, stop))) {
state.SkipWithError(NAME " failed to get elapsed time");
break;
}
state.SetIterationTime(millis / 1000);
}
state.SetBytesProcessed(int64_t(state.iterations()) * int64_t(bytes));
state.counters["bytes"] = bytes;
state.counters["cuda_id"] = cuda_id;
state.counters["numa_id"] = numa_id;
// reset to run on any node
numa::bind_node(-1);
};
static void registerer() {
for (int cuda_id : scope::system::cuda_devices()) {
for (auto numa_id : numa::mems()) {
std::string name = std::string(NAME) + "/" + std::to_string(numa_id) +
"/" + std::to_string(cuda_id);
benchmark::RegisterBenchmark(name.c_str(), Comm_UM_Prefetch_GPUToHost,
numa_id, cuda_id)
->SMALL_ARGS()
->UseManualTime();
}
}
}
SCOPE_AFTER_INIT(registerer, NAME);
#endif // __CUDACC_VER_MAJOR__ >= 8 |
bff50d5877187ec71c7b3d9e1ed25d47141b55b6.hip | // !!! This is a file automatically generated by hipify!!!
#include <stdbool.h>
#include <stdio.h>
#include <string.h>
#include <getopt.h>
#include <hiprand/hiprand_kernel.h>
#include <stdlib.h>
#include <hip/hip_runtime.h>
#include <sys/time.h>
#include "uplo_log2.cu"
#include<chrono>
#include<iostream>
using namespace std;
using namespace std::chrono;
int blocks_[20][2] = {{8,8},{16,16},{24,24},{32,32},{1,64},{1,128},{1,192},{1,256},{1,320},{1,384},{1,448},{1,512},{1,576},{1,640},{1,704},{1,768},{1,832},{1,896},{1,960},{1,1024}};
int matrices_[7][2] = {{240,240},{496,496},{784,784},{1016,1016},{1232,1232},{1680,1680},{2024,2024}};
int main(int argc, char **argv) {
hipSetDevice(0);
char* p;int matrix_len=strtol(argv[1], &p, 10);
for(int matrix_looper=0;matrix_looper<matrix_len;matrix_looper++){
for(int block_looper=0;block_looper<20;block_looper++){
int XSIZE=matrices_[matrix_looper][0],YSIZE=matrices_[matrix_looper][1],BLOCKX=blocks_[block_looper][0],BLOCKY=blocks_[block_looper][1];
const int sd = 1;
const int unit = 1;
const int bottom = 1;
const REAL *a = NULL;
hipMalloc(&a, XSIZE*YSIZE);
const int offset_a = 1;
const int ld_a = 1;
REAL *b = NULL;
hipMalloc(&b, XSIZE*YSIZE);
const int offset_b = 1;
const int ld_b = 1;
int iXSIZE= XSIZE;
int iYSIZE= YSIZE;
while(iXSIZE%BLOCKX!=0)
{
iXSIZE++;
}
while(iYSIZE%BLOCKY!=0)
{
iYSIZE++;
}
dim3 gridBlock(iXSIZE/BLOCKX, iYSIZE/BLOCKY);
dim3 threadBlock(BLOCKX, BLOCKY);
hipFree(0);hipLaunchKernelGGL((
uplo_log2), dim3(gridBlock),dim3(threadBlock), 0, 0, sd,unit,bottom,a,offset_a,ld_a,b,offset_b,ld_b);
hipDeviceSynchronize();
for (int loop_counter = 0; loop_counter < 10; ++loop_counter) {hipLaunchKernelGGL((
uplo_log2), dim3(gridBlock),dim3(threadBlock), 0, 0, sd,unit,bottom,a,offset_a,ld_a,b,offset_b,ld_b);
}
auto start = steady_clock::now();
for (int loop_counter = 0; loop_counter < 1000; loop_counter++) {hipLaunchKernelGGL((
uplo_log2), dim3(gridBlock),dim3(threadBlock), 0, 0, sd,unit,bottom,a,offset_a,ld_a,b,offset_b,ld_b);
}
auto end = steady_clock::now();
auto usecs = duration_cast<duration<float, microseconds::period> >(end - start);
cout <<'['<<usecs.count()<<','<<'('<<BLOCKX<<','<<BLOCKY<<')' << ','<<'('<<XSIZE<<','<<YSIZE<<')'<<']' << endl;
}
}} | bff50d5877187ec71c7b3d9e1ed25d47141b55b6.cu | #include <stdbool.h>
#include <stdio.h>
#include <string.h>
#include <getopt.h>
#include <curand_kernel.h>
#include <stdlib.h>
#include <cuda.h>
#include <sys/time.h>
#include "uplo_log2.cu"
#include<chrono>
#include<iostream>
using namespace std;
using namespace std::chrono;
int blocks_[20][2] = {{8,8},{16,16},{24,24},{32,32},{1,64},{1,128},{1,192},{1,256},{1,320},{1,384},{1,448},{1,512},{1,576},{1,640},{1,704},{1,768},{1,832},{1,896},{1,960},{1,1024}};
int matrices_[7][2] = {{240,240},{496,496},{784,784},{1016,1016},{1232,1232},{1680,1680},{2024,2024}};
int main(int argc, char **argv) {
cudaSetDevice(0);
char* p;int matrix_len=strtol(argv[1], &p, 10);
for(int matrix_looper=0;matrix_looper<matrix_len;matrix_looper++){
for(int block_looper=0;block_looper<20;block_looper++){
int XSIZE=matrices_[matrix_looper][0],YSIZE=matrices_[matrix_looper][1],BLOCKX=blocks_[block_looper][0],BLOCKY=blocks_[block_looper][1];
const int sd = 1;
const int unit = 1;
const int bottom = 1;
const REAL *a = NULL;
cudaMalloc(&a, XSIZE*YSIZE);
const int offset_a = 1;
const int ld_a = 1;
REAL *b = NULL;
cudaMalloc(&b, XSIZE*YSIZE);
const int offset_b = 1;
const int ld_b = 1;
int iXSIZE= XSIZE;
int iYSIZE= YSIZE;
while(iXSIZE%BLOCKX!=0)
{
iXSIZE++;
}
while(iYSIZE%BLOCKY!=0)
{
iYSIZE++;
}
dim3 gridBlock(iXSIZE/BLOCKX, iYSIZE/BLOCKY);
dim3 threadBlock(BLOCKX, BLOCKY);
cudaFree(0);
uplo_log2<<<gridBlock,threadBlock>>>(sd,unit,bottom,a,offset_a,ld_a,b,offset_b,ld_b);
cudaDeviceSynchronize();
for (int loop_counter = 0; loop_counter < 10; ++loop_counter) {
uplo_log2<<<gridBlock,threadBlock>>>(sd,unit,bottom,a,offset_a,ld_a,b,offset_b,ld_b);
}
auto start = steady_clock::now();
for (int loop_counter = 0; loop_counter < 1000; loop_counter++) {
uplo_log2<<<gridBlock,threadBlock>>>(sd,unit,bottom,a,offset_a,ld_a,b,offset_b,ld_b);
}
auto end = steady_clock::now();
auto usecs = duration_cast<duration<float, microseconds::period> >(end - start);
cout <<'['<<usecs.count()<<','<<'('<<BLOCKX<<','<<BLOCKY<<')' << ','<<'('<<XSIZE<<','<<YSIZE<<')'<<']' << endl;
}
}} |
9da8cf528b4a9b7c0f1ae497591132ad14b83e5b.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
/*
Copyright (c) Microsoft Corporation.
Licensed under the MIT License.
*/
/*
Kernel implementation for blocking repeated n-grams.
*/
#include "core/providers/cuda/cu_inc/common.cuh"
#include "contrib_ops/cuda/bert/ngram_repeat_block_impl.h"
namespace onnxruntime {
namespace contrib {
namespace cuda {
using namespace onnxruntime::cuda;
// Ban repeated ngrams of length = 'no_repeat_ngram_size'
__global__ void banRepeatedTokens(const int64_t* __restrict__ tokens,
float* __restrict__ lprobs,
int max_predict_len, int vocab_size,
int no_repeat_ngram_size) {
auto row = blockIdx.x;
auto col = threadIdx.x;
auto start = row * (max_predict_len) + col;
// Each thread compares ngram starting from
// thread index with final ngram starting from
// step - no_repeat_ngram_size +2
auto check_start_pos = blockDim.x;
auto lprob_start = row * vocab_size;
bool is_banned = true;
extern __shared__ int64_t tokens_shm[];
tokens_shm[col] = tokens[start];
if (col == blockDim.x - 1) {
for (int i=1; i<no_repeat_ngram_size; i++){
if (col+i < max_predict_len){
tokens_shm[col + i] = tokens[start + i];
}
}
}
__syncthreads();
for (int k = 0; k < no_repeat_ngram_size - 1; k++) {
if (tokens_shm[col + k] != tokens_shm[check_start_pos + k]) {
is_banned = false;
}
}
if (is_banned == true) {
auto token_to_be_banned = tokens_shm[col + no_repeat_ngram_size - 1];
lprobs[lprob_start + token_to_be_banned] = -INFINITY;
}
}
// Allocate blocks and threads based on
// batch size and sequence length and launch
// kernel
void NGramRepeatBlockImpl(
hipStream_t stream,
const int64_t* tokens_ptr,
float* scores_ptr,
int bsz,
int step,
int max_predict_len,
int vocab_size,
int beam_size,
int no_repeat_ngram_size) {
int threads = step - no_repeat_ngram_size + 2;
if (threads <= 0) return;
int blocks = bsz * beam_size;
int shared_mem_size = (step + 1) * sizeof(int64_t);
// Launching N blocks where N is number of samples in a batch (beams*bsz)
// Launching T threads where T is number of previous ngrams in a sample
// Allocating shared mem per block for fastser access of input tokens since
// each token will be accessed N times to compare with current Ngram where
// N is Ngram size.
hipLaunchKernelGGL(( banRepeatedTokens), dim3(blocks), dim3(threads), shared_mem_size, stream,
tokens_ptr, scores_ptr, max_predict_len, vocab_size, no_repeat_ngram_size);
}
} // namespace cuda
} // namespace contrib
} // namespace onnxruntime
| 9da8cf528b4a9b7c0f1ae497591132ad14b83e5b.cu | /*
Copyright (c) Microsoft Corporation.
Licensed under the MIT License.
*/
/*
Kernel implementation for blocking repeated n-grams.
*/
#include "core/providers/cuda/cu_inc/common.cuh"
#include "contrib_ops/cuda/bert/ngram_repeat_block_impl.h"
namespace onnxruntime {
namespace contrib {
namespace cuda {
using namespace onnxruntime::cuda;
// Ban repeated ngrams of length = 'no_repeat_ngram_size'
__global__ void banRepeatedTokens(const int64_t* __restrict__ tokens,
float* __restrict__ lprobs,
int max_predict_len, int vocab_size,
int no_repeat_ngram_size) {
auto row = blockIdx.x;
auto col = threadIdx.x;
auto start = row * (max_predict_len) + col;
// Each thread compares ngram starting from
// thread index with final ngram starting from
// step - no_repeat_ngram_size +2
auto check_start_pos = blockDim.x;
auto lprob_start = row * vocab_size;
bool is_banned = true;
extern __shared__ int64_t tokens_shm[];
tokens_shm[col] = tokens[start];
if (col == blockDim.x - 1) {
for (int i=1; i<no_repeat_ngram_size; i++){
if (col+i < max_predict_len){
tokens_shm[col + i] = tokens[start + i];
}
}
}
__syncthreads();
for (int k = 0; k < no_repeat_ngram_size - 1; k++) {
if (tokens_shm[col + k] != tokens_shm[check_start_pos + k]) {
is_banned = false;
}
}
if (is_banned == true) {
auto token_to_be_banned = tokens_shm[col + no_repeat_ngram_size - 1];
lprobs[lprob_start + token_to_be_banned] = -INFINITY;
}
}
// Allocate blocks and threads based on
// batch size and sequence length and launch
// kernel
void NGramRepeatBlockImpl(
cudaStream_t stream,
const int64_t* tokens_ptr,
float* scores_ptr,
int bsz,
int step,
int max_predict_len,
int vocab_size,
int beam_size,
int no_repeat_ngram_size) {
int threads = step - no_repeat_ngram_size + 2;
if (threads <= 0) return;
int blocks = bsz * beam_size;
int shared_mem_size = (step + 1) * sizeof(int64_t);
// Launching N blocks where N is number of samples in a batch (beams*bsz)
// Launching T threads where T is number of previous ngrams in a sample
// Allocating shared mem per block for fastser access of input tokens since
// each token will be accessed N times to compare with current Ngram where
// N is Ngram size.
banRepeatedTokens<<<blocks, threads, shared_mem_size, stream>>>(
tokens_ptr, scores_ptr, max_predict_len, vocab_size, no_repeat_ngram_size);
}
} // namespace cuda
} // namespace contrib
} // namespace onnxruntime
|
60f455dac91a7085a40245d83fb93833ffdf3da4.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
// ----------------------------------------------------------------
// Gunrock -- Fast and Efficient GPU Graph Library
// ----------------------------------------------------------------
// This source code is distributed under the terms of LICENSE.TXT
// in the root directory of this source distribution.
// ----------------------------------------------------------------
/**
* @file
* simple_example.cu
*
* @brief Simple example driver for all three primitives
*/
#include <stdio.h>
#include <string>
#include <deque>
#include <vector>
#include <iostream>
// Utilities and correctness-checking
#include <gunrock/util/test_utils.cuh>
// Graph construction utils
#include <gunrock/graphio/market.cuh>
// CC includes
#include <gunrock/app/cc/cc_enactor.cuh>
#include <gunrock/app/cc/cc_problem.cuh>
#include <gunrock/app/cc/cc_functor.cuh>
// BFS includes
#include <gunrock/app/bfs/bfs_enactor.cuh>
#include <gunrock/app/bfs/bfs_problem.cuh>
#include <gunrock/app/bfs/bfs_functor.cuh>
// BC includes
#include <gunrock/app/bc/bc_enactor.cuh>
#include <gunrock/app/bc/bc_problem.cuh>
#include <gunrock/app/bc/bc_functor.cuh>
// Operator includes
#include <gunrock/oprtr/filter/kernel.cuh>
#include <gunrock/oprtr/advance/kernel.cuh>
#include "cpu_graph_lib.hpp"
using namespace gunrock;
using namespace gunrock::util;
using namespace gunrock::oprtr;
using namespace gunrock::app::cc;
using namespace gunrock::app::bfs;
using namespace gunrock::app::bc;
/******************************************************************************
* Defines, constants, globals
******************************************************************************/
bool g_verbose;
bool g_undirected;
bool g_quick;
bool g_stream_from_host;
template <typename VertexId>
struct CcList {
VertexId root;
unsigned int histogram;
CcList(VertexId root, unsigned int histogram) :
root(root), histogram(histogram) {}
};
template<typename CcList>
bool CCCompare(
CcList elem1,
CcList elem2)
{
return elem1.histogram > elem2.histogram;
}
/******************************************************************************
* Housekeeping Routines
******************************************************************************/
/**
 * @brief Prints command-line usage for this driver.
 *
 * Flags listed here are parsed in main()/RunTests():
 * --device, --instrumented, --quick (plus --num-gpus, --v, --help).
 */
void Usage()
{
    printf(
        "\nsimple_example <graph type> <graph type args> [--device=<device_index>] "
        "[--instrumented] [--quick]\n"
        "\n"
        "Graph types and args:\n"
        " market [<file>]\n"
        " Reads a Matrix-Market coordinate-formatted graph of directed/undirected\n"
        " edges from stdin (or from the optionally-specified file).\n"
        // Fix: the parsed flag is "instrumented" (see RunTests); the help text
        // previously advertised a nonexistent "--instrumentd" flag.
        "--instrumented: If include then show detailed kernel running stats.\n"
        "--quick: If include then do not perform CPU validity code.\n"
    );
}
/**
* @brief Displays the CC result (i.e., number of components)
*
* @tparam VertexId
* @tparam SizeT
*
* @param[in] comp_ids
* @param[in] nodes
* @param[in] num_components
*/
template<typename VertexId, typename SizeT>
void DisplayCCSolution(VertexId *comp_ids, SizeT nodes,
                       unsigned int num_components)
{
    // Always report the component count; dump per-vertex component ids
    // only for small graphs (<= 40 vertices).
    printf("Number of components: %d\n", num_components);
    if (nodes > 40) {
        return;
    }
    printf("[");
    for (VertexId v = 0; v < nodes; ++v) {
        PrintValue(v);
        printf(":");
        PrintValue(comp_ids[v]);
        printf(",");
        printf(" ");
    }
    printf("]\n");
}
/**
* @brief Displays the BFS result (i.e., distance from source)
*
* @tparam VertexId
* @tparam SizeT
*
* @param[in] source_path
* @param[in] preds
* @param[in] nodes
* @param[in] MARK_PREDECESSORS
*/
template<typename VertexId, typename SizeT>
void DisplayBFSSolution(VertexId *source_path, VertexId *preds, SizeT nodes,
                        bool MARK_PREDECESSORS)
{
    // Print at most the first 40 vertices as "vertex:label," pairs,
    // followed by the predecessor when MARK_PREDECESSORS is set.
    const SizeT limit = (nodes > 40) ? (SizeT)40 : nodes;
    printf("[");
    for (VertexId v = 0; v < limit; ++v) {
        PrintValue(v);
        printf(":");
        PrintValue(source_path[v]);
        printf(",");
        if (MARK_PREDECESSORS)
            PrintValue(preds[v]);
        printf(" ");
    }
    printf("]\n");
}
/**
* Displays the BC result (sigma value and BC value)
*
* @tparam Value
* @tparam SizeT
*
* @param[in] bc_values
* @param[in] nodes
*/
template<typename Value, typename SizeT>
void DisplayBCSolution(Value *bc_values, SizeT nodes)
{
    // Print "vertex:bc_value" pairs for at most the first 40 vertices.
    const SizeT limit = (nodes > 40) ? (SizeT)40 : nodes;
    printf("[");
    for (SizeT v = 0; v < limit; ++v) {
        PrintValue(v);
        printf(":");
        PrintValue(bc_values[v]);
        printf(" ");
    }
    printf("]\n");
}
/**
* Performance/Evaluation statistics
*/
struct Stats {
    const char *name;          // display name of the primitive (e.g. "GPU BFS")
    // NOTE(review): only `name` is read in this file (by DisplayBFSStats); the
    // Statistic members below are default-constructed but not otherwise used here.
    Statistic rate;            // traversal-rate samples
    Statistic search_depth;    // search-depth samples
    Statistic redundant_work;  // redundant-work percentage samples
    Statistic duty;            // average CTA duty samples
    Stats() : name(NULL), rate(), search_depth(), redundant_work(), duty() {}
    Stats(const char *name) :
        name(name), rate(), search_depth(), redundant_work(), duty() {}
};
/**
* @brief Displays timing and correctness statistics for BFS
*
* @tparam MARK_PREDECESSORS
* @tparam VertexId
* @tparam Value
* @tparam SizeT
*
* @param[in] stats Reference to ...
* @param[in] src
* @param[in] h_labels
* @param[in] graph Reference to ...
* @param[in] elapsed
* @param[in] search_depth
* @param[in] total_queued
* @param[in] avg_duty
*/
template<
    typename VertexId,
    typename Value,
    typename SizeT>
void DisplayBFSStats(
    Stats &stats,
    VertexId src,
    VertexId *h_labels,
    const Csr<VertexId, Value, SizeT> &graph,
    double elapsed,
    VertexId search_depth,
    long long total_queued,
    double avg_duty)
{
    // Compute nodes and edges visited
    SizeT edges_visited = 0;
    SizeT nodes_visited = 0;
    // A label > -1 means BFS reached the vertex (unvisited labels are -1;
    // see SimpleReferenceBfs).
    for (VertexId i = 0; i < graph.nodes; ++i) {
        if (h_labels[i] > -1) {
            ++nodes_visited;
            edges_visited += graph.row_offsets[i+1] - graph.row_offsets[i];
        }
    }
    double redundant_work = 0.0;
    if (total_queued > 0) {
        // measure duplicate edges put through queue
        redundant_work =
            ((double) total_queued - edges_visited) / edges_visited;
    }
    redundant_work *= 100;  // express as a percentage
    // Display test name
    printf("[%s] finished. ", stats.name);
    // Display statistics
    if (nodes_visited < 5) {
        printf("Fewer than 5 vertices visited.\n");
    } else {
        // Display the specific sample statistics.
        // elapsed is in ms, so edges / (elapsed * 1000) is millions of edges/s.
        double m_teps = (double) edges_visited / (elapsed * 1000.0);
        printf(" elapsed: %.3f ms, rate: %.3f MiEdges/s", elapsed, m_teps);
        if (search_depth != 0) {
            printf(", search_depth: %lld", (long long) search_depth);
        }
        if (avg_duty != 0) {
            printf("\n avg CTA duty: %.2f%%", avg_duty * 100);
        }
        printf("\n src: %lld, nodes_visited: %lld, edges visited: %lld",
               (long long) src, (long long) nodes_visited,
               (long long) edges_visited);
        if (total_queued > 0) {
            printf(", total queued: %lld", total_queued);
        }
        if (redundant_work > 0) {
            printf(", redundant work: %.2f%%", redundant_work);
        }
        printf("\n");
    }
}
/**
* @brief A simple CPU-based reference BFS ranking implementation.
*
* @tparam VertexId
* @tparam Value
* @tparam SizeT
*
* @param[in] graph Reference to ...
* @param[in] source_path
* @param[in] src
*/
template<
    typename VertexId,
    typename Value,
    typename SizeT>
void SimpleReferenceBfs(
    const Csr<VertexId, Value, SizeT> &graph,
    VertexId *source_path,
    VertexId src)
{
    //initialize distances: -1 marks "not yet visited"
    for (VertexId i = 0; i < graph.nodes; ++i) {
        source_path[i] = -1;
    }
    source_path[src] = 0;
    VertexId search_depth = 0;
    // Initialize queue for managing previously-discovered nodes
    std::deque<VertexId> frontier;
    frontier.push_back(src);
    //
    //Perform BFS (standard queue-based level traversal)
    //
    CpuTimer cpu_timer;
    cpu_timer.Start();
    while (!frontier.empty()) {
        // Dequeue node from frontier
        VertexId dequeued_node = frontier.front();
        frontier.pop_front();
        VertexId neighbor_dist = source_path[dequeued_node] + 1;
        // Locate adjacency list via CSR row offsets
        int edges_begin = graph.row_offsets[dequeued_node];
        int edges_end = graph.row_offsets[dequeued_node + 1];
        for (int edge = edges_begin; edge < edges_end; ++edge) {
            //Lookup neighbor and enqueue if undiscovered
            VertexId neighbor = graph.column_indices[edge];
            if (source_path[neighbor] == -1) {
                source_path[neighbor] = neighbor_dist;
                if (search_depth < neighbor_dist) {
                    search_depth = neighbor_dist;
                }
                frontier.push_back(neighbor);
            }
        }
    }
    cpu_timer.Stop();
    float elapsed = cpu_timer.ElapsedMillis();
    // Report depth as number of BFS levels (max distance + 1).
    search_depth++;
    printf("CPU BFS finished in %lf msec. Search depth is:%d\n",
           elapsed, search_depth);
}
/**
* @brief Run tests
*
* @tparam VertexId
* @tparam Value
* @tparam SizeT
* @tparam INSTRUMENT
*
* @param[in] graph Reference to the CSR graph we process on
* @param[in] max_grid_size
* @param[in] num_gpus
* @param[in] max_queue_sizing
* @param[in] context CudaContext pointer for moderngpu APIs
*/
template <
    typename VertexId,
    typename Value,
    typename SizeT,
    bool INSTRUMENT>
void RunTests(
    const Csr<VertexId, Value, SizeT> &graph,
    int max_grid_size,
    int num_gpus,
    double max_queue_sizing,
    CudaContext& context)
{
    //
    // Phase 1: Connected Components. Also selects the BFS source below
    // (root of the largest component).
    //
    typedef CCProblem<
        VertexId,
        SizeT,
        Value,
        true> CCProblem_T; //use double buffer for edgemap and vertexmap.
    // Allocate host-side label array (for both reference and
    // gpu-computed results)
    VertexId *reference_component_ids =
        (VertexId*)malloc(sizeof(VertexId) * graph.nodes);
    VertexId *h_component_ids =
        (VertexId*)malloc(sizeof(VertexId) * graph.nodes);
    VertexId *reference_check =
        (g_quick) ? NULL : reference_component_ids;
    unsigned int ref_num_components = 0;
    // Allocate CC enactor
    CCEnactor<INSTRUMENT> cc_enactor(g_verbose);
    // Allocate problem on GPU
    CCProblem_T *cc_problem = new CCProblem_T;
    util::GRError(cc_problem->Init(
                      g_stream_from_host,
                      graph,
                      num_gpus),
                  "CC Problem Initialization Failed", __FILE__, __LINE__);
    //
    // Compute reference CPU CC solution for source-distance
    //
    if (reference_check != NULL)
    {
        printf("compute ref value\n");
        ref_num_components = RefCPUCC(
            graph.row_offsets,
            graph.column_indices,
            graph.nodes,
            reference_check);
        printf("\n");
    }
    // Perform CC
    GpuTimer gpu_timer;
    util::GRError(
        cc_problem->Reset(cc_enactor.GetFrontierType()),
        "CC Problem Data Reset Failed", __FILE__, __LINE__);
    gpu_timer.Start();
    util::GRError(
        cc_enactor.Enact(cc_problem, max_grid_size),
        "CC Problem Enact Failed", __FILE__, __LINE__);
    gpu_timer.Stop();
    float elapsed = gpu_timer.ElapsedMillis();
    // Copy out results
    util::GRError(
        cc_problem->Extract(h_component_ids),
        "CC Problem Data Extraction Failed", __FILE__, __LINE__);
    int num_errors = 0;
    // Validity
    if (ref_num_components == cc_problem->num_components)
        printf("CORRECT.\n");
    else {
        printf("INCORRECT. Ref Component Count: %d, "
               "GPU Computed Component Count: %d\n",
               ref_num_components, cc_problem->num_components);
        printf("TEST FAILED\n");
        // NOTE(review): this early return leaks reference_component_ids,
        // h_component_ids, and cc_problem — benign for a test driver, but
        // worth fixing.
        return;
    }
    // Compute size and root of each component
    VertexId *h_roots =
        new VertexId[cc_problem->num_components];
    unsigned int *h_histograms =
        new unsigned int[cc_problem->num_components];
    cc_problem->ComputeCCHistogram(h_component_ids, h_roots, h_histograms);
    // Display Solution
    DisplayCCSolution(h_component_ids, graph.nodes, ref_num_components);
    typedef CcList<VertexId> CcListType;
    //sort the components by size (descending; see CCCompare)
    CcListType *cclist =
        (CcListType*)malloc(sizeof(CcListType) * ref_num_components);
    // NOTE(review): signed int i vs unsigned ref_num_components comparison.
    for (int i = 0; i < ref_num_components; ++i) {
        cclist[i].root = h_roots[i];
        cclist[i].histogram = h_histograms[i];
    }
    std::stable_sort(
        cclist, cclist + ref_num_components, CCCompare<CcListType>);
    // Print out at most top 10 largest components
    int top = (ref_num_components < 10) ? ref_num_components : 10;
    printf("Top %d largest components:\n", top);
    for (int i = 0; i < top; ++i)
    {
        printf("CC ID: %d, CC Root: %d, CC Size: %d\n",
               i, cclist[i].root, cclist[i].histogram);
    }
    printf("GPU Connected Component finished in %lf msec.\n", elapsed);
    VertexId src = cclist[0].root; // Set the root of the largest
                                   // components as BFS source
    // Cleanup
    delete cc_problem;
    delete[] h_roots;
    delete[] h_histograms;
    free(cclist);
    free(reference_component_ids);
    free(h_component_ids);
    hipDeviceSynchronize();
    //
    // Phase 2: BFS from the root of the largest component.
    //
    typedef BFSProblem<
        VertexId,
        SizeT,
        Value,
        false, // MARK_PREDECESSORS disabled
        true, // Set to enable idempotent operation
        false> BFSProblem_T; // does not use double buffer
    // Allocate host-side label array (for both reference and
    // gpu-computed results)
    VertexId *reference_labels =
        (VertexId*)malloc(sizeof(VertexId) * graph.nodes);
    VertexId *h_labels =
        (VertexId*)malloc(sizeof(VertexId) * graph.nodes);
    reference_check =
        (g_quick) ? NULL : reference_labels;
    VertexId *h_preds =
        (VertexId*)malloc(sizeof(VertexId) * graph.nodes);
    // Allocate BFS enactor
    BFSEnactor<INSTRUMENT> bfs_enactor(g_verbose);
    // Allocate problem on GPU
    BFSProblem_T *bfs_problem = new BFSProblem_T;
    util::GRError(bfs_problem->Init(
                      g_stream_from_host,
                      graph,
                      num_gpus),
                  "BFS Problem Initialization Failed", __FILE__, __LINE__);
    //
    // Compute reference CPU BFS solution for source-distance
    //
    if (reference_check != NULL)
    {
        printf("compute ref value\n");
        SimpleReferenceBfs(
            graph,
            reference_check,
            src);
        printf("\n");
    }
    Stats *stats = new Stats("GPU BFS");
    long long total_queued = 0;
    VertexId search_depth = 0;
    double avg_duty = 0.0;
    // Perform BFS
    util::GRError(
        bfs_problem->Reset(src,bfs_enactor.GetFrontierType(), max_queue_sizing),
        "BFS Problem Data Reset Failed", __FILE__, __LINE__);
    gpu_timer.Start();
    util::GRError(
        bfs_enactor.Enact(context, bfs_problem, src, max_grid_size),
        "BFS Problem Enact Failed", __FILE__, __LINE__);
    gpu_timer.Stop();
    bfs_enactor.GetStatistics(total_queued, search_depth, avg_duty);
    elapsed = gpu_timer.ElapsedMillis();
    // Copy out results
    util::GRError(
        bfs_problem->Extract(h_labels, h_preds),
        "BFS Problem Data Extraction Failed", __FILE__, __LINE__);
    // Verify the result
    if (reference_check != NULL) {
        printf("Validity: ");
        num_errors += CompareResults(h_labels, reference_check, graph.nodes);
    }
    printf("\nFirst 40 labels of the GPU result.");
    // Display Solution
    DisplayBFSSolution(h_labels, h_preds, graph.nodes, true);
    DisplayBFSStats(
        *stats,
        src,
        h_labels,
        graph,
        elapsed,
        search_depth,
        total_queued,
        avg_duty);
    // Cleanup
    delete stats;
    delete bfs_problem;
    free(reference_labels);
    free(h_labels);
    free(h_preds);
    hipDeviceSynchronize();
    //
    // Phase 3: Betweenness Centrality over all sources.
    //
    // Perform BC
    // src == -1: presumably directs RefCPUBC to accumulate over all
    // sources (the GPU loop below enacts every vertex) — confirm.
    src = -1;
    typedef BCProblem<
        VertexId,
        SizeT,
        Value,
        true, // MARK_PREDECESSOR
        false> BCProblem_T; //does not use double buffer
    // Allocate host-side array (for both reference and gpu-computed results)
    Value *reference_bc_values =
        (Value*)malloc(sizeof(Value) * graph.nodes);
    Value *h_bc_values =
        (Value*)malloc(sizeof(Value) * graph.nodes);
    Value *reference_check_bc_values = (g_quick) ? NULL : reference_bc_values;
    // Allocate BC enactor
    BCEnactor<INSTRUMENT> bc_enactor(g_verbose);
    // Allocate problem on GPU
    BCProblem_T *bc_problem = new BCProblem_T;
    util::GRError(bc_problem->Init(
                      g_stream_from_host,
                      graph,
                      num_gpus),
                  "BC Problem Initialization Failed", __FILE__, __LINE__);
    //
    // Compute reference CPU BC solution for source-distance
    //
    if (reference_check_bc_values != NULL)
    {
        printf("compute ref value\n");
        RefCPUBC(
            graph.row_offsets,
            graph.column_indices,
            reference_check_bc_values,
            graph.nodes,
            src);
        printf("\n");
    }
    avg_duty = 0.0;
    // Perform BC: one enactment per source vertex, accumulated on device.
    VertexId start_src = 0;
    VertexId end_src = graph.nodes;
    gpu_timer.Start();
    for (VertexId i = start_src; i < end_src; ++i)
    {
        util::GRError(
            bc_problem->Reset(i, bc_enactor.GetFrontierType(), max_queue_sizing),
            "BC Problem Data Reset Failed", __FILE__, __LINE__);
        util::GRError(
            bc_enactor.Enact(context, bc_problem, i, max_grid_size),
            "BC Problem Enact Failed", __FILE__, __LINE__);
    }
    // Normalize BC value: each shortest path was counted in both
    // directions on an undirected graph, so scale by 0.5.
    hipLaunchKernelGGL(( util::MemsetScaleKernel), dim3(128), dim3(128), 0, 0,
        bc_problem->data_slices[0]->d_bc_values, 0.5f, graph.nodes);
    gpu_timer.Stop();
    elapsed = gpu_timer.ElapsedMillis();
    bc_enactor.GetStatistics(avg_duty);
    // Copy out results
    util::GRError(
        bc_problem->Extract(NULL, h_bc_values, NULL),
        "BC Problem Data Extraction Failed", __FILE__, __LINE__);
    // Verify the result
    if (reference_check_bc_values != NULL) {
        printf("Validity BC Value: ");
        num_errors += CompareResults(
            h_bc_values, reference_check_bc_values, graph.nodes, true);
        printf("\n");
    }
    printf("\nFirst 40 bc_values of the GPU result.");
    // Display Solution
    DisplayBCSolution(h_bc_values, graph.nodes);
    printf("GPU BC finished in %lf msec.\n", elapsed);
    if (INSTRUMENT && avg_duty != 0)
        printf("\n avg CTA duty: %.2f%%\n", avg_duty * 100);
    // num_errors aggregates BFS + BC mismatches (CC failure returns early above).
    if( 0 == num_errors ) {
        printf("\nTEST PASSED\n");
    }
    else {
        printf("\nTEST FAILED\n");
    }
    // Cleanup
    delete bc_problem;
    free(reference_bc_values);
    free(h_bc_values);
    hipDeviceSynchronize();
}
template <
    typename VertexId,
    typename Value,
    typename SizeT>
void RunTests(
    Csr<VertexId, Value, SizeT> &graph,
    CommandLineArgs &args,
    CudaContext& context)
{
    // Read driver-level options from the command line, then dispatch to
    // the instrumented or non-instrumented instantiation of the harness.
    int grid_size = 0;          // 0: let the enactor choose the grid size
    int gpu_count = 1;          // number of GPUs for the multi-GPU enactor
    double queue_sizing = 1.3;  // frontier size scaling factor
                                // (1.3 creates [1.3n]/[1.3m]-element
                                // vertex and edge frontiers)
    const bool collect_stats = args.CheckCmdLineFlag("instrumented");
    g_quick = args.CheckCmdLineFlag("quick");
    args.GetCmdLineArgument("num-gpus", gpu_count);
    g_verbose = args.CheckCmdLineFlag("v");
    if (collect_stats) {
        RunTests<VertexId, Value, SizeT, true>(
            graph, grid_size, gpu_count, queue_sizing, context);
    } else {
        RunTests<VertexId, Value, SizeT, false>(
            graph, grid_size, gpu_count, queue_sizing, context);
    }
}
/******************************************************************************
* Main
******************************************************************************/
// Entry point: parse arguments, build the CSR graph from a Matrix-Market
// file, then run the CC -> BFS -> BC pipeline via RunTests.
int main( int argc, char** argv)
{
    CommandLineArgs args(argc, argv);
    if ((argc < 2) || (args.CheckCmdLineFlag("help"))) {
        Usage();
        return 1;
    }
    // Select the GPU device (defaults to 0) and create the mgpu context.
    int dev = 0;
    args.GetCmdLineArgument("device", dev);
    ContextPtr context = mgpu::CreateCudaDevice(dev);
    // Parse graph-construction params
    g_undirected = true;
    std::string graph_type = argv[1];
    int flags = args.ParsedArgc();
    int graph_args = argc - flags - 1;
    if (graph_args < 1) {
        Usage();
        return 1;
    }
    //
    // Construct graph and perform search(es)
    //
    if (graph_type == "market") {
        // Matrix-market coordinate-formatted graph file
        typedef int VertexId; // Use as the node identifier type
        typedef float Value; // Use as the value type
        typedef int SizeT; // Use as the graph size type
        Csr<VertexId, Value, SizeT> csr(false); // default value for
                                                // stream_from_host is
                                                // false
        if (graph_args < 1) { Usage(); return 1; }
        // With no filename argument the graph is read from stdin.
        char *market_filename = (graph_args == 2) ? argv[2] : NULL;
        if (graphio::BuildMarketGraph<false>(
                market_filename,
                csr,
                g_undirected,
                false) != 0) // no inverse graph
        {
            return 1;
        }
        // csr.DisplayGraph();
        fflush(stdout);
        // Run tests
        RunTests(csr, args, *context);
    } else {
        // Unknown graph type
        fprintf(stderr, "Unspecified graph type\n");
        return 1;
    }
    return 0;
}
// Leave this at the end of the file
// Local Variables:
// mode:c++
// c-file-style: "NVIDIA"
// End:
| 60f455dac91a7085a40245d83fb93833ffdf3da4.cu | // ----------------------------------------------------------------
// Gunrock -- Fast and Efficient GPU Graph Library
// ----------------------------------------------------------------
// This source code is distributed under the terms of LICENSE.TXT
// in the root directory of this source distribution.
// ----------------------------------------------------------------
/**
* @file
* simple_example.cu
*
* @brief Simple example driver for all three primitives
*/
#include <stdio.h>
#include <string>
#include <deque>
#include <vector>
#include <iostream>
// Utilities and correctness-checking
#include <gunrock/util/test_utils.cuh>
// Graph construction utils
#include <gunrock/graphio/market.cuh>
// CC includes
#include <gunrock/app/cc/cc_enactor.cuh>
#include <gunrock/app/cc/cc_problem.cuh>
#include <gunrock/app/cc/cc_functor.cuh>
// BFS includes
#include <gunrock/app/bfs/bfs_enactor.cuh>
#include <gunrock/app/bfs/bfs_problem.cuh>
#include <gunrock/app/bfs/bfs_functor.cuh>
// BC includes
#include <gunrock/app/bc/bc_enactor.cuh>
#include <gunrock/app/bc/bc_problem.cuh>
#include <gunrock/app/bc/bc_functor.cuh>
// Operator includes
#include <gunrock/oprtr/filter/kernel.cuh>
#include <gunrock/oprtr/advance/kernel.cuh>
#include "cpu_graph_lib.hpp"
using namespace gunrock;
using namespace gunrock::util;
using namespace gunrock::oprtr;
using namespace gunrock::app::cc;
using namespace gunrock::app::bfs;
using namespace gunrock::app::bc;
/******************************************************************************
* Defines, constants, globals
******************************************************************************/
// Driver-level option flags, set from the command line in main()/RunTests().
bool g_verbose;           // verbose logging; passed to each enactor's constructor
bool g_undirected;        // treat input as undirected; passed to BuildMarketGraph (main forces true)
bool g_quick;             // when set, skip the CPU reference computation/validation
bool g_stream_from_host;  // passed to each Problem::Init; presumably streams graph data from host — confirm
// One (root vertex, component size) record per connected component; used to
// rank components by size after CC finishes (see CCCompare / RunTests).
template <typename VertexId>
struct CcList {
    VertexId root;            // representative (root) vertex of the component
    unsigned int histogram;   // number of vertices in the component
    CcList(VertexId root, unsigned int histogram) :
        root(root), histogram(histogram) {}
};
// Strict-weak-ordering comparator that sorts components by descending size;
// used with std::stable_sort over the CcList array in RunTests.
template<typename CcList>
bool CCCompare(
    CcList elem1,
    CcList elem2)
{
    return elem2.histogram < elem1.histogram;
}
/******************************************************************************
* Housekeeping Routines
******************************************************************************/
/**
 * @brief Prints command-line usage for this driver.
 *
 * Flags listed here are parsed in main()/RunTests():
 * --device, --instrumented, --quick (plus --num-gpus, --v, --help).
 */
void Usage()
{
    printf(
        "\nsimple_example <graph type> <graph type args> [--device=<device_index>] "
        "[--instrumented] [--quick]\n"
        "\n"
        "Graph types and args:\n"
        " market [<file>]\n"
        " Reads a Matrix-Market coordinate-formatted graph of directed/undirected\n"
        " edges from stdin (or from the optionally-specified file).\n"
        // Fix: the parsed flag is "instrumented" (see RunTests); the help text
        // previously advertised a nonexistent "--instrumentd" flag.
        "--instrumented: If include then show detailed kernel running stats.\n"
        "--quick: If include then do not perform CPU validity code.\n"
    );
}
/**
* @brief Displays the CC result (i.e., number of components)
*
* @tparam VertexId
* @tparam SizeT
*
* @param[in] comp_ids
* @param[in] nodes
* @param[in] num_components
*/
template<typename VertexId, typename SizeT>
void DisplayCCSolution(VertexId *comp_ids, SizeT nodes,
                       unsigned int num_components)
{
    // Always report the component count; dump per-vertex component ids
    // only for small graphs (<= 40 vertices).
    printf("Number of components: %d\n", num_components);
    if (nodes > 40) {
        return;
    }
    printf("[");
    for (VertexId v = 0; v < nodes; ++v) {
        PrintValue(v);
        printf(":");
        PrintValue(comp_ids[v]);
        printf(",");
        printf(" ");
    }
    printf("]\n");
}
/**
* @brief Displays the BFS result (i.e., distance from source)
*
* @tparam VertexId
* @tparam SizeT
*
* @param[in] source_path
* @param[in] preds
* @param[in] nodes
* @param[in] MARK_PREDECESSORS
*/
template<typename VertexId, typename SizeT>
void DisplayBFSSolution(VertexId *source_path, VertexId *preds, SizeT nodes,
                        bool MARK_PREDECESSORS)
{
    // Print at most the first 40 vertices as "vertex:label," pairs,
    // followed by the predecessor when MARK_PREDECESSORS is set.
    const SizeT limit = (nodes > 40) ? (SizeT)40 : nodes;
    printf("[");
    for (VertexId v = 0; v < limit; ++v) {
        PrintValue(v);
        printf(":");
        PrintValue(source_path[v]);
        printf(",");
        if (MARK_PREDECESSORS)
            PrintValue(preds[v]);
        printf(" ");
    }
    printf("]\n");
}
/**
* Displays the BC result (sigma value and BC value)
*
* @tparam Value
* @tparam SizeT
*
* @param[in] bc_values
* @param[in] nodes
*/
template<typename Value, typename SizeT>
void DisplayBCSolution(Value *bc_values, SizeT nodes)
{
    // Print "vertex:bc_value" pairs for at most the first 40 vertices.
    const SizeT limit = (nodes > 40) ? (SizeT)40 : nodes;
    printf("[");
    for (SizeT v = 0; v < limit; ++v) {
        PrintValue(v);
        printf(":");
        PrintValue(bc_values[v]);
        printf(" ");
    }
    printf("]\n");
}
/**
* Performance/Evaluation statistics
*/
struct Stats {
    const char *name;          // display name of the primitive (e.g. "GPU BFS")
    // NOTE(review): only `name` is read in this file (by DisplayBFSStats); the
    // Statistic members below are default-constructed but not otherwise used here.
    Statistic rate;            // traversal-rate samples
    Statistic search_depth;    // search-depth samples
    Statistic redundant_work;  // redundant-work percentage samples
    Statistic duty;            // average CTA duty samples
    Stats() : name(NULL), rate(), search_depth(), redundant_work(), duty() {}
    Stats(const char *name) :
        name(name), rate(), search_depth(), redundant_work(), duty() {}
};
/**
* @brief Displays timing and correctness statistics for BFS
*
* @tparam MARK_PREDECESSORS
* @tparam VertexId
* @tparam Value
* @tparam SizeT
*
* @param[in] stats Reference to ...
* @param[in] src
* @param[in] h_labels
* @param[in] graph Reference to ...
* @param[in] elapsed
* @param[in] search_depth
* @param[in] total_queued
* @param[in] avg_duty
*/
template<
    typename VertexId,
    typename Value,
    typename SizeT>
void DisplayBFSStats(
    Stats &stats,
    VertexId src,
    VertexId *h_labels,
    const Csr<VertexId, Value, SizeT> &graph,
    double elapsed,
    VertexId search_depth,
    long long total_queued,
    double avg_duty)
{
    // Compute nodes and edges visited
    SizeT edges_visited = 0;
    SizeT nodes_visited = 0;
    // A label > -1 means BFS reached the vertex (unvisited labels are -1;
    // see SimpleReferenceBfs).
    for (VertexId i = 0; i < graph.nodes; ++i) {
        if (h_labels[i] > -1) {
            ++nodes_visited;
            edges_visited += graph.row_offsets[i+1] - graph.row_offsets[i];
        }
    }
    double redundant_work = 0.0;
    if (total_queued > 0) {
        // measure duplicate edges put through queue
        redundant_work =
            ((double) total_queued - edges_visited) / edges_visited;
    }
    redundant_work *= 100;  // express as a percentage
    // Display test name
    printf("[%s] finished. ", stats.name);
    // Display statistics
    if (nodes_visited < 5) {
        printf("Fewer than 5 vertices visited.\n");
    } else {
        // Display the specific sample statistics.
        // elapsed is in ms, so edges / (elapsed * 1000) is millions of edges/s.
        double m_teps = (double) edges_visited / (elapsed * 1000.0);
        printf(" elapsed: %.3f ms, rate: %.3f MiEdges/s", elapsed, m_teps);
        if (search_depth != 0) {
            printf(", search_depth: %lld", (long long) search_depth);
        }
        if (avg_duty != 0) {
            printf("\n avg CTA duty: %.2f%%", avg_duty * 100);
        }
        printf("\n src: %lld, nodes_visited: %lld, edges visited: %lld",
               (long long) src, (long long) nodes_visited,
               (long long) edges_visited);
        if (total_queued > 0) {
            printf(", total queued: %lld", total_queued);
        }
        if (redundant_work > 0) {
            printf(", redundant work: %.2f%%", redundant_work);
        }
        printf("\n");
    }
}
/**
* @brief A simple CPU-based reference BFS ranking implementation.
*
* @tparam VertexId
* @tparam Value
* @tparam SizeT
*
* @param[in] graph Reference to ...
* @param[in] source_path
* @param[in] src
*/
template<
    typename VertexId,
    typename Value,
    typename SizeT>
void SimpleReferenceBfs(
    const Csr<VertexId, Value, SizeT> &graph,
    VertexId *source_path,
    VertexId src)
{
    //initialize distances: -1 marks "not yet visited"
    for (VertexId i = 0; i < graph.nodes; ++i) {
        source_path[i] = -1;
    }
    source_path[src] = 0;
    VertexId search_depth = 0;
    // Initialize queue for managing previously-discovered nodes
    std::deque<VertexId> frontier;
    frontier.push_back(src);
    //
    //Perform BFS (standard queue-based level traversal)
    //
    CpuTimer cpu_timer;
    cpu_timer.Start();
    while (!frontier.empty()) {
        // Dequeue node from frontier
        VertexId dequeued_node = frontier.front();
        frontier.pop_front();
        VertexId neighbor_dist = source_path[dequeued_node] + 1;
        // Locate adjacency list via CSR row offsets
        int edges_begin = graph.row_offsets[dequeued_node];
        int edges_end = graph.row_offsets[dequeued_node + 1];
        for (int edge = edges_begin; edge < edges_end; ++edge) {
            //Lookup neighbor and enqueue if undiscovered
            VertexId neighbor = graph.column_indices[edge];
            if (source_path[neighbor] == -1) {
                source_path[neighbor] = neighbor_dist;
                if (search_depth < neighbor_dist) {
                    search_depth = neighbor_dist;
                }
                frontier.push_back(neighbor);
            }
        }
    }
    cpu_timer.Stop();
    float elapsed = cpu_timer.ElapsedMillis();
    // Report depth as number of BFS levels (max distance + 1).
    search_depth++;
    printf("CPU BFS finished in %lf msec. Search depth is:%d\n",
           elapsed, search_depth);
}
/**
* @brief Run tests
*
* @tparam VertexId
* @tparam Value
* @tparam SizeT
* @tparam INSTRUMENT
*
* @param[in] graph Reference to the CSR graph we process on
* @param[in] max_grid_size
* @param[in] num_gpus
* @param[in] max_queue_sizing
* @param[in] context CudaContext pointer for moderngpu APIs
*/
template <
    typename VertexId,
    typename Value,
    typename SizeT,
    bool INSTRUMENT>
void RunTests(
    const Csr<VertexId, Value, SizeT> &graph,
    int max_grid_size,
    int num_gpus,
    double max_queue_sizing,
    CudaContext& context)
{
    //
    // Phase 1: Connected Components. Also selects the BFS source below
    // (root of the largest component).
    //
    typedef CCProblem<
        VertexId,
        SizeT,
        Value,
        true> CCProblem_T; //use double buffer for edgemap and vertexmap.
    // Allocate host-side label array (for both reference and
    // gpu-computed results)
    VertexId *reference_component_ids =
        (VertexId*)malloc(sizeof(VertexId) * graph.nodes);
    VertexId *h_component_ids =
        (VertexId*)malloc(sizeof(VertexId) * graph.nodes);
    VertexId *reference_check =
        (g_quick) ? NULL : reference_component_ids;
    unsigned int ref_num_components = 0;
    // Allocate CC enactor
    CCEnactor<INSTRUMENT> cc_enactor(g_verbose);
    // Allocate problem on GPU
    CCProblem_T *cc_problem = new CCProblem_T;
    util::GRError(cc_problem->Init(
                      g_stream_from_host,
                      graph,
                      num_gpus),
                  "CC Problem Initialization Failed", __FILE__, __LINE__);
    //
    // Compute reference CPU CC solution for source-distance
    //
    if (reference_check != NULL)
    {
        printf("compute ref value\n");
        ref_num_components = RefCPUCC(
            graph.row_offsets,
            graph.column_indices,
            graph.nodes,
            reference_check);
        printf("\n");
    }
    // Perform CC
    GpuTimer gpu_timer;
    util::GRError(
        cc_problem->Reset(cc_enactor.GetFrontierType()),
        "CC Problem Data Reset Failed", __FILE__, __LINE__);
    gpu_timer.Start();
    util::GRError(
        cc_enactor.Enact(cc_problem, max_grid_size),
        "CC Problem Enact Failed", __FILE__, __LINE__);
    gpu_timer.Stop();
    float elapsed = gpu_timer.ElapsedMillis();
    // Copy out results
    util::GRError(
        cc_problem->Extract(h_component_ids),
        "CC Problem Data Extraction Failed", __FILE__, __LINE__);
    int num_errors = 0;
    // Validity
    if (ref_num_components == cc_problem->num_components)
        printf("CORRECT.\n");
    else {
        printf("INCORRECT. Ref Component Count: %d, "
               "GPU Computed Component Count: %d\n",
               ref_num_components, cc_problem->num_components);
        printf("TEST FAILED\n");
        // NOTE(review): this early return leaks reference_component_ids,
        // h_component_ids, and cc_problem — benign for a test driver, but
        // worth fixing.
        return;
    }
    // Compute size and root of each component
    VertexId *h_roots =
        new VertexId[cc_problem->num_components];
    unsigned int *h_histograms =
        new unsigned int[cc_problem->num_components];
    cc_problem->ComputeCCHistogram(h_component_ids, h_roots, h_histograms);
    // Display Solution
    DisplayCCSolution(h_component_ids, graph.nodes, ref_num_components);
    typedef CcList<VertexId> CcListType;
    //sort the components by size (descending; see CCCompare)
    CcListType *cclist =
        (CcListType*)malloc(sizeof(CcListType) * ref_num_components);
    // NOTE(review): signed int i vs unsigned ref_num_components comparison.
    for (int i = 0; i < ref_num_components; ++i) {
        cclist[i].root = h_roots[i];
        cclist[i].histogram = h_histograms[i];
    }
    std::stable_sort(
        cclist, cclist + ref_num_components, CCCompare<CcListType>);
    // Print out at most top 10 largest components
    int top = (ref_num_components < 10) ? ref_num_components : 10;
    printf("Top %d largest components:\n", top);
    for (int i = 0; i < top; ++i)
    {
        printf("CC ID: %d, CC Root: %d, CC Size: %d\n",
               i, cclist[i].root, cclist[i].histogram);
    }
    printf("GPU Connected Component finished in %lf msec.\n", elapsed);
    VertexId src = cclist[0].root; // Set the root of the largest
                                   // components as BFS source
    // Cleanup
    delete cc_problem;
    delete[] h_roots;
    delete[] h_histograms;
    free(cclist);
    free(reference_component_ids);
    free(h_component_ids);
    cudaDeviceSynchronize();
    //
    // Phase 2: BFS from the root of the largest component.
    //
    typedef BFSProblem<
        VertexId,
        SizeT,
        Value,
        false, // MARK_PREDECESSORS disabled
        true, // Set to enable idempotent operation
        false> BFSProblem_T; // does not use double buffer
    // Allocate host-side label array (for both reference and
    // gpu-computed results)
    VertexId *reference_labels =
        (VertexId*)malloc(sizeof(VertexId) * graph.nodes);
    VertexId *h_labels =
        (VertexId*)malloc(sizeof(VertexId) * graph.nodes);
    reference_check =
        (g_quick) ? NULL : reference_labels;
    VertexId *h_preds =
        (VertexId*)malloc(sizeof(VertexId) * graph.nodes);
    // Allocate BFS enactor
    BFSEnactor<INSTRUMENT> bfs_enactor(g_verbose);
    // Allocate problem on GPU
    BFSProblem_T *bfs_problem = new BFSProblem_T;
    util::GRError(bfs_problem->Init(
                      g_stream_from_host,
                      graph,
                      num_gpus),
                  "BFS Problem Initialization Failed", __FILE__, __LINE__);
    //
    // Compute reference CPU BFS solution for source-distance
    //
    if (reference_check != NULL)
    {
        printf("compute ref value\n");
        SimpleReferenceBfs(
            graph,
            reference_check,
            src);
        printf("\n");
    }
    Stats *stats = new Stats("GPU BFS");
    long long total_queued = 0;
    VertexId search_depth = 0;
    double avg_duty = 0.0;
    // Perform BFS
    util::GRError(
        bfs_problem->Reset(src,bfs_enactor.GetFrontierType(), max_queue_sizing),
        "BFS Problem Data Reset Failed", __FILE__, __LINE__);
    gpu_timer.Start();
    util::GRError(
        bfs_enactor.Enact(context, bfs_problem, src, max_grid_size),
        "BFS Problem Enact Failed", __FILE__, __LINE__);
    gpu_timer.Stop();
    bfs_enactor.GetStatistics(total_queued, search_depth, avg_duty);
    elapsed = gpu_timer.ElapsedMillis();
    // Copy out results
    util::GRError(
        bfs_problem->Extract(h_labels, h_preds),
        "BFS Problem Data Extraction Failed", __FILE__, __LINE__);
    // Verify the result
    if (reference_check != NULL) {
        printf("Validity: ");
        num_errors += CompareResults(h_labels, reference_check, graph.nodes);
    }
    printf("\nFirst 40 labels of the GPU result.");
    // Display Solution
    DisplayBFSSolution(h_labels, h_preds, graph.nodes, true);
    DisplayBFSStats(
        *stats,
        src,
        h_labels,
        graph,
        elapsed,
        search_depth,
        total_queued,
        avg_duty);
    // Cleanup
    delete stats;
    delete bfs_problem;
    free(reference_labels);
    free(h_labels);
    free(h_preds);
    cudaDeviceSynchronize();
    //
    // Phase 3: Betweenness Centrality over all sources.
    //
    // Perform BC
    // src == -1: presumably directs RefCPUBC to accumulate over all
    // sources (the GPU loop below enacts every vertex) — confirm.
    src = -1;
    typedef BCProblem<
        VertexId,
        SizeT,
        Value,
        true, // MARK_PREDECESSOR
        false> BCProblem_T; //does not use double buffer
    // Allocate host-side array (for both reference and gpu-computed results)
    Value *reference_bc_values =
        (Value*)malloc(sizeof(Value) * graph.nodes);
    Value *h_bc_values =
        (Value*)malloc(sizeof(Value) * graph.nodes);
    Value *reference_check_bc_values = (g_quick) ? NULL : reference_bc_values;
    // Allocate BC enactor
    BCEnactor<INSTRUMENT> bc_enactor(g_verbose);
    // Allocate problem on GPU
    BCProblem_T *bc_problem = new BCProblem_T;
    util::GRError(bc_problem->Init(
                      g_stream_from_host,
                      graph,
                      num_gpus),
                  "BC Problem Initialization Failed", __FILE__, __LINE__);
    //
    // Compute reference CPU BC solution for source-distance
    //
    if (reference_check_bc_values != NULL)
    {
        printf("compute ref value\n");
        RefCPUBC(
            graph.row_offsets,
            graph.column_indices,
            reference_check_bc_values,
            graph.nodes,
            src);
        printf("\n");
    }
    avg_duty = 0.0;
    // Perform BC: one enactment per source vertex, accumulated on device.
    VertexId start_src = 0;
    VertexId end_src = graph.nodes;
    gpu_timer.Start();
    for (VertexId i = start_src; i < end_src; ++i)
    {
        util::GRError(
            bc_problem->Reset(i, bc_enactor.GetFrontierType(), max_queue_sizing),
            "BC Problem Data Reset Failed", __FILE__, __LINE__);
        util::GRError(
            bc_enactor.Enact(context, bc_problem, i, max_grid_size),
            "BC Problem Enact Failed", __FILE__, __LINE__);
    }
    // Normalize BC value: each shortest path was counted in both
    // directions on an undirected graph, so scale by 0.5.
    util::MemsetScaleKernel<<<128, 128>>>
        (bc_problem->data_slices[0]->d_bc_values, 0.5f, graph.nodes);
    gpu_timer.Stop();
    elapsed = gpu_timer.ElapsedMillis();
    bc_enactor.GetStatistics(avg_duty);
    // Copy out results
    util::GRError(
        bc_problem->Extract(NULL, h_bc_values, NULL),
        "BC Problem Data Extraction Failed", __FILE__, __LINE__);
    // Verify the result
    if (reference_check_bc_values != NULL) {
        printf("Validity BC Value: ");
        num_errors += CompareResults(
            h_bc_values, reference_check_bc_values, graph.nodes, true);
        printf("\n");
    }
    printf("\nFirst 40 bc_values of the GPU result.");
    // Display Solution
    DisplayBCSolution(h_bc_values, graph.nodes);
    printf("GPU BC finished in %lf msec.\n", elapsed);
    if (INSTRUMENT && avg_duty != 0)
        printf("\n avg CTA duty: %.2f%%\n", avg_duty * 100);
    // num_errors aggregates BFS + BC mismatches (CC failure returns early above).
    if( 0 == num_errors ) {
        printf("\nTEST PASSED\n");
    }
    else {
        printf("\nTEST FAILED\n");
    }
    // Cleanup
    delete bc_problem;
    free(reference_bc_values);
    free(h_bc_values);
    cudaDeviceSynchronize();
}
template <
    typename VertexId,
    typename Value,
    typename SizeT>
void RunTests(
    Csr<VertexId, Value, SizeT> &graph,
    CommandLineArgs &args,
    CudaContext& context)
{
    // Launch parameters: a grid size of 0 defers the choice to the enactor;
    // single-GPU by default; frontier queues sized at 1.3x the vertex/edge
    // counts (i.e., [1.3n]- and [1.3m]-element frontiers).
    int    max_grid_size    = 0;
    int    num_gpus         = 1;
    double max_queue_sizing = 1.3;

    // Pull run-time options off the command line.
    bool instrumented = args.CheckCmdLineFlag("instrumented");
    g_quick = args.CheckCmdLineFlag("quick");
    args.GetCmdLineArgument("num-gpus", num_gpus);
    g_verbose = args.CheckCmdLineFlag("v");

    // Kernel instrumentation is a compile-time template flag, so the
    // run-time choice has to be dispatched to one of two instantiations.
    if (instrumented) {
        RunTests<VertexId, Value, SizeT, true>(
            graph, max_grid_size, num_gpus, max_queue_sizing, context);
    }
    else {
        RunTests<VertexId, Value, SizeT, false>(
            graph, max_grid_size, num_gpus, max_queue_sizing, context);
    }
}
/******************************************************************************
* Main
******************************************************************************/
int main( int argc, char** argv)
{
    CommandLineArgs args(argc, argv);

    // Require at least a graph-type argument; honor --help.
    if ((argc < 2) || (args.CheckCmdLineFlag("help"))) {
        Usage();
        return 1;
    }

    // Select and initialize the CUDA device (default: device 0).
    int dev = 0;
    args.GetCmdLineArgument("device", dev);
    ContextPtr context = mgpu::CreateCudaDevice(dev);

    // Parse graph-construction params; BC treats edges as undirected.
    g_undirected = true;
    std::string graph_type = argv[1];
    int flags      = args.ParsedArgc();
    int graph_args = argc - flags - 1;
    if (graph_args < 1) {
        Usage();
        return 1;
    }

    //
    // Construct graph and perform search(es)
    //
    if (graph_type != "market") {
        // Unknown graph type
        fprintf(stderr, "Unspecified graph type\n");
        return 1;
    }

    // Matrix-market coordinate-formatted graph file
    typedef int   VertexId;  // node identifier type
    typedef float Value;     // per-node value type (BC scores)
    typedef int   SizeT;     // graph size type

    Csr<VertexId, Value, SizeT> csr(false);     // stream_from_host = false
    if (graph_args < 1) { Usage(); return 1; }  // kept from original (redundant re-check)

    // With exactly two graph args the second one is the .mtx path;
    // otherwise the builder reads from stdin.
    char *market_filename = (graph_args == 2) ? argv[2] : NULL;
    if (graphio::BuildMarketGraph<false>(
            market_filename,
            csr,
            g_undirected,
            false) != 0)  // no inverse graph
    {
        return 1;
    }
    fflush(stdout);

    // Run tests
    RunTests(csr, args, *context);
    return 0;
}
// Leave this at the end of the file
// Local Variables:
// mode:c++
// c-file-style: "NVIDIA"
// End:
|
368e18db9c084d9f7ec79eaad743217448f22dcf.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
/*
-- MAGMA (version 2.1.0) --
Univ. of Tennessee, Knoxville
Univ. of California, Berkeley
Univ. of Colorado, Denver
@date August 2016
zsymv.cu is nearly identical to zhemv.cu, just change names and drop MAGMA_Z_CONJ.
zhemv_kernel_U (upper) in zhemv_upper.cu is very similar to
zhemv_kernel_L (lower) in zhemv.cu; diff the two files to compare.
@precisions normal z -> s d c
@author Mark Gates
*/
#include "magma_internal.h"
#include "commonblas_z.h"
#define PRECISION_z
#define NB_X 64
#define NB_Y 4
#define bank_shift 33
#define quarter_NB_X 16
#define half_NB_X 32
/***************************************************************************//**
Lower case, compute block multiply, work = A*x, for any size n:
[ (A11*x1) (A21^H*x2) (A31^H*x3) ] [ A11 A21^H A31^H ] [ x1 ]
work = [ --- (A21*x1 + A22*x2) (A32^H*x3) ] = [ A21 A22 A32^H ] * [ x2 ]
[ --- --- (A31*x1 + A32*x2 + A33*x3) ] [ A31 A32 A33 ] [ x3 ]
Uses a 64x4 thread block.
For diagonal tiles, covers a 64x64 tile using three 32x32 tiles (plus one gets transposed).
For off-diagonal tiles, covers a 64x64 tile using four 64x16 tiles.
In both cases, each thread multiplies 4 elements.
For rows past the bottom of the matrix, the A pointer is adjusted to be the
last valid row of A, which multiple threads will read.
Extra rows are ignored when saving results to work.
Columns past the right edge are explicitly ignored when loading.
x values past the bottom are set to zero, thus, extra columns are zeroed
when multiplying.
Previously:
[ (A11*x1) --- ]
work = [ (A21^H*x2) (A21*x1 + A22*x2) --- ]
[ (A31^H*x3) (A32^H*x3) (A31*x1 + A32*x2 + A33*x3) ]
which doesn't work as well because that has dimension blocks*NB by blocks,
where blocks*NB >= n, and it can be that blocks*NB > lda, so it won't fit in
lda*blocks space. This is why it used to need lwork = lda*(blocks + 1).
*******************************************************************************/
__global__ void
zhemv_kernel_L(
    int n,
    magmaDoubleComplex const * __restrict__ A, int lda,
    magmaDoubleComplex const * __restrict__ x, int incx,
    magmaDoubleComplex * __restrict__ work)
{
    // Launched by magmablas_zhemv_work with grid = (ceil(n/NB_X), 1, 1) and
    // block = (NB_X, NB_Y, 1) = 64x4 threads; no dynamic shared memory.
    // For the z precision this body requires compute capability >= 2.0;
    // on older architectures the host wrapper dispatches to magma_zhemv instead.
#if defined(PRECISION_s) || defined(PRECISION_d) || defined(PRECISION_c) || (__CUDA_ARCH__ >= 200)

    // treats sA as 16x64 block
    #define sA16(i_, j_) (sA[(i_)][(j_)]) // i.e., sA[ (i_)*(NB_X+3) + (j_) ]

    // treats sA as 32x32 block
    #define sA32(i_, j_) (sA[0][(i_) + bank_shift*(j_)])

    // 64x4 thread block
    const int tx = threadIdx.x;
    const int ty = threadIdx.y;
    const int blk = blockIdx.x;
    const int blk_ind = NB_X * blk;
    const int td = NB_X * ty + tx;

    // 32x8 thread block (flat thread id re-partitioned for the 32x32 tile work)
    const int tx2 = td % half_NB_X;
    const int ty2 = td / half_NB_X;

    // If this blk has fewer than NB_X rows, partial is the number of valid rows,
    // so tx = 0, ..., partial-1 are valid rows, and tx >= partial are invalid.
    // Else, partial == 0.
    const int partial = (blk == gridDim.x - 1 ? (n % NB_X) : 0);

    magmaDoubleComplex psum, psum_t;
    magmaDoubleComplex total = MAGMA_Z_ZERO;

    // sA is used as a 32x32 block, sA32(i,j),
    // and as a 16x64 block, sA16(i,j), in different parts of the code.
    // sA must be at least half_NB_X*bank_shift = 32x33 = 1056;
    // quarter_NB_X*(NB_X + 2) = 16*(64 + 2) = 1056
    __shared__ magmaDoubleComplex sA [quarter_NB_X][NB_X + 3]; /* Why +3? seems it only needs +2. Does +3 reduce bank conflicts? */
    __shared__ magmaDoubleComplex sx_blk[NB_X]; // for x[ blk ]
    __shared__ magmaDoubleComplex sx_jj [NB_X]; // for x[ jj ], which cycles over all blocks left of diag

    magmaDoubleComplex rA[4];
    magmaDoubleComplex psums_t[4];

    // --------------------
    // load 64x1 block x(blk_ind + 0:63) into sx_blk
    x += (blk_ind + tx)*incx; // x is x(blk_ind + tx)
    if ( ty == 0 ) {
        if ( partial == 0 || tx < partial ) {
            sx_blk[tx] = x[0];
        }
        else {
            // rows past the bottom contribute zero to the products
            sx_blk[tx] = MAGMA_Z_ZERO;
        }
    }

    // --------------------
    // move to block row
    work += blk*lda; // work is work(0, blk)

    A += blk_ind; // A is A(blk_ind, 0)
    A += ty2*lda + tx2; // A is A(blk_ind + tx2, ty2)

    // move to 32x32 diag block
    A += blk_ind*lda; // A is A(blk_ind + tx2, blk_ind + ty2)

    // load 32x32 diag block A(blk_ind + 0:31, blk_ind + 0:31) into sA,
    // as four 32x8 sections one after another:
    // columns 0:7, then 8:15, then 16:23, then 24:31
    if ( partial ) {
        if ( tx2 >= partial ) {
            A = A - tx2 + (partial - 1); // A is A(blk_ind + partial-1, blk_ind + ty2), the bottom-most valid row
        }
        #pragma unroll
        for (int j=0; j < half_NB_X; j += 8) {
            if ( ty2+j < partial ) {
                sA32(tx2, ty2 + j) = A[j*lda];
            }
            else {
                // columns past the right edge are zeroed rather than read
                sA32(tx2, ty2 + j) = MAGMA_Z_ZERO;
            }
        }
        if ( tx2 >= partial ) {
            A = A + tx2 - (partial - 1); // A is A(blk_ind + tx2, blk_ind + ty2)
        }
    }
    else {
        #pragma unroll
        for (int j=0; j < half_NB_X; j += 8) {
            sA32(tx2, ty2 + j) = A[j*lda];
        }
    }
    __syncthreads();

    // symmetrize 32x32 diag block, copying lower to upper triangle,
    // as four 32x8 sections in parallel:
    // columns 0,4,8,12,16,20,24,28; then 1,5,...,29; then 2,6,...,30, then 3,7,...,31
    #pragma unroll
    for (int j=ty2*4; j < ty2*4 + 4; j++) {
        if ( j < tx2 ) {
            sA32(j, tx2) = MAGMA_Z_CONJ( sA32(tx2, j) );
        }
    }
    __syncthreads();

    // multiply 32x32 diag block * x
    // each thread does partial row sA(tx2, ty2*4 : ty2*4 + 3)
    psum = MAGMA_Z_ZERO;
    #pragma unroll
    for (int j=0; j < 4; j++) {
        psum += sA32(tx2, ty2*4 + j) * sx_blk[ty2*4 + j];
    }
    __syncthreads();

    // store partial row sums
    sA32(ty2, tx2) = psum;
    __syncthreads();

    // sum up partial row sums, so thread (tx2,0) has total for row (blk_ind + tx2)
    if ( ty2 == 0 ) {
        total = sA32(0, tx2) + sA32(1, tx2)
              + sA32(2, tx2) + sA32(3, tx2)
              + sA32(4, tx2) + sA32(5, tx2)
              + sA32(6, tx2) + sA32(7, tx2);
    }
    __syncthreads();

    // --------------------
    // move to next 32x32 diag block, then repeat steps from first diag block
    A += half_NB_X + half_NB_X*lda; // A is A(blk_ind + NB/2 + tx2, blk_ind + NB/2 + ty2)

    // load 32x32 diag block A[block + 0:31, block + 0:31] into sA
    if ( partial ) {
        if ( tx2 + half_NB_X >= partial ) {
            A = A - (tx2 + half_NB_X) + (partial - 1);
        }
        #pragma unroll
        for (int j=0; j < half_NB_X; j += 8) {
            if ( ty2+j + half_NB_X < partial ) {
                sA32(tx2, ty2 + j) = A[j*lda];
            }
            else {
                sA32(tx2, ty2 + j) = MAGMA_Z_ZERO;
            }
        }
        if ( tx2 + half_NB_X >= partial ) {
            A = A + (tx2 + half_NB_X) - (partial - 1);
        }
    }
    else {
        #pragma unroll
        for (int j=0; j < half_NB_X; j += 8) {
            sA32(tx2, ty2 + j) = A[j*lda];
        }
    }
    __syncthreads();

    // symmetrize 32x32 diag block, copying lower to upper triangle
    #pragma unroll
    for (int j=ty2*4; j < ty2*4 + 4; j++) {
        if ( j < tx2 ) {
            sA32(j, tx2) = MAGMA_Z_CONJ( sA32(tx2, j) );
        }
    }
    __syncthreads();

    // multiply 32x32 diag block * x
    psum = MAGMA_Z_ZERO;
    #pragma unroll
    for (int j=0; j < 4; j++) {
        psum += sA32(tx2, ty2*4 + j) * sx_blk[half_NB_X + ty2*4 + j];
    }
    __syncthreads();

    // store partial row sums
    sA32(ty2, tx2) = psum;
    __syncthreads();

    // sum up partial row sums, so thread (tx2,1) has total for row (blk_ind + NB/2 + tx2)
    if ( ty2 == 1 ) {
        total = sA32(0, tx2) + sA32(1, tx2)
              + sA32(2, tx2) + sA32(3, tx2)
              + sA32(4, tx2) + sA32(5, tx2)
              + sA32(6, tx2) + sA32(7, tx2);
    }
    __syncthreads();

    // --------------------
    // move to off-diag 32x32 block
    A -= half_NB_X*lda; // A is A(blk_ind + NB/2 + tx2, blk_ind + ty2)

    // load 32x32 block of A into sA,
    // as four 32x8 sections one after another:
    // columns 0:7, then 8:15, then 16:23, then 24:31
    if ( partial ) {
        if ( tx2 + half_NB_X >= partial ) {
            A = A - (tx2 + half_NB_X) + (partial - 1);
        }
        #pragma unroll
        for (int j=0; j < half_NB_X; j += 8) {
            if ( ty2+j < partial ) {
                sA32(tx2, ty2 + j) = A[j*lda];
            }
            else {
                sA32(tx2, ty2 + j) = MAGMA_Z_ZERO;
            }
        }
        if ( tx2 + half_NB_X >= partial ) {
            A = A + (tx2 + half_NB_X) - (partial - 1);
        }
    }
    else {
        #pragma unroll
        for (int j=0; j < half_NB_X; j += 8) {
            sA32(tx2, ty2 + j) = A[j*lda];
        }
    }
    __syncthreads();

    // multiply 32x32 block (below diag)
    psum = MAGMA_Z_ZERO;
    #pragma unroll
    for (int j=0; j < 4; j++) {
        psum += sA32(tx2, ty2 + j*8) * sx_blk[j*8 + ty2];
    }
    //__syncthreads(); // no sync needed here

    // multiply transposed 32x32 block (above diag)
    psum_t = MAGMA_Z_ZERO;
    #pragma unroll
    for (int j=0; j < 4; j++) {
        psum_t += MAGMA_Z_CONJ( sA32(ty2*4 + j, tx2) ) * sx_blk[half_NB_X + ty2*4 + j];
    }
    __syncthreads();

    // store partial sums for non-transposed 32x32 block
    sA32(ty2, tx2) = psum;
    __syncthreads();

    // sum up partial row sums, so thread (tx2,1) has total for row (blk_ind + NB/2 + tx2)
    if ( ty2 == 1 ) {
        total = total
              + sA32(0, tx2) + sA32(1, tx2)
              + sA32(2, tx2) + sA32(3, tx2)
              + sA32(4, tx2) + sA32(5, tx2)
              + sA32(6, tx2) + sA32(7, tx2);
    }
    __syncthreads();

    // store partial sums for transposed 32x32 block
    sA32(ty2, tx2) = psum_t;
    __syncthreads();

    // sum up partial row sums, so thread (tx2,0) has total for row (blk_ind + tx2)
    if ( ty2 == 0 ) {
        total = total
              + sA32(0, tx2) + sA32(1, tx2)
              + sA32(2, tx2) + sA32(3, tx2)
              + sA32(4, tx2) + sA32(5, tx2)
              + sA32(6, tx2) + sA32(7, tx2);
    }
    __syncthreads();

    // --------------------
    // move to leftmost 64x64 block in block row, and
    // switch thread offset from (tx2,ty2) 32x8 block to (tx,ty) 64x4 block
    A -= half_NB_X; // A is A(blk_ind + tx2, blk_ind + ty2)
    A -= blk_ind*lda; // A is A(blk_ind + tx2, ty2)
    A -= ty2*lda + tx2; // A is A(blk_ind, 0)
    A += 4*ty*lda + tx; // A is A(blk_ind + tx, 4*ty)

    if ( partial && tx >= partial ) {
        A = A - tx + (partial - 1); // A is A(blk_ind + partial-1, 4*ty), the bottom-most valid row
    }

    x -= blk_ind*incx; // x is x(tx)

    // 16x16 thread block (flat thread id re-partitioned again)
    const int tx4 = td % quarter_NB_X;
    const int ty4 = td / quarter_NB_X;

    // cycle over blocks jj left of diagonal, in block row blk
    for (int jj=0; jj < blk; ++jj) {
        // load 64x1 block x(jj_ind + 0:63) into sx_jj
        // since this block is left of diagonal, x must have all NB rows
        if ( ty == 0 ) {
            sx_jj[tx] = x[jj*NB_X*incx];
        }
        __syncthreads();

        for (int k=0; k < 4; k++) {
            // load 64x16 block of A into rA, 4 elements per thread,
            // as four 64x4 sections in parallel:
            // columns 0,4,8,12; then 1,5,9,13; then 2,6,10,14; then 3,7,11,15
            // since this block is left of diagonal, it has all NB columns,
            // and block of x must have all NB rows.
            #pragma unroll
            for (int j=0; j < 4; j++) {
                rA[j] = A[j*lda];
            }

            // 1) multiply 64x16 block A_{blk,jj} * x_jj
            //    each thread does partial row rA(tx + 16*k, ty*4 + 16*k : ty*4 + 3 + 16*k)
            // 2) multiply transposed 16x64 block A_{blk,jj}^H * x_blk,
            //    storing each product Aji*xi to sA(j,i)
            #pragma unroll
            for (int j=0; j < 4; j++) {
                total += rA[j] * sx_jj[quarter_NB_X*k + ty*4 + j]; // y_blk = A_{blk,jj} * x_jj
                sA16(ty*4 + j, tx) = MAGMA_Z_CONJ( rA[j] ) * sx_blk[tx]; // y_jj = A_{blk,jj}^H * x_blk
            }
            __syncthreads();

            // do partial row sums for transposed 16x64 result
            // use 16x16 thread grid (tx4, ty4) instead of 64x4 (tx, ty)
            // sum sixteen 16x4 sections in parallel:
            // columns 0,4,8,...,60; then 1,5,...,61; then 2,6,...,62; then 3,7,...,63
            psum_t = MAGMA_Z_ZERO;
            #pragma unroll
            for (int j=0; j < 4; j++) {
                psum_t += sA16(tx4, ty4*4 + j);
            }
            __syncthreads();

            // store partial row sums of transposed result, y_jj (locally)
            psums_t[k] = psum_t;

            // move right to next 64x16 block
            A += lda * quarter_NB_X; // A is A(blk_ind + tx#, jj*NB_X + (k+1)*NB_X/4 + 4*ty), # tx or partial
        }
        // already at next 64x64 block
        // A is A(blk_ind + tx#, (jj+1)*NB_X + 4*ty), # tx or partial

        // store partial row sums of transposed result, y_jj
        #pragma unroll
        for (int k=0; k < 4; k++) {
            sA16(tx4, ty4 + quarter_NB_X*k) = psums_t[k];
        }
        __syncthreads();

        // sum up partial row sums of transposed result, y_jj, and store final total to workspace
        // thread (tx4,ty4) where ty4 < 4 sums row tx4 + ty4*16
        // since this is the transposed block above the diagonal, it must have all NB rows
        if ( ty4 < 4 ) {
            int ty4_nb4 = ty4*quarter_NB_X;
            psum_t = sA16(tx4, 0 + ty4_nb4) + sA16(tx4, 1 + ty4_nb4)
                   + sA16(tx4, 2 + ty4_nb4) + sA16(tx4, 3 + ty4_nb4)
                   + sA16(tx4, 4 + ty4_nb4) + sA16(tx4, 5 + ty4_nb4)
                   + sA16(tx4, 6 + ty4_nb4) + sA16(tx4, 7 + ty4_nb4)
                   + sA16(tx4, 8 + ty4_nb4) + sA16(tx4, 9 + ty4_nb4)
                   + sA16(tx4, 10 + ty4_nb4) + sA16(tx4, 11 + ty4_nb4)
                   + sA16(tx4, 12 + ty4_nb4) + sA16(tx4, 13 + ty4_nb4)
                   + sA16(tx4, 14 + ty4_nb4) + sA16(tx4, 15 + ty4_nb4);
            work[jj*NB_X + tx4 + ty4_nb4] = psum_t; // store at work( jj*NB_X + tx4 + ty4*16, blk )
        }
        __syncthreads();
    }

    // store row sums
    sA16(ty, tx) = total;
    __syncthreads();

    // sum up final total, y_blk, for row tx
    // (rows past the matrix bottom are ignored when saving results)
    if ( ty == 0 && (partial == 0 || tx < partial) ) {
        total = sA16(0, tx)
              + sA16(1, tx)
              + sA16(2, tx)
              + sA16(3, tx);
        work[blk*NB_X + tx] = total; // store at work( blk*NB_X + tx, blk )
    }
#endif /* PRECISION_[sdc] || (__CUDA_ARCH__ >= 200) */
}
// end zhemv_kernel_L
/***************************************************************************//**
Lower case, sum up final results
Each block sums one block row; each thread sums one row.
On input (for 3 blocks):
[ (A11*x1) (A21^H*x2) (A31^H*x3) ]
work = [ --- (A21*x1 + A22*x2) (A32^H*x3) ]
[ --- --- (A31*x1 + A32*x2 + A33*x3) ]
On output:
[ (A11*x1) + (A21^H*x2) + (A31^H*x3) ]
y = alpha*[ (A21*x1 + A22*x2) + (A32^H*x3) ] + beta*y
[ (A31*x1 + A32*x2 + A33*x3) ]
*******************************************************************************/
__global__ void
zhemv_kernel_L_sum(
    int n,
    magmaDoubleComplex alpha,
    int lda,
    magmaDoubleComplex beta,
    magmaDoubleComplex * __restrict__ y, int incy,
    magmaDoubleComplex const * __restrict__ work )
{
    // One block per NB_X-row slice of y; each thread finishes one row.
    const int row     = blockIdx.x * NB_X + threadIdx.x;
    const int nblocks = gridDim.x;

    // Guard the ragged last block: never write outside [0, n).
    if ( row < n ) {
        // For the lower case, row `row` of work carries partial products in
        // columns blockIdx.x .. nblocks-1; accumulate them.
        magmaDoubleComplex const *wptr = work + row + blockIdx.x*lda;
        magmaDoubleComplex Ax = MAGMA_Z_ZERO;
        for (int jj = blockIdx.x; jj < nblocks; ++jj) {
            Ax += *wptr;
            wptr += lda;
        }
        // y := alpha*(A*x) + beta*y for this row.
        y[row * incy] = beta*y[row * incy] + alpha*Ax;
    }
}
/***************************************************************************//**
Purpose
-------
magmablas_zhemv_work performs the matrix-vector operation:
y := alpha*A*x + beta*y,
where alpha and beta are scalars, x and y are n element vectors and
A is an n by n Hermitian matrix.
Arguments
----------
@param[in]
uplo magma_uplo_t.
On entry, UPLO specifies whether the upper or lower
triangular part of the array A is to be referenced as
follows:
- = MagmaUpper: Only the upper triangular part of A is to be referenced.
- = MagmaLower: Only the lower triangular part of A is to be referenced.
@param[in]
n INTEGER.
On entry, N specifies the order of the matrix A.
N must be at least zero.
@param[in]
alpha COMPLEX_16.
On entry, ALPHA specifies the scalar alpha.
@param[in]
dA COMPLEX_16 array of DIMENSION ( LDDA, n ).
Before entry with UPLO = MagmaUpper, the leading n by n
upper triangular part of the array A must contain the upper
triangular part of the Hermitian matrix and the strictly
lower triangular part of A is not referenced.
Before entry with UPLO = MagmaLower, the leading n by n
lower triangular part of the array A must contain the lower
triangular part of the Hermitian matrix and the strictly
upper triangular part of A is not referenced.
Note that the imaginary parts of the diagonal elements need
not be set and are assumed to be zero.
@param[in]
ldda INTEGER.
On entry, LDDA specifies the first dimension of A as declared
in the calling (sub) program. LDDA must be at least
max( 1, n ).
It is recommended that ldda is multiple of 16. Otherwise
performance would be deteriorated as the memory accesses
would not be fully coalescent.
@param[in]
dx COMPLEX_16 array of dimension at least
( 1 + ( n - 1 )*abs( INCX ) ).
Before entry, the incremented array X must contain the n
element vector x.
@param[in]
incx INTEGER.
On entry, INCX specifies the increment for the elements of
X. INCX must not be zero.
@param[in]
beta COMPLEX_16.
On entry, BETA specifies the scalar beta. When BETA is
supplied as zero then Y need not be set on input.
@param[in,out]
dy COMPLEX_16 array of dimension at least
( 1 + ( n - 1 )*abs( INCY ) ).
Before entry, the incremented array Y must contain the n
element vector y. On exit, Y is overwritten by the updated
vector y.
@param[in]
incy INTEGER.
On entry, INCY specifies the increment for the elements of
Y. INCY must not be zero.
@param[in]
dwork (workspace) COMPLEX_16 array on the GPU, dimension (MAX(1, LWORK)),
@param[in]
lwork INTEGER.
The dimension of the array DWORK. LWORK >= LDDA * ceil( N / NB_X ),
where NB_X = 64.
@param[in]
queue magma_queue_t.
Queue to execute in.
MAGMA implements zhemv through two steps:
1) perform the multiplication in each thread block and put the
intermediate value in dwork.
2) sum the intermediate values and store the final result in y.
magmablas_zhemv_work requires users to provide a workspace, while
magmablas_zhemv is a wrapper routine allocating the workspace inside the
routine and provides the same interface as cublas.
If users need to call zhemv frequently, we suggest using
magmablas_zhemv_work instead of magmablas_zhemv. As the overhead to
allocate and free in device memory in magmablas_zhemv would hurt performance.
Our tests show that this penalty is about 10 Gflop/s when the matrix
size is around 10000.
@ingroup magma_hemv
*******************************************************************************/
extern "C"
magma_int_t
magmablas_zhemv_work(
    magma_uplo_t uplo, magma_int_t n,
    magmaDoubleComplex alpha,
    magmaDoubleComplex_const_ptr dA, magma_int_t ldda,
    magmaDoubleComplex_const_ptr dx, magma_int_t incx,
    magmaDoubleComplex beta,
    magmaDoubleComplex_ptr dy, magma_int_t incy,
    magmaDoubleComplex_ptr dwork, magma_int_t lwork,
    magma_queue_t queue )
{
#if defined(PRECISION_z)
    // z precision requires CUDA ARCH 2.x; fall back to the CUBLAS-style
    // routine on older hardware.
    magma_int_t arch = magma_getdevice_arch();
    if ( arch < 200 ) {
        magma_zhemv( uplo, n, alpha, dA, ldda, dx, incx, beta, dy, incy, queue );
        return MAGMA_SUCCESS;
    }
#endif

    // --------------------
    // [sdc] precisions, or z precision with CUDA ARCH 2.x
    const bool upper = (uplo == MagmaUpper);
    const magma_int_t nblocks = magma_ceildiv( n, NB_X );
    const magma_int_t lwmin   = ldda*nblocks;

    // Validate arguments; the first offending parameter sets the info code
    // (negative index of the bad argument, BLAS-style).
    magma_int_t info = 0;
    if ((! upper) && (uplo != MagmaLower))
        info = -1;
    else if ( n < 0 )
        info = -2;
    else if ( ldda < max(1, n) )
        info = -5;
    else if ( incx == 0 )
        info = -7;
    else if ( incy == 0 )
        info = -10;
    else if ( lwork < lwmin )
        info = -12;

    if (info != 0) {
        magma_xerbla( __func__, -(info) );
        return info;
    }

    // Quick return: nothing to do when n == 0 or y is left untouched
    // (alpha == 0 and beta == 1).
    if ( (n == 0) || ( MAGMA_Z_EQUAL(alpha, MAGMA_Z_ZERO) && MAGMA_Z_EQUAL(beta, MAGMA_Z_ONE) ) )
        return info;

    // Two-step hemv: (1) per-block multiply into dwork,
    // (2) reduce the partial products into dy.
    dim3 grid( nblocks, 1, 1 );
    dim3 threads( NB_X, NB_Y, 1 );
    dim3 threads_sum( NB_X, 1, 1 );

    if ( upper ) {
        hipLaunchKernelGGL(( zhemv_kernel_U), dim3(grid), dim3(threads), 0, queue->cuda_stream() ,
            n, dA, ldda, dx, incx, dwork);

        hipLaunchKernelGGL(( zhemv_kernel_U_sum), dim3(grid), dim3(threads_sum), 0, queue->cuda_stream() ,
            n, alpha, ldda, beta, dy, incy, dwork);
    }
    else {
        hipLaunchKernelGGL(( zhemv_kernel_L), dim3(grid), dim3(threads), 0, queue->cuda_stream() ,
            n, dA, ldda, dx, incx, dwork);

        hipLaunchKernelGGL(( zhemv_kernel_L_sum), dim3(grid), dim3(threads_sum), 0, queue->cuda_stream() ,
            n, alpha, ldda, beta, dy, incy, dwork);
    }
    return info;
}
/***************************************************************************//**
Purpose
-------
magmablas_zhemv performs the matrix-vector operation:
y := alpha*A*x + beta*y,
where alpha and beta are scalars, x and y are n element vectors and
A is an n by n Hermitian matrix.
Arguments
----------
@param[in]
uplo magma_uplo_t.
On entry, UPLO specifies whether the upper or lower
triangular part of the array A is to be referenced as
follows:
- = MagmaUpper: Only the upper triangular part of A is to be referenced.
- = MagmaLower: Only the lower triangular part of A is to be referenced.
@param[in]
n INTEGER.
On entry, N specifies the order of the matrix A.
N must be at least zero.
@param[in]
alpha COMPLEX_16.
On entry, ALPHA specifies the scalar alpha.
@param[in]
dA COMPLEX_16 array of DIMENSION ( LDDA, n ).
Before entry with UPLO = MagmaUpper, the leading n by n
upper triangular part of the array A must contain the upper
triangular part of the Hermitian matrix and the strictly
lower triangular part of A is not referenced.
Before entry with UPLO = MagmaLower, the leading n by n
lower triangular part of the array A must contain the lower
triangular part of the Hermitian matrix and the strictly
upper triangular part of A is not referenced.
Note that the imaginary parts of the diagonal elements need
not be set and are assumed to be zero.
@param[in]
ldda INTEGER.
On entry, LDDA specifies the first dimension of A as declared
in the calling (sub) program. LDDA must be at least
max( 1, n ).
It is recommended that ldda is multiple of 16. Otherwise
performance would be deteriorated as the memory accesses
would not be fully coalescent.
@param[in]
dx COMPLEX_16 array of dimension at least
( 1 + ( n - 1 )*abs( INCX ) ).
Before entry, the incremented array X must contain the n
element vector x.
@param[in]
incx INTEGER.
On entry, INCX specifies the increment for the elements of
X. INCX must not be zero.
@param[in]
beta COMPLEX_16.
On entry, BETA specifies the scalar beta. When BETA is
supplied as zero then Y need not be set on input.
@param[in,out]
dy COMPLEX_16 array of dimension at least
( 1 + ( n - 1 )*abs( INCY ) ).
Before entry, the incremented array Y must contain the n
element vector y. On exit, Y is overwritten by the updated
vector y.
@param[in]
incy INTEGER.
On entry, INCY specifies the increment for the elements of
Y. INCY must not be zero.
@param[in]
queue magma_queue_t
Queue to execute in.
@ingroup magma_hemv
*******************************************************************************/
extern "C"
magma_int_t
magmablas_zhemv_q(
    magma_uplo_t uplo, magma_int_t n,
    magmaDoubleComplex alpha,
    magmaDoubleComplex_const_ptr dA, magma_int_t ldda,
    magmaDoubleComplex_const_ptr dx, magma_int_t incx,
    magmaDoubleComplex beta,
    magmaDoubleComplex_ptr dy, magma_int_t incy,
    magma_queue_t queue )
{
#if defined(PRECISION_z)
    // z precision requires CUDA ARCH 2.x; call CUBLAS version instead.
    magma_int_t arch = magma_getdevice_arch();
    if ( arch < 200 ) {
        magma_zhemv( uplo, n, alpha, dA, ldda, dx, incx, beta, dy, incy, queue );
        return MAGMA_SUCCESS;
    }
#endif

    // --------------------
    // [sdc] precisions, or z precision with CUDA ARCH 2.x
    bool upper = (uplo == MagmaUpper);

    /*
     * Test the input parameters.
     * info is the negative index of the first bad argument (BLAS-style).
     */
    magma_int_t info = 0;
    if ((! upper) && (uplo != MagmaLower)) {
        info = -1;
    } else if ( n < 0 ) {
        info = -2;
    } else if ( ldda < max(1, n) ) {
        info = -5;
    } else if ( incx == 0 ) {
        info = -7;
    } else if ( incy == 0 ) {
        info = -10;
    }

    if (info != 0) {
        magma_xerbla( __func__, -(info) );
        return info;
    }

    /*
     * Quick return if possible (n == 0, or alpha == 0 and beta == 1
     * leave y unchanged).
     */
    if ( (n == 0) || ( MAGMA_Z_EQUAL(alpha, MAGMA_Z_ZERO) && MAGMA_Z_EQUAL(beta, MAGMA_Z_ONE) ) )
        return info;

    // Workspace for magmablas_zhemv_work: ldda x ceil(n/NB_X) intermediate
    // per-block products.
    magmaDoubleComplex_ptr dwork = NULL;
    magma_int_t blocks = magma_ceildiv( n, NB_X );
    magma_int_t lwork  = ldda*blocks;

    // Bug fix: check the allocator's returned status directly. dwork is a
    // local that is not guaranteed to be set to NULL when magma_zmalloc
    // fails, so testing only the pointer could read an indeterminate value
    // and miss the allocation failure.
    if ( MAGMA_SUCCESS != magma_zmalloc( &dwork, lwork ) ) {
        info = MAGMA_ERR_DEVICE_ALLOC;
        magma_xerbla( __func__, -(info) );
        return info;
    }

    magmablas_zhemv_work( uplo, n, alpha, dA, ldda, dx, incx, beta, dy, incy,
                          dwork, lwork, queue );

    magma_free( dwork );

    return info;
}
// end magmablas_zhemv
| 368e18db9c084d9f7ec79eaad743217448f22dcf.cu | /*
-- MAGMA (version 2.1.0) --
Univ. of Tennessee, Knoxville
Univ. of California, Berkeley
Univ. of Colorado, Denver
@date August 2016
zsymv.cu is nearly identical to zhemv.cu, just change names and drop MAGMA_Z_CONJ.
zhemv_kernel_U (upper) in zhemv_upper.cu is very similar to
zhemv_kernel_L (lower) in zhemv.cu; diff the two files to compare.
@precisions normal z -> s d c
@author Mark Gates
*/
#include "magma_internal.h"
#include "commonblas_z.h"
#define PRECISION_z
#define NB_X 64
#define NB_Y 4
#define bank_shift 33
#define quarter_NB_X 16
#define half_NB_X 32
/***************************************************************************//**
Lower case, compute block multiply, work = A*x, for any size n:
[ (A11*x1) (A21^H*x2) (A31^H*x3) ] [ A11 A21^H A31^H ] [ x1 ]
work = [ --- (A21*x1 + A22*x2) (A32^H*x3) ] = [ A21 A22 A32^H ] * [ x2 ]
[ --- --- (A31*x1 + A32*x2 + A33*x3) ] [ A31 A32 A33 ] [ x3 ]
Uses a 64x4 thread block.
For diagonal tiles, covers a 64x64 tile using three 32x32 tiles (plus one gets transposed).
For off-diagonal tiles, covers a 64x64 tile using four 64x16 tiles.
In both cases, each thread multiplies 4 elements.
For rows past the bottom of the matrix, the A pointer is adjusted to be the
last valid row of A, which multiple threads will read.
Extra rows are ignored when saving results to work.
Columns past the right edge are explicitly ignored when loading.
x values past the bottom are set to zero, thus, extra columns are zeroed
when multiplying.
Previously:
[ (A11*x1) --- ]
work = [ (A21^H*x2) (A21*x1 + A22*x2) --- ]
[ (A31^H*x3) (A32^H*x3) (A31*x1 + A32*x2 + A33*x3) ]
which doesn't work as well because that has dimension blocks*NB by blocks,
where blocks*NB >= n, and it can be that blocks*NB > lda, so it won't fit in
lda*blocks space. This is why it used to need lwork = lda*(blocks + 1).
*******************************************************************************/
__global__ void
zhemv_kernel_L(
int n,
magmaDoubleComplex const * __restrict__ A, int lda,
magmaDoubleComplex const * __restrict__ x, int incx,
magmaDoubleComplex * __restrict__ work)
{
#if defined(PRECISION_s) || defined(PRECISION_d) || defined(PRECISION_c) || (__CUDA_ARCH__ >= 200)
// treats sA as 16x64 block
#define sA16(i_, j_) (sA[(i_)][(j_)]) // i.e., sA[ (i_)*(NB_X+3) + (j_) ]
// treats sA as 32x32 block
#define sA32(i_, j_) (sA[0][(i_) + bank_shift*(j_)])
// 64x4 thread block
const int tx = threadIdx.x;
const int ty = threadIdx.y;
const int blk = blockIdx.x;
const int blk_ind = NB_X * blk;
const int td = NB_X * ty + tx;
// 32x8 thread block
const int tx2 = td % half_NB_X;
const int ty2 = td / half_NB_X;
// If this blk has fewer than NB_X rows, partial is the number of valid rows,
// so tx = 0, ..., partial-1 are valid rows, and tx >= partial are invalid.
// Else, partial == 0.
const int partial = (blk == gridDim.x - 1 ? (n % NB_X) : 0);
magmaDoubleComplex psum, psum_t;
magmaDoubleComplex total = MAGMA_Z_ZERO;
// sA is used as a 32x32 block, sA32(i,j),
// and as a 16x64 block, sA16(i,j), in different parts of the code.
// sA must be at least half_NB_X*bank_shift = 32x33 = 1056;
// quarter_NB_X*(NB_X + 2) = 16*(64 + 2) = 1056
__shared__ magmaDoubleComplex sA [quarter_NB_X][NB_X + 3]; /* Why +3? seems it only needs +2. Does +3 reduce bank conflicts? */
__shared__ magmaDoubleComplex sx_blk[NB_X]; // for x[ blk ]
__shared__ magmaDoubleComplex sx_jj [NB_X]; // for x[ jj ], which cycles over all blocks left of diag
magmaDoubleComplex rA[4];
magmaDoubleComplex psums_t[4];
// --------------------
// load 64x1 block x(blk_ind + 0:63) into sx_blk
x += (blk_ind + tx)*incx; // x is x(blk_ind + tx)
if ( ty == 0 ) {
if ( partial == 0 || tx < partial ) {
sx_blk[tx] = x[0];
}
else {
sx_blk[tx] = MAGMA_Z_ZERO;
}
}
// --------------------
// move to block row
work += blk*lda; // work is work(0, blk)
A += blk_ind; // A is A(blk_ind, 0)
A += ty2*lda + tx2; // A is A(blk_ind + tx2, ty2)
// move to 32x32 diag block
A += blk_ind*lda; // A is A(blk_ind + tx2, blk_ind + ty2)
// load 32x32 diag block A(blk_ind + 0:31, blk_ind + 0:31) into sA,
// as four 32x8 sections one after another:
// columns 0:7, then 8:15, then 16:23, then 24:31
if ( partial ) {
if ( tx2 >= partial ) {
A = A - tx2 + (partial - 1); // A is A(blk_ind + partial-1, blk_ind + ty2), the bottom-most valid row
}
#pragma unroll
for (int j=0; j < half_NB_X; j += 8) {
if ( ty2+j < partial ) {
sA32(tx2, ty2 + j) = A[j*lda];
}
else {
sA32(tx2, ty2 + j) = MAGMA_Z_ZERO;
}
}
if ( tx2 >= partial ) {
A = A + tx2 - (partial - 1); // A is A(blk_ind + tx2, blk_ind + ty2)
}
}
else {
#pragma unroll
for (int j=0; j < half_NB_X; j += 8) {
sA32(tx2, ty2 + j) = A[j*lda];
}
}
__syncthreads();
// symmetrize 32x32 diag block, copying lower to upper triangle,
// as four 32x8 sections in parallel:
// columns 0,4,8,12,16,20,24,28; then 1,5,...,29; then 2,6,...,30, then 3,7,...,31
#pragma unroll
for (int j=ty2*4; j < ty2*4 + 4; j++) {
if ( j < tx2 ) {
sA32(j, tx2) = MAGMA_Z_CONJ( sA32(tx2, j) );
}
}
__syncthreads();
// multiply 32x32 diag block * x
// each thread does partial row sA(tx2, ty2*4 : ty2*4 + 3)
psum = MAGMA_Z_ZERO;
#pragma unroll
for (int j=0; j < 4; j++) {
psum += sA32(tx2, ty2*4 + j) * sx_blk[ty2*4 + j];
}
__syncthreads();
// store partial row sums
sA32(ty2, tx2) = psum;
__syncthreads();
// sum up partial row sums, so thread (tx2,0) has total for row (blk_ind + tx2)
if ( ty2 == 0 ) {
total = sA32(0, tx2) + sA32(1, tx2)
+ sA32(2, tx2) + sA32(3, tx2)
+ sA32(4, tx2) + sA32(5, tx2)
+ sA32(6, tx2) + sA32(7, tx2);
}
__syncthreads();
// --------------------
// move to next 32x32 diag block, then repeat steps from first diag block
A += half_NB_X + half_NB_X*lda; // A is A(blk_ind + NB/2 + tx2, blk_ind + NB/2 + ty2)
// load 32x32 diag block A[block + 0:31, block + 0:31] into sA
if ( partial ) {
if ( tx2 + half_NB_X >= partial ) {
A = A - (tx2 + half_NB_X) + (partial - 1);
}
#pragma unroll
for (int j=0; j < half_NB_X; j += 8) {
if ( ty2+j + half_NB_X < partial ) {
sA32(tx2, ty2 + j) = A[j*lda];
}
else {
sA32(tx2, ty2 + j) = MAGMA_Z_ZERO;
}
}
if ( tx2 + half_NB_X >= partial ) {
A = A + (tx2 + half_NB_X) - (partial - 1);
}
}
else {
#pragma unroll
for (int j=0; j < half_NB_X; j += 8) {
sA32(tx2, ty2 + j) = A[j*lda];
}
}
__syncthreads();
// symmetrize 32x32 diag block, copying lower to upper triangle
#pragma unroll
for (int j=ty2*4; j < ty2*4 + 4; j++) {
if ( j < tx2 ) {
sA32(j, tx2) = MAGMA_Z_CONJ( sA32(tx2, j) );
}
}
__syncthreads();
// multiply 32x32 diag block * x
psum = MAGMA_Z_ZERO;
#pragma unroll
for (int j=0; j < 4; j++) {
psum += sA32(tx2, ty2*4 + j) * sx_blk[half_NB_X + ty2*4 + j];
}
__syncthreads();
// store partial row sums
sA32(ty2, tx2) = psum;
__syncthreads();
// sum up partial row sums, so thread (tx2,1) has total for row (blk_ind + NB/2 + tx2)
if ( ty2 == 1 ) {
total = sA32(0, tx2) + sA32(1, tx2)
+ sA32(2, tx2) + sA32(3, tx2)
+ sA32(4, tx2) + sA32(5, tx2)
+ sA32(6, tx2) + sA32(7, tx2);
}
__syncthreads();
// --------------------
// move to off-diag 32x32 block
A -= half_NB_X*lda; // A is A(blk_ind + NB/2 + tx2, blk_ind + ty2)
// load 32x32 block of A into sA,
// as four 32x8 sections one after another:
// columns 0:7, then 8:15, then 16:23, then 24:31
if ( partial ) {
if ( tx2 + half_NB_X >= partial ) {
A = A - (tx2 + half_NB_X) + (partial - 1);
}
#pragma unroll
for (int j=0; j < half_NB_X; j += 8) {
if ( ty2+j < partial ) {
sA32(tx2, ty2 + j) = A[j*lda];
}
else {
sA32(tx2, ty2 + j) = MAGMA_Z_ZERO;
}
}
if ( tx2 + half_NB_X >= partial ) {
A = A + (tx2 + half_NB_X) - (partial - 1);
}
}
else {
#pragma unroll
for (int j=0; j < half_NB_X; j += 8) {
sA32(tx2, ty2 + j) = A[j*lda];
}
}
__syncthreads();
// multiply 32x32 block (below diag)
psum = MAGMA_Z_ZERO;
#pragma unroll
for (int j=0; j < 4; j++) {
psum += sA32(tx2, ty2 + j*8) * sx_blk[j*8 + ty2];
}
//__syncthreads(); // no sync needed here
// multiply transposed 32x32 block (above diag)
psum_t = MAGMA_Z_ZERO;
#pragma unroll
for (int j=0; j < 4; j++) {
psum_t += MAGMA_Z_CONJ( sA32(ty2*4 + j, tx2) ) * sx_blk[half_NB_X + ty2*4 + j];
}
__syncthreads();
// store partial sums for non-transposed 32x32 block
sA32(ty2, tx2) = psum;
__syncthreads();
// sum up partial row sums, so thread (tx2,1) has total for row (blk_ind + NB/2 + tx2)
if ( ty2 == 1 ) {
total = total
+ sA32(0, tx2) + sA32(1, tx2)
+ sA32(2, tx2) + sA32(3, tx2)
+ sA32(4, tx2) + sA32(5, tx2)
+ sA32(6, tx2) + sA32(7, tx2);
}
__syncthreads();
// store partial sums for transposed 32x32 block
sA32(ty2, tx2) = psum_t;
__syncthreads();
// sum up partial row sums, so thread (tx2,0) has total for row (blk_ind + tx2)
if ( ty2 == 0 ) {
total = total
+ sA32(0, tx2) + sA32(1, tx2)
+ sA32(2, tx2) + sA32(3, tx2)
+ sA32(4, tx2) + sA32(5, tx2)
+ sA32(6, tx2) + sA32(7, tx2);
}
__syncthreads();
// --------------------
// move to leftmost 64x64 block in block row, and
// switch thread offset from (tx2,ty2) 32x8 block to (tx,ty) 64x4 block
A -= half_NB_X; // A is A(blk_ind + tx2, blk_ind + ty2)
A -= blk_ind*lda; // A is A(blk_ind + tx2, ty2)
A -= ty2*lda + tx2; // A is A(blk_ind, 0)
A += 4*ty*lda + tx; // A is A(blk_ind + tx, 4*ty)
if ( partial && tx >= partial ) {
A = A - tx + (partial - 1); // A is A(blk_ind + partial-1, 4*ty), the bottom-most valid row
}
x -= blk_ind*incx; // x is x(tx)
// 16x16 thread block
const int tx4 = td % quarter_NB_X;
const int ty4 = td / quarter_NB_X;
// cycle over blocks jj left of diagonal, in block row blk
for (int jj=0; jj < blk; ++jj) {
// load 64x1 block x(jj_ind + 0:63) into sx_jj
// since this block is left of diagonal, x must have all NB rows
if ( ty == 0 ) {
sx_jj[tx] = x[jj*NB_X*incx];
}
__syncthreads();
for (int k=0; k < 4; k++) {
// load 64x16 block of A into rA, 4 elements per thread,
// as four 64x4 sections in parallel:
// columns 0,4,8,12; then 1,5,9,13; then 2,6,10,14; then 3,7,11,15
// since this block is left of diagonal, it has all NB columns,
// and block of x must have all NB rows.
#pragma unroll
for (int j=0; j < 4; j++) {
rA[j] = A[j*lda];
}
// 1) multiply 64x16 block A_{blk,jj} * x_jj
// each thread does partial row rA(tx + 16*k, ty*4 + 16*k : ty*4 + 3 + 16*k)
// 2) multiply transposed 16x64 block A_{blk,jj}^H * x_blk,
// storing each product Aji*xi to sA(j,i)
#pragma unroll
for (int j=0; j < 4; j++) {
total += rA[j] * sx_jj[quarter_NB_X*k + ty*4 + j]; // y_blk = A_{blk,jj} * x_jj
sA16(ty*4 + j, tx) = MAGMA_Z_CONJ( rA[j] ) * sx_blk[tx]; // y_jj = A_{blk,jj}^H * x_blk
}
__syncthreads();
// do partial row sums for transposed 16x64 result
// use 16x16 thread grid (tx4, ty4) instead of 64x4 (tx, ty)
// sum sixteen 16x4 sections in parallel:
// columns 0,4,8,...,60; then 1,5,...,61; then 2,6,...,62; then 3,7,...,63
psum_t = MAGMA_Z_ZERO;
#pragma unroll
for (int j=0; j < 4; j++) {
psum_t += sA16(tx4, ty4*4 + j);
}
__syncthreads();
// store partial row sums of transposed result, y_jj (locally)
psums_t[k] = psum_t;
// move right to next 64x16 block
A += lda * quarter_NB_X; // A is A(blk_ind + tx#, jj*NB_x + (k+1)*NB_X/4 + 4*ty), # tx or partial
}
// already at next 64x64 block
// A is A(blk_ind + tx#, (jj+1)*NB_x + 4*ty), # tx or partial
// store partial row sums of transposed result, y_jj
#pragma unroll
for (int k=0; k < 4; k++) {
sA16(tx4, ty4 + quarter_NB_X*k) = psums_t[k];
}
__syncthreads();
// sum up partial row sums of transposed result, y_jj, and store final total to workspace
// thread (tx4,ty4) where ty4 < 4 sums row tx4 + ty4*16
// since this is the transposed block above the diagonal, it must have all NB rows
if ( ty4 < 4 ) {
int ty4_nb4 = ty4*quarter_NB_X;
psum_t = sA16(tx4, 0 + ty4_nb4) + sA16(tx4, 1 + ty4_nb4)
+ sA16(tx4, 2 + ty4_nb4) + sA16(tx4, 3 + ty4_nb4)
+ sA16(tx4, 4 + ty4_nb4) + sA16(tx4, 5 + ty4_nb4)
+ sA16(tx4, 6 + ty4_nb4) + sA16(tx4, 7 + ty4_nb4)
+ sA16(tx4, 8 + ty4_nb4) + sA16(tx4, 9 + ty4_nb4)
+ sA16(tx4, 10 + ty4_nb4) + sA16(tx4, 11 + ty4_nb4)
+ sA16(tx4, 12 + ty4_nb4) + sA16(tx4, 13 + ty4_nb4)
+ sA16(tx4, 14 + ty4_nb4) + sA16(tx4, 15 + ty4_nb4);
work[jj*NB_X + tx4 + ty4_nb4] = psum_t; // store at work( jj*NB_X + tx4 + ty4*16, blk )
}
__syncthreads();
}
// store row sums
sA16(ty, tx) = total;
__syncthreads();
// sum up final total, y_blk, for row tx
if ( ty == 0 && (partial == 0 || tx < partial) ) {
total = sA16(0, tx)
+ sA16(1, tx)
+ sA16(2, tx)
+ sA16(3, tx);
work[blk*NB_X + tx] = total; // store at work( blk*NB_X + tx, blk )
}
#endif /* PRECISION_[sdc] || (__CUDA_ARCH__ >= 200) */
}
// end zhemv_kernel_L
/***************************************************************************//**
Lower case, sum up final results
Each block sums one block row; each thread sums one row.
On input (for 3 blocks):
[ (A11*x1) (A21^H*x2) (A31^H*x3) ]
work = [ --- (A21*x1 + A22*x2) (A32^H*x3) ]
[ --- --- (A31*x1 + A32*x2 + A33*x3) ]
On output:
[ (A11*x1) + (A21^H*x2) + (A31^H*x3) ]
y = alpha*[ (A21*x1 + A22*x2) + (A32^H*x3) ] + beta*y
          [ (A31*x1 + A32*x2 + A33*x3)              ]
*******************************************************************************/
/* Lower case, sum up final results.
   One thread block per block row of `work`; each thread reduces one row.
   For row `row` in block `blk`, the partial products live at
   work( row, blk : gridDim.x-1 ), stored column-major with leading
   dimension lda. The reduced value is folded into y as
   y = beta*y + alpha*sum. */
__global__ void
zhemv_kernel_L_sum(
    int n,
    magmaDoubleComplex alpha,
    int lda,
    magmaDoubleComplex beta,
    magmaDoubleComplex * __restrict__ y, int incy,
    magmaDoubleComplex const * __restrict__ work )
{
    const int row     = blockIdx.x * NB_X + threadIdx.x;
    const int nblocks = gridDim.x;

    // Guard the ragged tail: never touch rows at or beyond n.
    if ( row < n ) {
        magmaDoubleComplex acc = MAGMA_Z_ZERO;
        // Partial sums for this row start at column blockIdx.x of work.
        magmaDoubleComplex const *wp = work + row + blockIdx.x * lda;
        for (int j = blockIdx.x; j < nblocks; ++j, wp += lda) {
            acc += *wp;
        }
        y[row * incy] = beta * y[row * incy] + alpha * acc;
    }
}
/***************************************************************************//**
Purpose
-------
magmablas_zhemv_work performs the matrix-vector operation:
y := alpha*A*x + beta*y,
where alpha and beta are scalars, x and y are n element vectors and
A is an n by n Hermitian matrix.
Arguments
----------
@param[in]
uplo magma_uplo_t.
On entry, UPLO specifies whether the upper or lower
triangular part of the array A is to be referenced as
follows:
- = MagmaUpper: Only the upper triangular part of A is to be referenced.
- = MagmaLower: Only the lower triangular part of A is to be referenced.
@param[in]
n INTEGER.
On entry, N specifies the order of the matrix A.
N must be at least zero.
@param[in]
alpha COMPLEX_16.
On entry, ALPHA specifies the scalar alpha.
@param[in]
dA COMPLEX_16 array of DIMENSION ( LDDA, n ).
Before entry with UPLO = MagmaUpper, the leading n by n
upper triangular part of the array A must contain the upper
triangular part of the Hermitian matrix and the strictly
lower triangular part of A is not referenced.
Before entry with UPLO = MagmaLower, the leading n by n
lower triangular part of the array A must contain the lower
triangular part of the Hermitian matrix and the strictly
upper triangular part of A is not referenced.
Note that the imaginary parts of the diagonal elements need
not be set and are assumed to be zero.
@param[in]
ldda INTEGER.
On entry, LDDA specifies the first dimension of A as declared
in the calling (sub) program. LDDA must be at least
max( 1, n ).
It is recommended that ldda is multiple of 16. Otherwise
performance would be deteriorated as the memory accesses
would not be fully coalescent.
@param[in]
dx COMPLEX_16 array of dimension at least
( 1 + ( n - 1 )*abs( INCX ) ).
Before entry, the incremented array X must contain the n
element vector x.
@param[in]
incx INTEGER.
On entry, INCX specifies the increment for the elements of
X. INCX must not be zero.
@param[in]
beta COMPLEX_16.
On entry, BETA specifies the scalar beta. When BETA is
supplied as zero then Y need not be set on input.
@param[in,out]
dy COMPLEX_16 array of dimension at least
( 1 + ( n - 1 )*abs( INCY ) ).
Before entry, the incremented array Y must contain the n
element vector y. On exit, Y is overwritten by the updated
vector y.
@param[in]
incy INTEGER.
On entry, INCY specifies the increment for the elements of
Y. INCY must not be zero.
@param[in]
dwork (workspace) COMPLEX_16 array on the GPU, dimension (MAX(1, LWORK)),
@param[in]
lwork INTEGER.
The dimension of the array DWORK. LWORK >= LDDA * ceil( N / NB_X ),
where NB_X = 64.
@param[in]
queue magma_queue_t.
Queue to execute in.
MAGMA implements zhemv through two steps:
1) perform the multiplication in each thread block and put the
intermediate value in dwork.
2) sum the intermediate values and store the final result in y.
    magmablas_zhemv_work requires users to provide a workspace, while
magmablas_zhemv is a wrapper routine allocating the workspace inside the
routine and provides the same interface as cublas.
If users need to call zhemv frequently, we suggest using
magmablas_zhemv_work instead of magmablas_zhemv. As the overhead to
allocate and free in device memory in magmablas_zhemv would hurt performance.
Our tests show that this penalty is about 10 Gflop/s when the matrix
size is around 10000.
@ingroup magma_hemv
*******************************************************************************/
extern "C"
magma_int_t
magmablas_zhemv_work(
magma_uplo_t uplo, magma_int_t n,
magmaDoubleComplex alpha,
magmaDoubleComplex_const_ptr dA, magma_int_t ldda,
magmaDoubleComplex_const_ptr dx, magma_int_t incx,
magmaDoubleComplex beta,
magmaDoubleComplex_ptr dy, magma_int_t incy,
magmaDoubleComplex_ptr dwork, magma_int_t lwork,
magma_queue_t queue )
{
#if defined(PRECISION_z)
// z precision requires CUDA ARCH 2.x; call CUBLAS version instead.
magma_int_t arch = magma_getdevice_arch();
if ( arch < 200 ) {
magma_zhemv( uplo, n, alpha, dA, ldda, dx, incx, beta, dy, incy, queue );
return MAGMA_SUCCESS;
}
#endif
// --------------------
// [sdc] precisions, or z precision with CUDA ARCH 2.x
bool upper = (uplo == MagmaUpper);
magma_int_t blocks = magma_ceildiv( n, NB_X );
magma_int_t lwmin = ldda*blocks;
/*
* Test the input parameters.
*/
magma_int_t info = 0;
if ((! upper) && (uplo != MagmaLower)) {
info = -1;
} else if ( n < 0 ) {
info = -2;
} else if ( ldda < max(1, n) ) {
info = -5;
} else if ( incx == 0 ) {
info = -7;
} else if ( incy == 0 ) {
info = -10;
} else if ( lwork < lwmin ) {
info = -12;
}
if (info != 0) {
magma_xerbla( __func__, -(info) );
return info;
}
/*
* Quick return if possible.
*/
if ( (n == 0) || ( MAGMA_Z_EQUAL(alpha, MAGMA_Z_ZERO) && MAGMA_Z_EQUAL(beta, MAGMA_Z_ONE) ) )
return info;
dim3 grid( blocks, 1, 1 );
dim3 threads( NB_X, NB_Y, 1 );
dim3 threads_sum( NB_X, 1, 1 );
if ( upper ) {
zhemv_kernel_U<<< grid, threads, 0, queue->cuda_stream() >>>
(n, dA, ldda, dx, incx, dwork);
zhemv_kernel_U_sum<<< grid, threads_sum, 0, queue->cuda_stream() >>>
(n, alpha, ldda, beta, dy, incy, dwork);
}
else {
zhemv_kernel_L<<< grid, threads, 0, queue->cuda_stream() >>>
(n, dA, ldda, dx, incx, dwork);
zhemv_kernel_L_sum<<< grid, threads_sum, 0, queue->cuda_stream() >>>
(n, alpha, ldda, beta, dy, incy, dwork);
}
return info;
}
// end magmablas_zhemv_work
/***************************************************************************//**
Purpose
-------
magmablas_zhemv performs the matrix-vector operation:
y := alpha*A*x + beta*y,
where alpha and beta are scalars, x and y are n element vectors and
A is an n by n Hermitian matrix.
Arguments
----------
@param[in]
uplo magma_uplo_t.
On entry, UPLO specifies whether the upper or lower
triangular part of the array A is to be referenced as
follows:
- = MagmaUpper: Only the upper triangular part of A is to be referenced.
- = MagmaLower: Only the lower triangular part of A is to be referenced.
@param[in]
n INTEGER.
On entry, N specifies the order of the matrix A.
N must be at least zero.
@param[in]
alpha COMPLEX_16.
On entry, ALPHA specifies the scalar alpha.
@param[in]
dA COMPLEX_16 array of DIMENSION ( LDDA, n ).
Before entry with UPLO = MagmaUpper, the leading n by n
upper triangular part of the array A must contain the upper
triangular part of the Hermitian matrix and the strictly
lower triangular part of A is not referenced.
Before entry with UPLO = MagmaLower, the leading n by n
lower triangular part of the array A must contain the lower
triangular part of the Hermitian matrix and the strictly
upper triangular part of A is not referenced.
Note that the imaginary parts of the diagonal elements need
not be set and are assumed to be zero.
@param[in]
ldda INTEGER.
On entry, LDDA specifies the first dimension of A as declared
in the calling (sub) program. LDDA must be at least
max( 1, n ).
It is recommended that ldda is multiple of 16. Otherwise
performance would be deteriorated as the memory accesses
would not be fully coalescent.
@param[in]
dx COMPLEX_16 array of dimension at least
( 1 + ( n - 1 )*abs( INCX ) ).
Before entry, the incremented array X must contain the n
element vector x.
@param[in]
incx INTEGER.
On entry, INCX specifies the increment for the elements of
X. INCX must not be zero.
@param[in]
beta COMPLEX_16.
On entry, BETA specifies the scalar beta. When BETA is
supplied as zero then Y need not be set on input.
@param[in,out]
dy COMPLEX_16 array of dimension at least
( 1 + ( n - 1 )*abs( INCY ) ).
Before entry, the incremented array Y must contain the n
element vector y. On exit, Y is overwritten by the updated
vector y.
@param[in]
incy INTEGER.
On entry, INCY specifies the increment for the elements of
Y. INCY must not be zero.
@param[in]
queue magma_queue_t
Queue to execute in.
@ingroup magma_hemv
*******************************************************************************/
extern "C"
magma_int_t
magmablas_zhemv_q(
magma_uplo_t uplo, magma_int_t n,
magmaDoubleComplex alpha,
magmaDoubleComplex_const_ptr dA, magma_int_t ldda,
magmaDoubleComplex_const_ptr dx, magma_int_t incx,
magmaDoubleComplex beta,
magmaDoubleComplex_ptr dy, magma_int_t incy,
magma_queue_t queue )
{
#if defined(PRECISION_z)
// z precision requires CUDA ARCH 2.x; call CUBLAS version instead.
magma_int_t arch = magma_getdevice_arch();
if ( arch < 200 ) {
magma_zhemv( uplo, n, alpha, dA, ldda, dx, incx, beta, dy, incy, queue );
return MAGMA_SUCCESS;
}
#endif
// --------------------
// [sdc] precisions, or z precision with CUDA ARCH 2.x
bool upper = (uplo == MagmaUpper);
/*
* Test the input parameters.
*/
magma_int_t info = 0;
if ((! upper) && (uplo != MagmaLower)) {
info = -1;
} else if ( n < 0 ) {
info = -2;
} else if ( ldda < max(1, n) ) {
info = -5;
} else if ( incx == 0 ) {
info = -7;
} else if ( incy == 0 ) {
info = -10;
}
if (info != 0) {
magma_xerbla( __func__, -(info) );
return info;
}
/*
* Quick return if possible.
*/
if ( (n == 0) || ( MAGMA_Z_EQUAL(alpha, MAGMA_Z_ZERO) && MAGMA_Z_EQUAL(beta, MAGMA_Z_ONE) ) )
return info;
magmaDoubleComplex_ptr dwork;
magma_int_t blocks = magma_ceildiv( n, NB_X );
magma_int_t lwork = ldda*blocks;
magma_zmalloc( &dwork, lwork );
if ( dwork == NULL ) {
info = MAGMA_ERR_DEVICE_ALLOC;
magma_xerbla( __func__, -(info) );
return info;
}
magmablas_zhemv_work( uplo, n, alpha, dA, ldda, dx, incx, beta, dy, incy,
dwork, lwork, queue );
magma_free( dwork );
return info;
}
// end magmablas_zhemv
|
2d77ef16e3048c74af8be1b9ca9897f99c4352bd.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
#include "nbodycuda.cuh"
#include <unistd.h>
#include <sys/time.h>
using namespace GPU_ParticleDataset;
#define _DEBUG
#ifdef _DEBUG
#include <iostream>
using namespace std;
#endif //__DEBUG
// Host-side view of the full dataset; set from the caller's pointer in
// CudaInitialize / ComputeParticleAttributes.
ParticleDataset * hpDataset;
// Caller-owned buffer this rank's updated particles are copied back into.
ParticleDataset::Particle * hpLocBuf;
// Partition/launch configuration for this process (localCnt, localDisp,
// pid, step, grav, dev — see GPU_ParticleDataset::GPUConfig).
GPUConfig gpuConfig;
// Structure-of-arrays particle buffers:
//   g* = whole dataset, l* = this rank's slice; *_h = host, *_d = device.
ParticleList gParList_h;
ParticleList gParList_d;
ParticleList lParList_h;
ParticleList lParList_d;
/* All-pairs gravitational update for this rank's local particle slice.
   g* arrays hold the whole dataset (totalCnt entries); l* arrays hold the
   local slice (localCnt entries) starting at global index localDisp.
   Each thread integrates one or more local particles (grid-stride style):
   it accumulates the force from every other particle, then performs an
   explicit Euler step acceleration -> velocity -> position.
   NOTE(review): no softening term — coincident particles would divide by
   zero; presumably the input guarantees distinct positions (confirm). */
__global__ void ComputeParticleAttributes_Kernel(
    PRECISION * gxPos, PRECISION * gyPos, PRECISION * gzPos,
    PRECISION * gxVel, PRECISION * gyVel, PRECISION * gzVel,
    PRECISION * gxAcc, PRECISION * gyAcc, PRECISION * gzAcc,
    PRECISION * gmass,
    PRECISION * lxPos, PRECISION * lyPos, PRECISION * lzPos,
    PRECISION * lxVel, PRECISION * lyVel, PRECISION * lzVel,
    PRECISION * lxAcc, PRECISION * lyAcc, PRECISION * lzAcc,
    PRECISION * lmass,
    PRECISION step, PRECISION grav,
    int localCnt, int localDisp,
    int totalCnt) {
    // Flatten the (potentially 3D) grid and block into one global id.
    const int blockRank  = (blockIdx.z * (gridDim.x * gridDim.y)) +
                           (blockIdx.y * gridDim.x) + blockIdx.x;
    const int threadRank = (threadIdx.z * (blockDim.x * blockDim.y)) +
                           (threadIdx.y * blockDim.x) + threadIdx.x;
    const int threadsPerBlock = blockDim.z * blockDim.y * blockDim.x;
    const int gid   = blockRank * threadsPerBlock + threadRank;
    const int total = (gridDim.x * gridDim.y * gridDim.z) * threadsPerBlock;

    // Enough passes to cover totalCnt (>= localCnt) at stride `total`.
    const int passes = (totalCnt / total) + 1;
    for (int pass = 0; pass < passes; pass++) {
        const int pid = (pass * total) + gid;
        if (pid >= localCnt)
            break;

        PRECISION fx = 0.0, fy = 0.0, fz = 0.0;
        const PRECISION myX = lxPos[pid], myY = lyPos[pid], myZ = lzPos[pid];
        const PRECISION myMass = lmass[pid];

        // Accumulate pairwise forces against every particle in the dataset.
        for (int other = 0; other < totalCnt; other++) {
            if (other == (localDisp + pid))
                continue;   // skip self-interaction
            const PRECISION dx = gxPos[other] - myX;
            const PRECISION dy = gyPos[other] - myY;
            const PRECISION dz = gzPos[other] - myZ;
            const PRECISION distSq = (dx * dx) + (dy * dy) + (dz * dz);
            const PRECISION dist   = sqrt(distSq);
            const PRECISION f      = (grav * myMass * gmass[other]) / distSq;
            fx += f * (dx / dist);
            fy += f * (dy / dist);
            fz += f * (dz / dist);
        }

        // Explicit Euler integration of this particle's state.
        lxAcc[pid] = fx / myMass;
        lyAcc[pid] = fy / myMass;
        lzAcc[pid] = fz / myMass;
        lxVel[pid] += lxAcc[pid] * step;
        lyVel[pid] += lyAcc[pid] * step;
        lzVel[pid] += lzAcc[pid] * step;
        lxPos[pid] += lxVel[pid] * step;
        lyPos[pid] += lyVel[pid] * step;
        lzPos[pid] += lzVel[pid] * step;
    }
}
// Error-propagation helper: evaluate `str` (a HIP call or a function
// returning hipError_t) and, on failure, return its error code from the
// enclosing hipError_t-returning function.
#define CHK_ERR(str) \
    do { \
        hipError_t ce = str; \
        if (ce != hipSuccess) \
            return ce; \
    } while (0)
// Repack the host AoS particle data into the SoA staging arrays, extract
// this rank's local slice, and upload both the global set and the slice
// to the device. Returns the first HIP error encountered, else hipSuccess.
hipError_t ConvertHostToDevice() {
    const int nGlobal = hpDataset->mNumParticles;
    const int nLocal  = gpuConfig.localCnt;
    const int disp    = gpuConfig.localDisp;

    // AoS -> SoA for the whole dataset.
    for (int idx = 0; idx < nGlobal; idx++) {
        const ParticleDataset::Particle &p = hpDataset->mpParticle[idx];
        gParList_h.xPos[idx] = p.xPos;
        gParList_h.yPos[idx] = p.yPos;
        gParList_h.zPos[idx] = p.zPos;
        gParList_h.xVel[idx] = p.xVel;
        gParList_h.yVel[idx] = p.yVel;
        gParList_h.zVel[idx] = p.zVel;
        gParList_h.xAcc[idx] = p.xAcc;
        gParList_h.yAcc[idx] = p.yAcc;
        gParList_h.zAcc[idx] = p.zAcc;
        gParList_h.mass[idx] = p.mass;
    }

    // Field tables let a single loop replace ten hand-written copies
    // per stage; the order matches the original field-by-field code.
    PRECISION *gHost[10] = { gParList_h.xPos, gParList_h.yPos, gParList_h.zPos,
                             gParList_h.xVel, gParList_h.yVel, gParList_h.zVel,
                             gParList_h.xAcc, gParList_h.yAcc, gParList_h.zAcc,
                             gParList_h.mass };
    PRECISION *lHost[10] = { lParList_h.xPos, lParList_h.yPos, lParList_h.zPos,
                             lParList_h.xVel, lParList_h.yVel, lParList_h.zVel,
                             lParList_h.xAcc, lParList_h.yAcc, lParList_h.zAcc,
                             lParList_h.mass };
    PRECISION *gDev[10]  = { gParList_d.xPos, gParList_d.yPos, gParList_d.zPos,
                             gParList_d.xVel, gParList_d.yVel, gParList_d.zVel,
                             gParList_d.xAcc, gParList_d.yAcc, gParList_d.zAcc,
                             gParList_d.mass };
    PRECISION *lDev[10]  = { lParList_d.xPos, lParList_d.yPos, lParList_d.zPos,
                             lParList_d.xVel, lParList_d.yVel, lParList_d.zVel,
                             lParList_d.xAcc, lParList_d.yAcc, lParList_d.zAcc,
                             lParList_d.mass };

    // Extract this rank's slice on the host.
    for (int f = 0; f < 10; f++)
        memcpy(lHost[f], gHost[f] + disp, nLocal * sizeof(PRECISION));

    // Upload the full dataset, then the local working slice.
    for (int f = 0; f < 10; f++)
        CHK_ERR( hipMemcpy(gDev[f], gHost[f], nGlobal * sizeof(PRECISION),
                           hipMemcpyHostToDevice));
    for (int f = 0; f < 10; f++)
        CHK_ERR( hipMemcpy(lDev[f], lHost[f], nLocal * sizeof(PRECISION),
                           hipMemcpyHostToDevice));

    cout << "Memcpy Counter: " << gpuConfig.localCnt << " and global: " << hpDataset->mNumParticles << endl;
    return hipSuccess;
}
// Download this rank's updated slice from the device and repack it
// (SoA -> AoS) into the caller-visible hpLocBuf buffer.
hipError_t ConvertDeviceToHost() {
    const int nLocal = gpuConfig.localCnt;

    // Field tables: one loop instead of ten hand-written downloads,
    // in the same field order as the original code.
    PRECISION *lDev[10]  = { lParList_d.xPos, lParList_d.yPos, lParList_d.zPos,
                             lParList_d.xVel, lParList_d.yVel, lParList_d.zVel,
                             lParList_d.xAcc, lParList_d.yAcc, lParList_d.zAcc,
                             lParList_d.mass };
    PRECISION *lHost[10] = { lParList_h.xPos, lParList_h.yPos, lParList_h.zPos,
                             lParList_h.xVel, lParList_h.yVel, lParList_h.zVel,
                             lParList_h.xAcc, lParList_h.yAcc, lParList_h.zAcc,
                             lParList_h.mass };
    for (int f = 0; f < 10; f++)
        CHK_ERR( hipMemcpy(lHost[f], lDev[f], nLocal * sizeof(PRECISION),
                           hipMemcpyDeviceToHost));

    // Bookkeeping fields live on the first element of the output buffer.
    hpLocBuf->localCnt = gpuConfig.localCnt;
    hpLocBuf->localDisp = gpuConfig.localDisp;
    hpLocBuf->pid = gpuConfig.pid;

    // SoA -> AoS into the caller's buffer.
    for (int idx = 0; idx < nLocal; idx++) {
        ParticleDataset::Particle &p = hpLocBuf[idx];
        p.xPos = lParList_h.xPos[idx];
        p.yPos = lParList_h.yPos[idx];
        p.zPos = lParList_h.zPos[idx];
        p.xVel = lParList_h.xVel[idx];
        p.yVel = lParList_h.yVel[idx];
        p.zVel = lParList_h.zVel[idx];
        p.xAcc = lParList_h.xAcc[idx];
        p.yAcc = lParList_h.yAcc[idx];
        p.zAcc = lParList_h.zAcc[idx];
        p.mass = lParList_h.mass[idx];
    }
    return hipSuccess;
}
// Allocate the SoA particle buffers: device arrays for the full dataset
// (mNumParticles) and the local slice (localCnt), plus host mirrors.
// Note: host-side local arrays are deliberately sized for the full
// dataset, matching the original allocation policy.
hipError_t InitializeParticleLists() {
    const int nGlobal = hpDataset->mNumParticles;
    const int nLocal  = gpuConfig.localCnt;

    // Device allocations, driven by field-pointer tables.
    PRECISION **gDev[10] = { &gParList_d.xPos, &gParList_d.yPos, &gParList_d.zPos,
                             &gParList_d.xVel, &gParList_d.yVel, &gParList_d.zVel,
                             &gParList_d.xAcc, &gParList_d.yAcc, &gParList_d.zAcc,
                             &gParList_d.mass };
    PRECISION **lDev[10] = { &lParList_d.xPos, &lParList_d.yPos, &lParList_d.zPos,
                             &lParList_d.xVel, &lParList_d.yVel, &lParList_d.zVel,
                             &lParList_d.xAcc, &lParList_d.yAcc, &lParList_d.zAcc,
                             &lParList_d.mass };
    for (int f = 0; f < 10; f++)
        CHK_ERR( hipMalloc(gDev[f], nGlobal * sizeof(PRECISION)));
    for (int f = 0; f < 10; f++)
        CHK_ERR( hipMalloc(lDev[f], nLocal * sizeof(PRECISION)));

    // Host allocations (global set first, then the local mirrors).
    PRECISION **hostArrs[20] = {
        &gParList_h.xPos, &gParList_h.yPos, &gParList_h.zPos,
        &gParList_h.xVel, &gParList_h.yVel, &gParList_h.zVel,
        &gParList_h.xAcc, &gParList_h.yAcc, &gParList_h.zAcc,
        &gParList_h.mass,
        &lParList_h.xPos, &lParList_h.yPos, &lParList_h.zPos,
        &lParList_h.xVel, &lParList_h.yVel, &lParList_h.zVel,
        &lParList_h.xAcc, &lParList_h.yAcc, &lParList_h.zAcc,
        &lParList_h.mass };
    for (int f = 0; f < 20; f++) {
        *hostArrs[f] = new PRECISION [nGlobal];
        if (*hostArrs[f] == NULL)
            return hipErrorUnknown;
    }
    return hipSuccess;
}
// Release every buffer allocated by InitializeParticleLists: device
// arrays first (global set then local slice), then the host mirrors.
hipError_t DestroyParticleLists() {
    PRECISION *devArrs[20] = {
        gParList_d.xPos, gParList_d.yPos, gParList_d.zPos,
        gParList_d.xVel, gParList_d.yVel, gParList_d.zVel,
        gParList_d.xAcc, gParList_d.yAcc, gParList_d.zAcc,
        gParList_d.mass,
        lParList_d.xPos, lParList_d.yPos, lParList_d.zPos,
        lParList_d.xVel, lParList_d.yVel, lParList_d.zVel,
        lParList_d.xAcc, lParList_d.yAcc, lParList_d.zAcc,
        lParList_d.mass };
    for (int f = 0; f < 20; f++)
        CHK_ERR( hipFree(devArrs[f]));

    PRECISION *hostArrs[20] = {
        gParList_h.xPos, gParList_h.yPos, gParList_h.zPos,
        gParList_h.xVel, gParList_h.yVel, gParList_h.zVel,
        gParList_h.xAcc, gParList_h.yAcc, gParList_h.zAcc,
        gParList_h.mass,
        lParList_h.xPos, lParList_h.yPos, lParList_h.zPos,
        lParList_h.xVel, lParList_h.yVel, lParList_h.zVel,
        lParList_h.xAcc, lParList_h.yAcc, lParList_h.zAcc,
        lParList_h.mass };
    for (int f = 0; f < 20; f++)
        delete [] hostArrs[f];

    return hipSuccess;
}
// Record the caller's dataset pointer, output buffer, and GPU
// configuration in the module-level globals used by the other routines.
hipError_t CudaInitialize(ParticleDataset * dataset,
        ParticleDataset::Particle * locBuf,
        GPU_ParticleDataset::GPUConfig gConfig) {
    gpuConfig = gConfig;
    hpLocBuf  = locBuf;
    hpDataset = dataset;
    return hipSuccess;
}
// Release all device and host particle buffers.
// Fix: the status of DestroyParticleLists was silently discarded and
// hipSuccess was returned unconditionally; propagate any hipFree failure
// to the caller instead.
hipError_t CudaClean() {
    return DestroyParticleLists();
}
// Run one N-body integration step for this rank's particle slice on a GPU:
// refresh the module-level configuration, pick a device round-robin by
// processing-element id, allocate and upload the particle arrays, launch
// the all-pairs kernel, and copy the updated slice back into `locBuf`.
// Returns the first HIP error encountered, else hipSuccess.
// NOTE(review): buffers are (re)allocated and freed on every call; if this
// runs once per timestep, hoisting InitializeParticleLists/
// DestroyParticleLists out of the step loop would avoid repeated
// hipMalloc/hipFree overhead.
hipError_t ComputeParticleAttributes(ParticleDataset * dataset,
        ParticleDataset::Particle * locBuf,
        GPU_ParticleDataset::GPUConfig gConfig, PRECISION step, PRECISION grav, int peid, int pid, int localCnt, int localDisp, int numParticles) {
    // Re-seed the globals that the Convert*/Initialize* helpers read.
    hpDataset = dataset;
    hpLocBuf = locBuf;
    gpuConfig = gConfig;
    gpuConfig.localCnt = localCnt;
    gpuConfig.localDisp = localDisp;
    gpuConfig.pid = pid;
    gpuConfig.grav = grav;
    gpuConfig.step = step;
    gpuConfig.dev = peid;

    // Map this processing element onto an available device (round-robin).
    int deviceCnt = 0;
    CHK_ERR( hipGetDeviceCount(&deviceCnt));
    CHK_ERR( hipSetDevice(peid % deviceCnt));

    // Allocate staging buffers and upload the particle data.
    CHK_ERR( InitializeParticleLists());
    CHK_ERR( ConvertHostToDevice());

    char hostname[128];
    gethostname(hostname, 128);
    cout << "\t\t[" << gpuConfig.pid << "]: hipSetDevice " <<
        peid % deviceCnt << " on [" << hostname << "]"<< endl;

    // Wall-clock timing around the kernel (or the CPU fallback below).
    struct timeval tb, te;
    //    cout << "Launching" << endl;
    gettimeofday(&tb, NULL);
#if 1
    // GPU path: fixed 16x256 launch; the kernel's internal stride loop
    // covers any particle count.
    hipLaunchKernelGGL(( ComputeParticleAttributes_Kernel) , dim3(16), dim3(256), 0, 0,
        gParList_d.xPos, gParList_d.yPos, gParList_d.zPos,
        gParList_d.xVel, gParList_d.yVel, gParList_d.zVel,
        gParList_d.xAcc, gParList_d.yAcc, gParList_d.zAcc,
        gParList_d.mass,
        lParList_d.xPos, lParList_d.yPos, lParList_d.zPos,
        lParList_d.xVel, lParList_d.yVel, lParList_d.zVel,
        lParList_d.xAcc, lParList_d.yAcc, lParList_d.zAcc,
        lParList_d.mass,
        step, grav,
        localCnt, localDisp, numParticles);
    // Block until the kernel finishes so the timing and the download below
    // see completed results (also surfaces asynchronous kernel errors).
    CHK_ERR( hipDeviceSynchronize());
#else
    // Dead code (the #if 1 above always takes the GPU path): a host-side
    // busy-loop benchmark with per-hostname iteration counts, presumably
    // used to emulate load during development. Note it leaks a/b/c.
    int imax, jmax, kmax;
    if (!strcmp(hostname, "c11u11.build.local")) {
        imax = 7;
        jmax = 1000;
        kmax = 1000000;
    }
    else if (!strcmp(hostname, "c11u13.build.local")) {
        imax = 7;
        jmax = 1000;
        kmax = 1000000;
    }
    else if (!strcmp(hostname, "c11u15.build.local")) {
        imax = 20;
        jmax = 1000;
        kmax = 1000000;
    }
    else if (!strcmp(hostname, "c11u17.build.local")) {
        imax = 20;
        jmax = 1000;
        kmax = 1000000;
    }
    else if (!strcmp(hostname, "c11u19.build.local")) {
        imax = 5;
        jmax = 1000;
        kmax = 1000000;
    }
    else if (!strcmp(hostname, "c11u21.build.local")) {
        imax = 5;
        jmax = 1000;
        kmax = 1000000;
    }
    else if (!strcmp(hostname, "c12u9.build.local")) {
        imax = 10;
        jmax = 1000;
        kmax = 1000000;
    }
    else if (!strcmp(hostname, "c12u11.build.local")) {
        imax = 10;
        jmax = 1000;
        kmax = 1000000;
    }
    else if (!strcmp(hostname, "c12u13.build.local")) {
        imax = 10;
        jmax = 1000;
        kmax = 1000000;
    }
    else if (!strcmp(hostname, "c12u15.build.local")) {
        imax = 10;
        jmax = 1000;
        kmax = 1000000;
    }
    volatile float *a, *b,*c;
    a = new volatile float[10000000];
    b = new volatile float[10000000];
    c = new volatile float[10000000];
    for (int i=0;i<imax;i++) {
        for (int j=0;j<jmax;j++) {
            for (int k=0;k<kmax;k++) {
                c[k] = a[k] * b[k];
            }
        }
    }
#endif
    gettimeofday(&te, NULL);
    cout << "\t***Kernel: " << ((te.tv_usec - tb.tv_usec) * 1e-6 + ((double)te.tv_sec - (double)tb.tv_sec)) << " and number: " << gpuConfig.localCnt<< " and disp: " << gpuConfig.localDisp << " global: " << hpDataset->mNumParticles << endl;

    // Download the updated slice, then release all staging buffers.
    // NOTE(review): DestroyParticleLists' status is ignored here.
    CHK_ERR( ConvertDeviceToHost());
    DestroyParticleLists();
    //    cout << "MMM[" << gpuConfig.pid << "]: " << hpLocBuf[0].xPos<< endl;
    return hipSuccess;
}
| 2d77ef16e3048c74af8be1b9ca9897f99c4352bd.cu | #include "nbodycuda.cuh"
#include <unistd.h>
#include <sys/time.h>
using namespace GPU_ParticleDataset;
#define _DEBUG
#ifdef _DEBUG
#include <iostream>
using namespace std;
#endif //__DEBUG
ParticleDataset * hpDataset;
ParticleDataset::Particle * hpLocBuf;
GPUConfig gpuConfig;
ParticleList gParList_h;
ParticleList gParList_d;
ParticleList lParList_h;
ParticleList lParList_d;
__global__ void ComputeParticleAttributes_Kernel(
PRECISION * gxPos, PRECISION * gyPos, PRECISION * gzPos,
PRECISION * gxVel, PRECISION * gyVel, PRECISION * gzVel,
PRECISION * gxAcc, PRECISION * gyAcc, PRECISION * gzAcc,
PRECISION * gmass,
PRECISION * lxPos, PRECISION * lyPos, PRECISION * lzPos,
PRECISION * lxVel, PRECISION * lyVel, PRECISION * lzVel,
PRECISION * lxAcc, PRECISION * lyAcc, PRECISION * lzAcc,
PRECISION * lmass,
PRECISION step, PRECISION grav,
int localCnt, int localDisp,
int totalCnt) {
#if 1
int gOffset = ((blockIdx.z * (gridDim.x * gridDim.y)) +
(blockIdx.y * gridDim.x) + blockIdx.x) *
(blockDim.z * blockDim.y * blockDim.x);
int gid = ((threadIdx.z * (blockDim.x * blockDim.y)) +
(threadIdx.y * blockDim.x) + threadIdx.x) +
gOffset;
int total = (gridDim.x * gridDim.y * gridDim.z) *
(blockDim.x * blockDim.y * blockDim.z);
#else
int gid = (blockIdx.x * blockDim.x) + threadIdx.x;
int total = gridDim.x * blockDim.x;
#endif
int stride = (totalCnt / total) + 1;
for (int iIdx=0;iIdx<stride;iIdx++) {
int pid = (iIdx * total) + gid;
if (pid >= localCnt)
break;
PRECISION radius_s, radius;
PRECISION force, force_x = 0.0, force_y = 0.0, force_z = 0.0;
PRECISION x1Pos = lxPos[pid], y1Pos = lyPos[pid], z1Pos = lzPos[pid];
PRECISION mass1 = lmass[pid];
#if 1
for (int jIdx=0;jIdx<totalCnt;jIdx++) {
if (jIdx != (localDisp + pid)) {
PRECISION x2Pos = gxPos[jIdx];
PRECISION y2Pos = gyPos[jIdx];
PRECISION z2Pos = gzPos[jIdx];
PRECISION mass2 = gmass[jIdx];
radius_s = ((x2Pos - x1Pos) * (x2Pos - x1Pos)) +
((y2Pos - y1Pos) * (y2Pos - y1Pos)) +
((z2Pos - z1Pos) * (z2Pos - z1Pos));
radius = sqrt(radius_s);
force = (grav * mass1 * mass2) / radius_s;
force_x += force * ((x2Pos - x1Pos) / radius);
force_y += force * ((y2Pos - y1Pos) / radius);
force_z += force * ((z2Pos - z1Pos) / radius);
}
}
#endif
lxAcc[pid] = force_x / mass1;
lyAcc[pid] = force_y / mass1;
lzAcc[pid] = force_z / mass1;
lxVel[pid] += lxAcc[pid] * step;
lyVel[pid] += lyAcc[pid] * step;
lzVel[pid] += lzAcc[pid] * step;
lxPos[pid] += lxVel[pid] * step;
lyPos[pid] += lyVel[pid] * step;
lzPos[pid] += lzVel[pid] * step;
}
}
#define CHK_ERR(str) \
do { \
cudaError_t ce = str; \
if (ce != cudaSuccess) \
return ce; \
} while (0)
cudaError_t ConvertHostToDevice() {
for (int idx=0;idx<hpDataset->mNumParticles;idx++) {
gParList_h.xPos[idx] = hpDataset->mpParticle[idx].xPos;
gParList_h.yPos[idx] = hpDataset->mpParticle[idx].yPos;
gParList_h.zPos[idx] = hpDataset->mpParticle[idx].zPos;
gParList_h.xVel[idx] = hpDataset->mpParticle[idx].xVel;
gParList_h.yVel[idx] = hpDataset->mpParticle[idx].yVel;
gParList_h.zVel[idx] = hpDataset->mpParticle[idx].zVel;
gParList_h.xAcc[idx] = hpDataset->mpParticle[idx].xAcc;
gParList_h.yAcc[idx] = hpDataset->mpParticle[idx].yAcc;
gParList_h.zAcc[idx] = hpDataset->mpParticle[idx].zAcc;
gParList_h.mass[idx] = hpDataset->mpParticle[idx].mass;
}
memcpy(lParList_h.xPos, gParList_h.xPos + gpuConfig.localDisp,
gpuConfig.localCnt * sizeof(PRECISION));
memcpy(lParList_h.yPos, gParList_h.yPos + gpuConfig.localDisp,
gpuConfig.localCnt * sizeof(PRECISION));
memcpy(lParList_h.zPos, gParList_h.zPos + gpuConfig.localDisp,
gpuConfig.localCnt * sizeof(PRECISION));
memcpy(lParList_h.xVel, gParList_h.xVel + gpuConfig.localDisp,
gpuConfig.localCnt * sizeof(PRECISION));
memcpy(lParList_h.yVel, gParList_h.yVel + gpuConfig.localDisp,
gpuConfig.localCnt * sizeof(PRECISION));
memcpy(lParList_h.zVel, gParList_h.zVel + gpuConfig.localDisp,
gpuConfig.localCnt * sizeof(PRECISION));
memcpy(lParList_h.xAcc, gParList_h.xAcc + gpuConfig.localDisp,
gpuConfig.localCnt * sizeof(PRECISION));
memcpy(lParList_h.yAcc, gParList_h.yAcc + gpuConfig.localDisp,
gpuConfig.localCnt * sizeof(PRECISION));
memcpy(lParList_h.zAcc, gParList_h.zAcc + gpuConfig.localDisp,
gpuConfig.localCnt * sizeof(PRECISION));
memcpy(lParList_h.mass, gParList_h.mass + gpuConfig.localDisp,
gpuConfig.localCnt * sizeof(PRECISION));
#if 1
CHK_ERR( cudaMemcpy(gParList_d.xPos, gParList_h.xPos,
hpDataset->mNumParticles * sizeof(PRECISION), cudaMemcpyHostToDevice));
CHK_ERR( cudaMemcpy(gParList_d.yPos, gParList_h.yPos,
hpDataset->mNumParticles * sizeof(PRECISION), cudaMemcpyHostToDevice));
CHK_ERR( cudaMemcpy(gParList_d.zPos, gParList_h.zPos,
hpDataset->mNumParticles * sizeof(PRECISION), cudaMemcpyHostToDevice));
CHK_ERR( cudaMemcpy(gParList_d.xVel, gParList_h.xVel,
hpDataset->mNumParticles * sizeof(PRECISION), cudaMemcpyHostToDevice));
CHK_ERR( cudaMemcpy(gParList_d.yVel, gParList_h.yVel,
hpDataset->mNumParticles * sizeof(PRECISION), cudaMemcpyHostToDevice));
CHK_ERR( cudaMemcpy(gParList_d.zVel, gParList_h.zVel,
hpDataset->mNumParticles * sizeof(PRECISION), cudaMemcpyHostToDevice));
CHK_ERR( cudaMemcpy(gParList_d.xAcc, gParList_h.xAcc,
hpDataset->mNumParticles * sizeof(PRECISION), cudaMemcpyHostToDevice));
CHK_ERR( cudaMemcpy(gParList_d.yAcc, gParList_h.yAcc,
hpDataset->mNumParticles * sizeof(PRECISION), cudaMemcpyHostToDevice));
CHK_ERR( cudaMemcpy(gParList_d.zAcc, gParList_h.zAcc,
hpDataset->mNumParticles * sizeof(PRECISION), cudaMemcpyHostToDevice));
CHK_ERR( cudaMemcpy(gParList_d.mass, gParList_h.mass,
hpDataset->mNumParticles * sizeof(PRECISION), cudaMemcpyHostToDevice));
#endif
CHK_ERR( cudaMemcpy(lParList_d.xPos, lParList_h.xPos,
gpuConfig.localCnt * sizeof(PRECISION), cudaMemcpyHostToDevice));
CHK_ERR( cudaMemcpy(lParList_d.yPos, lParList_h.yPos,
gpuConfig.localCnt * sizeof(PRECISION), cudaMemcpyHostToDevice));
CHK_ERR( cudaMemcpy(lParList_d.zPos, lParList_h.zPos,
gpuConfig.localCnt * sizeof(PRECISION), cudaMemcpyHostToDevice));
CHK_ERR( cudaMemcpy(lParList_d.xVel, lParList_h.xVel,
gpuConfig.localCnt * sizeof(PRECISION), cudaMemcpyHostToDevice));
CHK_ERR( cudaMemcpy(lParList_d.yVel, lParList_h.yVel,
gpuConfig.localCnt * sizeof(PRECISION), cudaMemcpyHostToDevice));
CHK_ERR( cudaMemcpy(lParList_d.zVel, lParList_h.zVel,
gpuConfig.localCnt * sizeof(PRECISION), cudaMemcpyHostToDevice));
CHK_ERR( cudaMemcpy(lParList_d.xAcc, lParList_h.xAcc,
gpuConfig.localCnt * sizeof(PRECISION), cudaMemcpyHostToDevice));
CHK_ERR( cudaMemcpy(lParList_d.yAcc, lParList_h.yAcc,
gpuConfig.localCnt * sizeof(PRECISION), cudaMemcpyHostToDevice));
CHK_ERR( cudaMemcpy(lParList_d.zAcc, lParList_h.zAcc,
gpuConfig.localCnt * sizeof(PRECISION), cudaMemcpyHostToDevice));
CHK_ERR( cudaMemcpy(lParList_d.mass, lParList_h.mass,
gpuConfig.localCnt * sizeof(PRECISION), cudaMemcpyHostToDevice));
cout << "Memcpy Counter: " << gpuConfig.localCnt << " and global: " << hpDataset->mNumParticles << endl;
return cudaSuccess;
}
cudaError_t ConvertDeviceToHost() {
CHK_ERR( cudaMemcpy(lParList_h.xPos, lParList_d.xPos,
gpuConfig.localCnt * sizeof(PRECISION), cudaMemcpyDeviceToHost));
CHK_ERR( cudaMemcpy(lParList_h.yPos, lParList_d.yPos,
gpuConfig.localCnt * sizeof(PRECISION), cudaMemcpyDeviceToHost));
CHK_ERR( cudaMemcpy(lParList_h.zPos, lParList_d.zPos,
gpuConfig.localCnt * sizeof(PRECISION), cudaMemcpyDeviceToHost));
CHK_ERR( cudaMemcpy(lParList_h.xVel, lParList_d.xVel,
gpuConfig.localCnt * sizeof(PRECISION), cudaMemcpyDeviceToHost));
CHK_ERR( cudaMemcpy(lParList_h.yVel, lParList_d.yVel,
gpuConfig.localCnt * sizeof(PRECISION), cudaMemcpyDeviceToHost));
CHK_ERR( cudaMemcpy(lParList_h.zVel, lParList_d.zVel,
gpuConfig.localCnt * sizeof(PRECISION), cudaMemcpyDeviceToHost));
CHK_ERR( cudaMemcpy(lParList_h.xAcc, lParList_d.xAcc,
gpuConfig.localCnt * sizeof(PRECISION), cudaMemcpyDeviceToHost));
CHK_ERR( cudaMemcpy(lParList_h.yAcc, lParList_d.yAcc,
gpuConfig.localCnt * sizeof(PRECISION), cudaMemcpyDeviceToHost));
CHK_ERR( cudaMemcpy(lParList_h.zAcc, lParList_d.zAcc,
gpuConfig.localCnt * sizeof(PRECISION), cudaMemcpyDeviceToHost));
CHK_ERR( cudaMemcpy(lParList_h.mass, lParList_d.mass,
gpuConfig.localCnt * sizeof(PRECISION), cudaMemcpyDeviceToHost));
hpLocBuf->localCnt = gpuConfig.localCnt;
hpLocBuf->localDisp = gpuConfig.localDisp;
hpLocBuf->pid = gpuConfig.pid;
for (int idx=0;idx<gpuConfig.localCnt;idx++) {
hpLocBuf[idx].xPos = lParList_h.xPos[idx];
hpLocBuf[idx].yPos = lParList_h.yPos[idx];
hpLocBuf[idx].zPos = lParList_h.zPos[idx];
hpLocBuf[idx].xVel = lParList_h.xVel[idx];
hpLocBuf[idx].yVel = lParList_h.yVel[idx];
hpLocBuf[idx].zVel = lParList_h.zVel[idx];
hpLocBuf[idx].xAcc = lParList_h.xAcc[idx];
hpLocBuf[idx].yAcc = lParList_h.yAcc[idx];
hpLocBuf[idx].zAcc = lParList_h.zAcc[idx];
hpLocBuf[idx].mass = lParList_h.mass[idx];
}
return cudaSuccess;
}
cudaError_t InitializeParticleLists() {
//Allocate space for storing entire hpDataset (GPU)
CHK_ERR( cudaMalloc(&(gParList_d.xPos), hpDataset->mNumParticles *
sizeof(PRECISION)));
CHK_ERR( cudaMalloc(&(gParList_d.yPos), hpDataset->mNumParticles *
sizeof(PRECISION)));
CHK_ERR( cudaMalloc(&(gParList_d.zPos), hpDataset->mNumParticles *
sizeof(PRECISION)));
CHK_ERR( cudaMalloc(&(gParList_d.xVel), hpDataset->mNumParticles *
sizeof(PRECISION)));
CHK_ERR( cudaMalloc(&(gParList_d.yVel), hpDataset->mNumParticles *
sizeof(PRECISION)));
CHK_ERR( cudaMalloc(&(gParList_d.zVel), hpDataset->mNumParticles *
sizeof(PRECISION)));
CHK_ERR( cudaMalloc(&(gParList_d.xAcc), hpDataset->mNumParticles *
sizeof(PRECISION)));
CHK_ERR( cudaMalloc(&(gParList_d.yAcc), hpDataset->mNumParticles *
sizeof(PRECISION)));
CHK_ERR( cudaMalloc(&(gParList_d.zAcc), hpDataset->mNumParticles *
sizeof(PRECISION)));
CHK_ERR( cudaMalloc(&(gParList_d.mass), hpDataset->mNumParticles *
sizeof(PRECISION)));
//Allocate local/working space (GPU)
CHK_ERR( cudaMalloc(&(lParList_d.xPos), gpuConfig.localCnt *
sizeof(PRECISION)));
CHK_ERR( cudaMalloc(&(lParList_d.yPos), gpuConfig.localCnt *
sizeof(PRECISION)));
CHK_ERR( cudaMalloc(&(lParList_d.zPos), gpuConfig.localCnt *
sizeof(PRECISION)));
CHK_ERR( cudaMalloc(&(lParList_d.xVel), gpuConfig.localCnt *
sizeof(PRECISION)));
CHK_ERR( cudaMalloc(&(lParList_d.yVel), gpuConfig.localCnt *
sizeof(PRECISION)));
CHK_ERR( cudaMalloc(&(lParList_d.zVel), gpuConfig.localCnt *
sizeof(PRECISION)));
CHK_ERR( cudaMalloc(&(lParList_d.xAcc), gpuConfig.localCnt *
sizeof(PRECISION)));
CHK_ERR( cudaMalloc(&(lParList_d.yAcc), gpuConfig.localCnt *
sizeof(PRECISION)));
CHK_ERR( cudaMalloc(&(lParList_d.zAcc), gpuConfig.localCnt *
sizeof(PRECISION)));
CHK_ERR( cudaMalloc(&(lParList_d.mass), gpuConfig.localCnt *
sizeof(PRECISION)));
#define CHK_HALLOC(var, size) \
do { \
var = new PRECISION [size]; \
if (var == NULL) \
return cudaErrorUnknown; \
} while (0);
//Allocate space for storing entire hpDataset (Host)
CHK_HALLOC(gParList_h.xPos, hpDataset->mNumParticles);
CHK_HALLOC(gParList_h.yPos, hpDataset->mNumParticles);
CHK_HALLOC(gParList_h.zPos, hpDataset->mNumParticles);
CHK_HALLOC(gParList_h.xVel, hpDataset->mNumParticles);
CHK_HALLOC(gParList_h.yVel, hpDataset->mNumParticles);
CHK_HALLOC(gParList_h.zVel, hpDataset->mNumParticles);
CHK_HALLOC(gParList_h.xAcc, hpDataset->mNumParticles);
CHK_HALLOC(gParList_h.yAcc, hpDataset->mNumParticles);
CHK_HALLOC(gParList_h.zAcc, hpDataset->mNumParticles);
CHK_HALLOC(gParList_h.mass, hpDataset->mNumParticles);
//Allocate local/working space (Host)
CHK_HALLOC(lParList_h.xPos, hpDataset->mNumParticles);
CHK_HALLOC(lParList_h.yPos, hpDataset->mNumParticles);
CHK_HALLOC(lParList_h.zPos, hpDataset->mNumParticles);
CHK_HALLOC(lParList_h.xVel, hpDataset->mNumParticles);
CHK_HALLOC(lParList_h.yVel, hpDataset->mNumParticles);
CHK_HALLOC(lParList_h.zVel, hpDataset->mNumParticles);
CHK_HALLOC(lParList_h.xAcc, hpDataset->mNumParticles);
CHK_HALLOC(lParList_h.yAcc, hpDataset->mNumParticles);
CHK_HALLOC(lParList_h.zAcc, hpDataset->mNumParticles);
CHK_HALLOC(lParList_h.mass, hpDataset->mNumParticles);
#undef CHK_HALLOC
// ConvertHostToDevice();
return cudaSuccess;
}
cudaError_t DestroyParticleLists() {
CHK_ERR( cudaFree(gParList_d.xPos));
CHK_ERR( cudaFree(gParList_d.yPos));
CHK_ERR( cudaFree(gParList_d.zPos));
CHK_ERR( cudaFree(gParList_d.xVel));
CHK_ERR( cudaFree(gParList_d.yVel));
CHK_ERR( cudaFree(gParList_d.zVel));
CHK_ERR( cudaFree(gParList_d.xAcc));
CHK_ERR( cudaFree(gParList_d.yAcc));
CHK_ERR( cudaFree(gParList_d.zAcc));
CHK_ERR( cudaFree(gParList_d.mass));
CHK_ERR( cudaFree(lParList_d.xPos));
CHK_ERR( cudaFree(lParList_d.yPos));
CHK_ERR( cudaFree(lParList_d.zPos));
CHK_ERR( cudaFree(lParList_d.xVel));
CHK_ERR( cudaFree(lParList_d.yVel));
CHK_ERR( cudaFree(lParList_d.zVel));
CHK_ERR( cudaFree(lParList_d.xAcc));
CHK_ERR( cudaFree(lParList_d.yAcc));
CHK_ERR( cudaFree(lParList_d.zAcc));
CHK_ERR( cudaFree(lParList_d.mass));
delete [] gParList_h.xPos;
delete [] gParList_h.yPos;
delete [] gParList_h.zPos;
delete [] gParList_h.xVel;
delete [] gParList_h.yVel;
delete [] gParList_h.zVel;
delete [] gParList_h.xAcc;
delete [] gParList_h.yAcc;
delete [] gParList_h.zAcc;
delete [] gParList_h.mass;
delete [] lParList_h.xPos;
delete [] lParList_h.yPos;
delete [] lParList_h.zPos;
delete [] lParList_h.xVel;
delete [] lParList_h.yVel;
delete [] lParList_h.zVel;
delete [] lParList_h.xAcc;
delete [] lParList_h.yAcc;
delete [] lParList_h.zAcc;
delete [] lParList_h.mass;
return cudaSuccess;
}
cudaError_t CudaInitialize(ParticleDataset * dataset,
ParticleDataset::Particle * locBuf,
GPU_ParticleDataset::GPUConfig gConfig) {
hpDataset = dataset;
hpLocBuf = locBuf;
gpuConfig = gConfig;
return cudaSuccess;
}
cudaError_t CudaClean() {
DestroyParticleLists();
return cudaSuccess;
}
cudaError_t ComputeParticleAttributes(ParticleDataset * dataset,
ParticleDataset::Particle * locBuf,
GPU_ParticleDataset::GPUConfig gConfig, PRECISION step, PRECISION grav, int peid, int pid, int localCnt, int localDisp, int numParticles) {
hpDataset = dataset;
hpLocBuf = locBuf;
gpuConfig = gConfig;
gpuConfig.localCnt = localCnt;
gpuConfig.localDisp = localDisp;
gpuConfig.pid = pid;
gpuConfig.grav = grav;
gpuConfig.step = step;
gpuConfig.dev = peid;
int deviceCnt = 0;
CHK_ERR( cudaGetDeviceCount(&deviceCnt));
CHK_ERR( cudaSetDevice(peid % deviceCnt));
CHK_ERR( InitializeParticleLists());
CHK_ERR( ConvertHostToDevice());
char hostname[128];
gethostname(hostname, 128);
cout << "\t\t[" << gpuConfig.pid << "]: cudaSetDevice " <<
peid % deviceCnt << " on [" << hostname << "]"<< endl;
struct timeval tb, te;
// cout << "Launching" << endl;
gettimeofday(&tb, NULL);
#if 1
ComputeParticleAttributes_Kernel <<<16, 256>>> (
gParList_d.xPos, gParList_d.yPos, gParList_d.zPos,
gParList_d.xVel, gParList_d.yVel, gParList_d.zVel,
gParList_d.xAcc, gParList_d.yAcc, gParList_d.zAcc,
gParList_d.mass,
lParList_d.xPos, lParList_d.yPos, lParList_d.zPos,
lParList_d.xVel, lParList_d.yVel, lParList_d.zVel,
lParList_d.xAcc, lParList_d.yAcc, lParList_d.zAcc,
lParList_d.mass,
step, grav,
localCnt, localDisp, numParticles);
CHK_ERR( cudaDeviceSynchronize());
#else
int imax, jmax, kmax;
if (!strcmp(hostname, "c11u11.build.local")) {
imax = 7;
jmax = 1000;
kmax = 1000000;
}
else if (!strcmp(hostname, "c11u13.build.local")) {
imax = 7;
jmax = 1000;
kmax = 1000000;
}
else if (!strcmp(hostname, "c11u15.build.local")) {
imax = 20;
jmax = 1000;
kmax = 1000000;
}
else if (!strcmp(hostname, "c11u17.build.local")) {
imax = 20;
jmax = 1000;
kmax = 1000000;
}
else if (!strcmp(hostname, "c11u19.build.local")) {
imax = 5;
jmax = 1000;
kmax = 1000000;
}
else if (!strcmp(hostname, "c11u21.build.local")) {
imax = 5;
jmax = 1000;
kmax = 1000000;
}
else if (!strcmp(hostname, "c12u9.build.local")) {
imax = 10;
jmax = 1000;
kmax = 1000000;
}
else if (!strcmp(hostname, "c12u11.build.local")) {
imax = 10;
jmax = 1000;
kmax = 1000000;
}
else if (!strcmp(hostname, "c12u13.build.local")) {
imax = 10;
jmax = 1000;
kmax = 1000000;
}
else if (!strcmp(hostname, "c12u15.build.local")) {
imax = 10;
jmax = 1000;
kmax = 1000000;
}
volatile float *a, *b,*c;
a = new volatile float[10000000];
b = new volatile float[10000000];
c = new volatile float[10000000];
for (int i=0;i<imax;i++) {
for (int j=0;j<jmax;j++) {
for (int k=0;k<kmax;k++) {
c[k] = a[k] * b[k];
}
}
}
#endif
gettimeofday(&te, NULL);
cout << "\t***Kernel: " << ((te.tv_usec - tb.tv_usec) * 1e-6 + ((double)te.tv_sec - (double)tb.tv_sec)) << " and number: " << gpuConfig.localCnt<< " and disp: " << gpuConfig.localDisp << " global: " << hpDataset->mNumParticles << endl;
CHK_ERR( ConvertDeviceToHost());
DestroyParticleLists();
// cout << "MMM[" << gpuConfig.pid << "]: " << hpLocBuf[0].xPos<< endl;
return cudaSuccess;
}
|
048703ee01782f0ca507ffa41ccac30d763da2a5.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
#include "generateVelB.h"
namespace kernels
{
__global__
void fill_velB(real *velB, real *uB, real *vB, int totalPoints)
{
int k = threadIdx.x + blockIdx.x*blockDim.x;
if(k<totalPoints)
{
velB[k] = uB[k];
velB[k + totalPoints] = vB[k];
}
}
}
| 048703ee01782f0ca507ffa41ccac30d763da2a5.cu | #include "generateVelB.h"
namespace kernels
{
__global__
void fill_velB(real *velB, real *uB, real *vB, int totalPoints)
{
int k = threadIdx.x + blockIdx.x*blockDim.x;
if(k<totalPoints)
{
velB[k] = uB[k];
velB[k + totalPoints] = vB[k];
}
}
}
|
06bc3f3f0cb65b19223f084002171550d0547da1.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
// Copyright (c) 2009-2019 The Regents of the University of Michigan
// This file is part of the HOOMD-blue project, released under the BSD 3-Clause License.
// Maintainer: joaander
#include "NeighborListGPUBinned.cuh"
#include "hoomd/TextureTools.h"
#include "hoomd/WarpTools.cuh"
/*! \file NeighborListGPUBinned.cu
\brief Defines GPU kernel code for O(N) neighbor list generation on the GPU
*/
//! Texture for reading d_cell_xyzf
scalar4_tex_t cell_xyzf_1d_tex;
//! Kernel call for generating neighbor list on the GPU (Kepler optimized version)
/*! \tparam flags Set bit 1 to enable body filtering. Set bit 2 to enable diameter filtering.
\param d_nlist Neighbor list data structure to write
\param d_n_neigh Number of neighbors to write
\param d_last_updated_pos Particle positions at this update are written to this array
\param d_conditions Conditions array for writing overflow condition
\param d_Nmax Maximum number of neighbors per type
\param d_head_list List of indexes to access \a d_nlist
\param d_pos Particle positions
\param d_body Particle body indices
\param d_diameter Particle diameters
\param N Number of particles
\param d_cell_size Number of particles in each cell
\param d_cell_xyzf Cell contents (xyzf array from CellList with flag=type)
\param d_cell_idx Cell contents (particle indices)
\param d_cell_tdb Cell contents (tdb array from CellList with)
\param d_cell_adj Cell adjacency list
\param ci Cell indexer for indexing cells
\param cli Cell list indexer for indexing into d_cell_xyzf
\param cadji Adjacent cell indexer listing the 27 neighboring cells
\param box Simulation box dimensions
\param d_r_cut Cutoff radius stored by pair type r_cut(i,j)
\param r_buff The maximum radius for which to include particles as neighbors
\param ntypes Number of particle types
\param ghost_width Width of ghost cell layer
\param offset Starting particle index
\param nwork Number of particles to process
\note optimized for Kepler
*/
template<unsigned char flags, int use_index, int threads_per_particle>
__global__ void gpu_compute_nlist_binned_kernel(unsigned int *d_nlist,
unsigned int *d_n_neigh,
Scalar4 *d_last_updated_pos,
unsigned int *d_conditions,
const unsigned int *d_Nmax,
const unsigned int *d_head_list,
const Scalar4 *d_pos,
const unsigned int *d_body,
const Scalar *d_diameter,
const unsigned int N,
const unsigned int *d_cell_size,
const Scalar4 *d_cell_xyzf,
const unsigned int *d_cell_idx,
const Scalar4 *d_cell_tdb,
const unsigned int *d_cell_adj,
const Index3D ci,
const Index2D cli,
const Index2D cadji,
const BoxDim box,
const Scalar *d_r_cut,
const Scalar r_buff,
const unsigned int ntypes,
const Scalar3 ghost_width,
const unsigned int offset,
const unsigned int nwork,
const unsigned int ngpu)
{
bool filter_body = flags & 1;
bool diameter_shift = flags & 2;
// cache the r_listsq parameters into shared memory
Index2D typpair_idx(ntypes);
const unsigned int num_typ_parameters = typpair_idx.getNumElements();
// shared data for per type pair parameters
extern __shared__ unsigned char s_data[];
// pointer for the r_listsq data
Scalar *s_r_list = (Scalar *)(&s_data[0]);
unsigned int *s_Nmax = (unsigned int *)(&s_data[sizeof(Scalar)*num_typ_parameters]);
// load in the per type pair r_list
for (unsigned int cur_offset = 0; cur_offset < num_typ_parameters; cur_offset += blockDim.x)
{
if (cur_offset + threadIdx.x < num_typ_parameters)
{
Scalar r_cut = d_r_cut[cur_offset + threadIdx.x];
// force the r_list(i,j) to a skippable value if r_cut(i,j) is skippable
s_r_list[cur_offset + threadIdx.x] = (r_cut > Scalar(0.0)) ? r_cut+r_buff : Scalar(-1.0);
}
if (cur_offset + threadIdx.x < ntypes)
{
s_Nmax[cur_offset + threadIdx.x] = d_Nmax[cur_offset + threadIdx.x];
}
}
__syncthreads();
// each set of threads_per_particle threads is going to compute the neighbor list for a single particle
int my_pidx = blockIdx.x * (blockDim.x/threads_per_particle) + threadIdx.x/threads_per_particle;
// one thread per particle
if (my_pidx >= nwork) return;
// get particle index
my_pidx += offset;
Scalar4 my_postype = d_pos[my_pidx];
Scalar3 my_pos = make_scalar3(my_postype.x, my_postype.y, my_postype.z);
unsigned int my_type = __scalar_as_int(my_postype.w);
unsigned int my_body = d_body[my_pidx];
Scalar my_diam = d_diameter[my_pidx];
unsigned int my_head = d_head_list[my_pidx];
Scalar3 f = box.makeFraction(my_pos, ghost_width);
// find the bin each particle belongs in
int ib = (int)(f.x * ci.getW());
int jb = (int)(f.y * ci.getH());
int kb = (int)(f.z * ci.getD());
uchar3 periodic = box.getPeriodic();
// need to handle the case where the particle is exactly at the box hi
if (ib == ci.getW() && periodic.x)
ib = 0;
if (jb == ci.getH() && periodic.y)
jb = 0;
if (kb == ci.getD() && periodic.z)
kb = 0;
int my_cell = ci(ib,jb,kb);
// index of current neighbor
unsigned int cur_adj = 0;
// current device portion in cell list
unsigned int igpu = 0;
// current cell
unsigned int neigh_cell = d_cell_adj[cadji(cur_adj, my_cell)];
// size of current cell
unsigned int neigh_size = d_cell_size[neigh_cell];
// current index in cell
int cur_offset = threadIdx.x % threads_per_particle;
bool done = false;
// total number of neighbors
unsigned int nneigh = 0;
while (! done)
{
// initialize with default
unsigned int neighbor;
unsigned char has_neighbor = 0;
// advance neighbor cell
while (cur_offset >= neigh_size && !done )
{
cur_offset -= neigh_size;
cur_adj++;
if (cur_adj >= cadji.getW())
{
if (++igpu < ngpu)
{
cur_adj = 0;
}
else
{
// we are past the end of the cell neighbors
done = true;
neigh_size = 0;
}
}
if (! done)
{
neigh_cell = d_cell_adj[cadji(cur_adj, my_cell)];
neigh_size = d_cell_size[neigh_cell+igpu*ci.getNumElements()];
}
}
// check for a neighbor if thread is still working
if (!done)
{
Scalar4 cur_xyzf;
unsigned int j;
Scalar4 postype_j;
if (!use_index)
cur_xyzf = texFetchScalar4(d_cell_xyzf, cell_xyzf_1d_tex, cli(cur_offset, neigh_cell));
else
{
j = d_cell_idx[cli(cur_offset, neigh_cell)+igpu*cli.getNumElements()];
postype_j = d_pos[j];
cur_xyzf = make_scalar4(postype_j.x, postype_j.y, postype_j.z, __int_as_scalar(j));
}
Scalar4 cur_tdb;
if (!use_index)
cur_tdb = d_cell_tdb[cli(cur_offset, neigh_cell)];
else
cur_tdb = make_scalar4(postype_j.w, d_diameter[j], __int_as_scalar(d_body[j]),0);
// advance cur_offset
cur_offset += threads_per_particle;
unsigned int neigh_type = __scalar_as_int(cur_tdb.x);
// Only do the hard work if the particle should be included by r_cut(i,j)
Scalar r_list = s_r_list[typpair_idx(my_type,neigh_type)];
if (r_list > Scalar(0.0))
{
Scalar neigh_diam = cur_tdb.y;
unsigned int neigh_body = __scalar_as_int(cur_tdb.z);
Scalar3 neigh_pos = make_scalar3(cur_xyzf.x,
cur_xyzf.y,
cur_xyzf.z);
int cur_neigh = __scalar_as_int(cur_xyzf.w);
// compute the distance between the two particles
Scalar3 dx = my_pos - neigh_pos;
// wrap the periodic boundary conditions
dx = box.minImage(dx);
// compute dr squared
Scalar drsq = dot(dx,dx);
bool excluded = (my_pidx == cur_neigh);
if (filter_body && my_body != 0xffffffff)
excluded = excluded | (my_body == neigh_body);
Scalar sqshift = Scalar(0.0);
if (diameter_shift)
{
const Scalar delta = (my_diam + neigh_diam) * Scalar(0.5) - Scalar(1.0);
// r^2 < (r_list + delta)^2
// r^2 < r_listsq + delta^2 + 2*r_list*delta
sqshift = (delta + Scalar(2.0) * r_list) * delta;
}
// store result in shared memory
if (drsq <= (r_list*r_list + sqshift) && !excluded)
{
neighbor = cur_neigh;
has_neighbor = 1;
}
}
}
// now that possible neighbor checks are finished, done (for the cta) depends only on first thread
// neighbor list only needs to get written into if thread 0 is not done
done = hoomd::detail::WarpScan<bool, threads_per_particle>().Broadcast(done, 0);
if (!done)
{
// scan over flags
unsigned char k(0), n(0);
hoomd::detail::WarpScan<unsigned char, threads_per_particle>().ExclusiveSum(has_neighbor, k, n);
// write neighbor if it fits in list
if (has_neighbor && (nneigh + k) < s_Nmax[my_type])
d_nlist[my_head + nneigh + k] = neighbor;
// increment total neighbor count
nneigh += n;
}
} // end while
if (threadIdx.x % threads_per_particle == 0)
{
// flag if we need to grow the neighbor list
if (nneigh >= s_Nmax[my_type])
atomicMax(&d_conditions[my_type], nneigh);
d_n_neigh[my_pidx] = nneigh;
d_last_updated_pos[my_pidx] = my_postype;
}
}
//! determine maximum possible block size
template<typename T>
int get_max_block_size(T func)
{
hipFuncAttributes attr;
hipFuncGetAttributes(&attr, (const void*)func);
int max_threads = attr.maxThreadsPerBlock;
// number of threads has to be multiple of warp size
max_threads -= max_threads % max_threads_per_particle;
return max_threads;
}
void gpu_nlist_binned_bind_texture(const Scalar4 *d_cell_xyzf, unsigned int n_elements)
{
// bind the position texture
cell_xyzf_1d_tex.normalized = false;
cell_xyzf_1d_tex.filterMode = hipFilterModePoint;
hipBindTexture(0, cell_xyzf_1d_tex, d_cell_xyzf, sizeof(Scalar4)*n_elements);
}
//! recursive template to launch neighborlist with given template parameters
/* \tparam cur_tpp Number of threads per particle (assumed to be power of two) */
template<int cur_tpp>
inline void launcher(unsigned int *d_nlist,
unsigned int *d_n_neigh,
Scalar4 *d_last_updated_pos,
unsigned int *d_conditions,
const unsigned int *d_Nmax,
const unsigned int *d_head_list,
const Scalar4 *d_pos,
const unsigned int *d_body,
const Scalar *d_diameter,
const unsigned int N,
const unsigned int *d_cell_size,
const Scalar4 *d_cell_xyzf,
const unsigned int *d_cell_idx,
const Scalar4 *d_cell_tdb,
const unsigned int *d_cell_adj,
const Index3D ci,
const Index2D cli,
const Index2D cadji,
const BoxDim box,
const Scalar *d_r_cut,
const Scalar r_buff,
const unsigned int ntypes,
const Scalar3 ghost_width,
const unsigned int compute_capability,
unsigned int tpp,
bool filter_body,
bool diameter_shift,
unsigned int block_size,
std::pair<unsigned int, unsigned int> range,
bool use_index,
const unsigned int ngpu)
{
// shared memory = r_listsq + Nmax + stuff needed for neighborlist (computed below)
Index2D typpair_idx(ntypes);
unsigned int shared_size = sizeof(Scalar)*typpair_idx.getNumElements() + sizeof(unsigned int)*ntypes;
unsigned int offset = range.first;
unsigned int nwork = range.second - range.first;
if (tpp == cur_tpp && cur_tpp != 0)
{
if (!use_index)
{
if (!diameter_shift && !filter_body)
{
static unsigned int max_block_size = UINT_MAX;
if (max_block_size == UINT_MAX)
max_block_size = get_max_block_size(gpu_compute_nlist_binned_kernel<0,0,cur_tpp>);
if (compute_capability < 35) gpu_nlist_binned_bind_texture(d_cell_xyzf, cli.getNumElements());
block_size = block_size < max_block_size ? block_size : max_block_size;
dim3 grid(nwork / (block_size/tpp) + 1);
hipLaunchKernelGGL(( gpu_compute_nlist_binned_kernel<0,0,cur_tpp>), dim3(grid), dim3(block_size),shared_size, 0, d_nlist,
d_n_neigh,
d_last_updated_pos,
d_conditions,
d_Nmax,
d_head_list,
d_pos,
d_body,
d_diameter,
N,
d_cell_size,
d_cell_xyzf,
d_cell_idx,
d_cell_tdb,
d_cell_adj,
ci,
cli,
cadji,
box,
d_r_cut,
r_buff,
ntypes,
ghost_width,
offset,
nwork,
ngpu);
}
else if (!diameter_shift && filter_body)
{
static unsigned int max_block_size = UINT_MAX;
if (max_block_size == UINT_MAX)
max_block_size = get_max_block_size(gpu_compute_nlist_binned_kernel<1,0,cur_tpp>);
if (compute_capability < 35) gpu_nlist_binned_bind_texture(d_cell_xyzf, cli.getNumElements());
block_size = block_size < max_block_size ? block_size : max_block_size;
dim3 grid(nwork / (block_size/tpp) + 1);
hipLaunchKernelGGL(( gpu_compute_nlist_binned_kernel<1,0,cur_tpp>), dim3(grid), dim3(block_size),shared_size, 0, d_nlist,
d_n_neigh,
d_last_updated_pos,
d_conditions,
d_Nmax,
d_head_list,
d_pos,
d_body,
d_diameter,
N,
d_cell_size,
d_cell_xyzf,
d_cell_idx,
d_cell_tdb,
d_cell_adj,
ci,
cli,
cadji,
box,
d_r_cut,
r_buff,
ntypes,
ghost_width,
offset,
nwork,
ngpu);
}
else if (diameter_shift && !filter_body)
{
static unsigned int max_block_size = UINT_MAX;
if (max_block_size == UINT_MAX)
max_block_size = get_max_block_size(gpu_compute_nlist_binned_kernel<2,0,cur_tpp>);
if (compute_capability < 35) gpu_nlist_binned_bind_texture(d_cell_xyzf, cli.getNumElements());
block_size = block_size < max_block_size ? block_size : max_block_size;
dim3 grid(nwork / (block_size/tpp) + 1);
hipLaunchKernelGGL(( gpu_compute_nlist_binned_kernel<2,0,cur_tpp>), dim3(grid), dim3(block_size),shared_size, 0, d_nlist,
d_n_neigh,
d_last_updated_pos,
d_conditions,
d_Nmax,
d_head_list,
d_pos,
d_body,
d_diameter,
N,
d_cell_size,
d_cell_xyzf,
d_cell_idx,
d_cell_tdb,
d_cell_adj,
ci,
cli,
cadji,
box,
d_r_cut,
r_buff,
ntypes,
ghost_width,
offset,
nwork,
ngpu);
}
else if (diameter_shift && filter_body)
{
static unsigned int max_block_size = UINT_MAX;
if (max_block_size == UINT_MAX)
max_block_size = get_max_block_size(gpu_compute_nlist_binned_kernel<3,0,cur_tpp>);
if (compute_capability < 35) gpu_nlist_binned_bind_texture(d_cell_xyzf, cli.getNumElements());
block_size = block_size < max_block_size ? block_size : max_block_size;
dim3 grid(nwork / (block_size/tpp) + 1);
hipLaunchKernelGGL(( gpu_compute_nlist_binned_kernel<3,0,cur_tpp>), dim3(grid), dim3(block_size),shared_size, 0, d_nlist,
d_n_neigh,
d_last_updated_pos,
d_conditions,
d_Nmax,
d_head_list,
d_pos,
d_body,
d_diameter,
N,
d_cell_size,
d_cell_xyzf,
d_cell_idx,
d_cell_tdb,
d_cell_adj,
ci,
cli,
cadji,
box,
d_r_cut,
r_buff,
ntypes,
ghost_width,
offset,
nwork,
ngpu);
}
}
else // use_index
{
if (!diameter_shift && !filter_body)
{
static unsigned int max_block_size = UINT_MAX;
if (max_block_size == UINT_MAX)
max_block_size = get_max_block_size(gpu_compute_nlist_binned_kernel<0,1,cur_tpp>);
if (compute_capability < 35) gpu_nlist_binned_bind_texture(d_cell_xyzf, cli.getNumElements());
block_size = block_size < max_block_size ? block_size : max_block_size;
dim3 grid(nwork / (block_size/tpp) + 1);
hipLaunchKernelGGL(( gpu_compute_nlist_binned_kernel<0,1,cur_tpp>), dim3(grid), dim3(block_size),shared_size, 0, d_nlist,
d_n_neigh,
d_last_updated_pos,
d_conditions,
d_Nmax,
d_head_list,
d_pos,
d_body,
d_diameter,
N,
d_cell_size,
d_cell_xyzf,
d_cell_idx,
d_cell_tdb,
d_cell_adj,
ci,
cli,
cadji,
box,
d_r_cut,
r_buff,
ntypes,
ghost_width,
offset,
nwork,
ngpu);
}
else if (!diameter_shift && filter_body)
{
static unsigned int max_block_size = UINT_MAX;
if (max_block_size == UINT_MAX)
max_block_size = get_max_block_size(gpu_compute_nlist_binned_kernel<1,1,cur_tpp>);
if (compute_capability < 35) gpu_nlist_binned_bind_texture(d_cell_xyzf, cli.getNumElements());
block_size = block_size < max_block_size ? block_size : max_block_size;
dim3 grid(nwork / (block_size/tpp) + 1);
hipLaunchKernelGGL(( gpu_compute_nlist_binned_kernel<1,1,cur_tpp>), dim3(grid), dim3(block_size),shared_size, 0, d_nlist,
d_n_neigh,
d_last_updated_pos,
d_conditions,
d_Nmax,
d_head_list,
d_pos,
d_body,
d_diameter,
N,
d_cell_size,
d_cell_xyzf,
d_cell_idx,
d_cell_tdb,
d_cell_adj,
ci,
cli,
cadji,
box,
d_r_cut,
r_buff,
ntypes,
ghost_width,
offset,
nwork,
ngpu);
}
else if (diameter_shift && !filter_body)
{
static unsigned int max_block_size = UINT_MAX;
if (max_block_size == UINT_MAX)
max_block_size = get_max_block_size(gpu_compute_nlist_binned_kernel<2,1,cur_tpp>);
if (compute_capability < 35) gpu_nlist_binned_bind_texture(d_cell_xyzf, cli.getNumElements());
block_size = block_size < max_block_size ? block_size : max_block_size;
dim3 grid(nwork / (block_size/tpp) + 1);
hipLaunchKernelGGL(( gpu_compute_nlist_binned_kernel<2,1,cur_tpp>), dim3(grid), dim3(block_size),shared_size, 0, d_nlist,
d_n_neigh,
d_last_updated_pos,
d_conditions,
d_Nmax,
d_head_list,
d_pos,
d_body,
d_diameter,
N,
d_cell_size,
d_cell_xyzf,
d_cell_idx,
d_cell_tdb,
d_cell_adj,
ci,
cli,
cadji,
box,
d_r_cut,
r_buff,
ntypes,
ghost_width,
offset,
nwork,
ngpu);
}
else if (diameter_shift && filter_body)
{
static unsigned int max_block_size = UINT_MAX;
if (max_block_size == UINT_MAX)
max_block_size = get_max_block_size(gpu_compute_nlist_binned_kernel<3,1,cur_tpp>);
if (compute_capability < 35) gpu_nlist_binned_bind_texture(d_cell_xyzf, cli.getNumElements());
block_size = block_size < max_block_size ? block_size : max_block_size;
dim3 grid(nwork / (block_size/tpp) + 1);
hipLaunchKernelGGL(( gpu_compute_nlist_binned_kernel<3,1,cur_tpp>), dim3(grid), dim3(block_size),shared_size, 0, d_nlist,
d_n_neigh,
d_last_updated_pos,
d_conditions,
d_Nmax,
d_head_list,
d_pos,
d_body,
d_diameter,
N,
d_cell_size,
d_cell_xyzf,
d_cell_idx,
d_cell_tdb,
d_cell_adj,
ci,
cli,
cadji,
box,
d_r_cut,
r_buff,
ntypes,
ghost_width,
offset,
nwork,
ngpu);
}
}
}
else
{
launcher<cur_tpp/2>(d_nlist,
d_n_neigh,
d_last_updated_pos,
d_conditions,
d_Nmax,
d_head_list,
d_pos,
d_body,
d_diameter,
N,
d_cell_size,
d_cell_xyzf,
d_cell_idx,
d_cell_tdb,
d_cell_adj,
ci,
cli,
cadji,
box,
d_r_cut,
r_buff,
ntypes,
ghost_width,
compute_capability,
tpp,
filter_body,
diameter_shift,
block_size,
range,
use_index,
ngpu
);
}
}
//! template specialization to terminate recursion
template<>
inline void launcher<min_threads_per_particle/2>(unsigned int *d_nlist,
unsigned int *d_n_neigh,
Scalar4 *d_last_updated_pos,
unsigned int *d_conditions,
const unsigned int *d_Nmax,
const unsigned int *d_head_list,
const Scalar4 *d_pos,
const unsigned int *d_body,
const Scalar *d_diameter,
const unsigned int N,
const unsigned int *d_cell_size,
const Scalar4 *d_cell_xyzf,
const unsigned int *d_cell_idx,
const Scalar4 *d_cell_tdb,
const unsigned int *d_cell_adj,
const Index3D ci,
const Index2D cli,
const Index2D cadji,
const BoxDim box,
const Scalar *d_r_cut,
const Scalar r_buff,
const unsigned int ntypes,
const Scalar3 ghost_width,
const unsigned int compute_capability,
unsigned int tpp,
bool filter_body,
bool diameter_shift,
unsigned int block_size,
std::pair<unsigned int, unsigned int> range,
bool use_index,
const unsigned int ngpu)
{ }
hipError_t gpu_compute_nlist_binned(unsigned int *d_nlist,
unsigned int *d_n_neigh,
Scalar4 *d_last_updated_pos,
unsigned int *d_conditions,
const unsigned int *d_Nmax,
const unsigned int *d_head_list,
const Scalar4 *d_pos,
const unsigned int *d_body,
const Scalar *d_diameter,
const unsigned int N,
const unsigned int *d_cell_size,
const Scalar4 *d_cell_xyzf,
const unsigned int *d_cell_idx,
const Scalar4 *d_cell_tdb,
const unsigned int *d_cell_adj,
const Index3D& ci,
const Index2D& cli,
const Index2D& cadji,
const BoxDim& box,
const Scalar *d_r_cut,
const Scalar r_buff,
const unsigned int ntypes,
const unsigned int threads_per_particle,
const unsigned int block_size,
bool filter_body,
bool diameter_shift,
const Scalar3& ghost_width,
const unsigned int compute_capability,
const GPUPartition& gpu_partition,
bool use_index)
{
unsigned int ngpu = gpu_partition.getNumActiveGPUs();
// iterate over active GPUs in reverse, to end up on first GPU when returning from this function
for (int idev = gpu_partition.getNumActiveGPUs() - 1; idev >= 0; --idev)
{
auto range = gpu_partition.getRangeAndSetGPU(idev);
launcher<max_threads_per_particle>(d_nlist,
d_n_neigh,
d_last_updated_pos,
d_conditions,
d_Nmax,
d_head_list,
d_pos,
d_body,
d_diameter,
N,
d_cell_size,
d_cell_xyzf,
d_cell_idx,
d_cell_tdb,
d_cell_adj,
ci,
cli,
cadji,
box,
d_r_cut,
r_buff,
ntypes,
ghost_width,
compute_capability,
threads_per_particle,
filter_body,
diameter_shift,
block_size,
range,
use_index,
ngpu
);
}
return hipSuccess;
}
| 06bc3f3f0cb65b19223f084002171550d0547da1.cu | // Copyright (c) 2009-2019 The Regents of the University of Michigan
// This file is part of the HOOMD-blue project, released under the BSD 3-Clause License.
// Maintainer: joaander
#include "NeighborListGPUBinned.cuh"
#include "hoomd/TextureTools.h"
#include "hoomd/WarpTools.cuh"
/*! \file NeighborListGPUBinned.cu
\brief Defines GPU kernel code for O(N) neighbor list generation on the GPU
*/
//! Texture for reading d_cell_xyzf
scalar4_tex_t cell_xyzf_1d_tex;
//! Kernel call for generating neighbor list on the GPU (Kepler optimized version)
/*! \tparam flags Set bit 1 to enable body filtering. Set bit 2 to enable diameter filtering.
\param d_nlist Neighbor list data structure to write
\param d_n_neigh Number of neighbors to write
\param d_last_updated_pos Particle positions at this update are written to this array
\param d_conditions Conditions array for writing overflow condition
\param d_Nmax Maximum number of neighbors per type
\param d_head_list List of indexes to access \a d_nlist
\param d_pos Particle positions
\param d_body Particle body indices
\param d_diameter Particle diameters
\param N Number of particles
\param d_cell_size Number of particles in each cell
\param d_cell_xyzf Cell contents (xyzf array from CellList with flag=type)
\param d_cell_idx Cell contents (particle indices)
\param d_cell_tdb Cell contents (tdb array from CellList with)
\param d_cell_adj Cell adjacency list
\param ci Cell indexer for indexing cells
\param cli Cell list indexer for indexing into d_cell_xyzf
\param cadji Adjacent cell indexer listing the 27 neighboring cells
\param box Simulation box dimensions
\param d_r_cut Cutoff radius stored by pair type r_cut(i,j)
\param r_buff The maximum radius for which to include particles as neighbors
\param ntypes Number of particle types
\param ghost_width Width of ghost cell layer
\param offset Starting particle index
\param nwork Number of particles to process
\note optimized for Kepler
*/
template<unsigned char flags, int use_index, int threads_per_particle>
__global__ void gpu_compute_nlist_binned_kernel(unsigned int *d_nlist,
unsigned int *d_n_neigh,
Scalar4 *d_last_updated_pos,
unsigned int *d_conditions,
const unsigned int *d_Nmax,
const unsigned int *d_head_list,
const Scalar4 *d_pos,
const unsigned int *d_body,
const Scalar *d_diameter,
const unsigned int N,
const unsigned int *d_cell_size,
const Scalar4 *d_cell_xyzf,
const unsigned int *d_cell_idx,
const Scalar4 *d_cell_tdb,
const unsigned int *d_cell_adj,
const Index3D ci,
const Index2D cli,
const Index2D cadji,
const BoxDim box,
const Scalar *d_r_cut,
const Scalar r_buff,
const unsigned int ntypes,
const Scalar3 ghost_width,
const unsigned int offset,
const unsigned int nwork,
const unsigned int ngpu)
{
bool filter_body = flags & 1;
bool diameter_shift = flags & 2;
// cache the r_listsq parameters into shared memory
Index2D typpair_idx(ntypes);
const unsigned int num_typ_parameters = typpair_idx.getNumElements();
// shared data for per type pair parameters
extern __shared__ unsigned char s_data[];
// pointer for the r_listsq data
Scalar *s_r_list = (Scalar *)(&s_data[0]);
unsigned int *s_Nmax = (unsigned int *)(&s_data[sizeof(Scalar)*num_typ_parameters]);
// load in the per type pair r_list
for (unsigned int cur_offset = 0; cur_offset < num_typ_parameters; cur_offset += blockDim.x)
{
if (cur_offset + threadIdx.x < num_typ_parameters)
{
Scalar r_cut = d_r_cut[cur_offset + threadIdx.x];
// force the r_list(i,j) to a skippable value if r_cut(i,j) is skippable
s_r_list[cur_offset + threadIdx.x] = (r_cut > Scalar(0.0)) ? r_cut+r_buff : Scalar(-1.0);
}
if (cur_offset + threadIdx.x < ntypes)
{
s_Nmax[cur_offset + threadIdx.x] = d_Nmax[cur_offset + threadIdx.x];
}
}
__syncthreads();
// each set of threads_per_particle threads is going to compute the neighbor list for a single particle
int my_pidx = blockIdx.x * (blockDim.x/threads_per_particle) + threadIdx.x/threads_per_particle;
// one thread per particle
if (my_pidx >= nwork) return;
// get particle index
my_pidx += offset;
Scalar4 my_postype = d_pos[my_pidx];
Scalar3 my_pos = make_scalar3(my_postype.x, my_postype.y, my_postype.z);
unsigned int my_type = __scalar_as_int(my_postype.w);
unsigned int my_body = d_body[my_pidx];
Scalar my_diam = d_diameter[my_pidx];
unsigned int my_head = d_head_list[my_pidx];
Scalar3 f = box.makeFraction(my_pos, ghost_width);
// find the bin each particle belongs in
int ib = (int)(f.x * ci.getW());
int jb = (int)(f.y * ci.getH());
int kb = (int)(f.z * ci.getD());
uchar3 periodic = box.getPeriodic();
// need to handle the case where the particle is exactly at the box hi
if (ib == ci.getW() && periodic.x)
ib = 0;
if (jb == ci.getH() && periodic.y)
jb = 0;
if (kb == ci.getD() && periodic.z)
kb = 0;
int my_cell = ci(ib,jb,kb);
// index of current neighbor
unsigned int cur_adj = 0;
// current device portion in cell list
unsigned int igpu = 0;
// current cell
unsigned int neigh_cell = d_cell_adj[cadji(cur_adj, my_cell)];
// size of current cell
unsigned int neigh_size = d_cell_size[neigh_cell];
// current index in cell
int cur_offset = threadIdx.x % threads_per_particle;
bool done = false;
// total number of neighbors
unsigned int nneigh = 0;
while (! done)
{
// initialize with default
unsigned int neighbor;
unsigned char has_neighbor = 0;
// advance neighbor cell
while (cur_offset >= neigh_size && !done )
{
cur_offset -= neigh_size;
cur_adj++;
if (cur_adj >= cadji.getW())
{
if (++igpu < ngpu)
{
cur_adj = 0;
}
else
{
// we are past the end of the cell neighbors
done = true;
neigh_size = 0;
}
}
if (! done)
{
neigh_cell = d_cell_adj[cadji(cur_adj, my_cell)];
neigh_size = d_cell_size[neigh_cell+igpu*ci.getNumElements()];
}
}
// check for a neighbor if thread is still working
if (!done)
{
Scalar4 cur_xyzf;
unsigned int j;
Scalar4 postype_j;
if (!use_index)
cur_xyzf = texFetchScalar4(d_cell_xyzf, cell_xyzf_1d_tex, cli(cur_offset, neigh_cell));
else
{
j = d_cell_idx[cli(cur_offset, neigh_cell)+igpu*cli.getNumElements()];
postype_j = d_pos[j];
cur_xyzf = make_scalar4(postype_j.x, postype_j.y, postype_j.z, __int_as_scalar(j));
}
Scalar4 cur_tdb;
if (!use_index)
cur_tdb = d_cell_tdb[cli(cur_offset, neigh_cell)];
else
cur_tdb = make_scalar4(postype_j.w, d_diameter[j], __int_as_scalar(d_body[j]),0);
// advance cur_offset
cur_offset += threads_per_particle;
unsigned int neigh_type = __scalar_as_int(cur_tdb.x);
// Only do the hard work if the particle should be included by r_cut(i,j)
Scalar r_list = s_r_list[typpair_idx(my_type,neigh_type)];
if (r_list > Scalar(0.0))
{
Scalar neigh_diam = cur_tdb.y;
unsigned int neigh_body = __scalar_as_int(cur_tdb.z);
Scalar3 neigh_pos = make_scalar3(cur_xyzf.x,
cur_xyzf.y,
cur_xyzf.z);
int cur_neigh = __scalar_as_int(cur_xyzf.w);
// compute the distance between the two particles
Scalar3 dx = my_pos - neigh_pos;
// wrap the periodic boundary conditions
dx = box.minImage(dx);
// compute dr squared
Scalar drsq = dot(dx,dx);
bool excluded = (my_pidx == cur_neigh);
if (filter_body && my_body != 0xffffffff)
excluded = excluded | (my_body == neigh_body);
Scalar sqshift = Scalar(0.0);
if (diameter_shift)
{
const Scalar delta = (my_diam + neigh_diam) * Scalar(0.5) - Scalar(1.0);
// r^2 < (r_list + delta)^2
// r^2 < r_listsq + delta^2 + 2*r_list*delta
sqshift = (delta + Scalar(2.0) * r_list) * delta;
}
// store result in shared memory
if (drsq <= (r_list*r_list + sqshift) && !excluded)
{
neighbor = cur_neigh;
has_neighbor = 1;
}
}
}
// now that possible neighbor checks are finished, done (for the cta) depends only on first thread
// neighbor list only needs to get written into if thread 0 is not done
done = hoomd::detail::WarpScan<bool, threads_per_particle>().Broadcast(done, 0);
if (!done)
{
// scan over flags
unsigned char k(0), n(0);
hoomd::detail::WarpScan<unsigned char, threads_per_particle>().ExclusiveSum(has_neighbor, k, n);
// write neighbor if it fits in list
if (has_neighbor && (nneigh + k) < s_Nmax[my_type])
d_nlist[my_head + nneigh + k] = neighbor;
// increment total neighbor count
nneigh += n;
}
} // end while
if (threadIdx.x % threads_per_particle == 0)
{
// flag if we need to grow the neighbor list
if (nneigh >= s_Nmax[my_type])
atomicMax(&d_conditions[my_type], nneigh);
d_n_neigh[my_pidx] = nneigh;
d_last_updated_pos[my_pidx] = my_postype;
}
}
//! determine maximum possible block size
template<typename T>
int get_max_block_size(T func)
{
cudaFuncAttributes attr;
cudaFuncGetAttributes(&attr, (const void*)func);
int max_threads = attr.maxThreadsPerBlock;
// number of threads has to be multiple of warp size
max_threads -= max_threads % max_threads_per_particle;
return max_threads;
}
void gpu_nlist_binned_bind_texture(const Scalar4 *d_cell_xyzf, unsigned int n_elements)
{
// bind the position texture
cell_xyzf_1d_tex.normalized = false;
cell_xyzf_1d_tex.filterMode = cudaFilterModePoint;
cudaBindTexture(0, cell_xyzf_1d_tex, d_cell_xyzf, sizeof(Scalar4)*n_elements);
}
//! recursive template to launch neighborlist with given template parameters
/* \tparam cur_tpp Number of threads per particle (assumed to be power of two) */
template<int cur_tpp>
inline void launcher(unsigned int *d_nlist,
unsigned int *d_n_neigh,
Scalar4 *d_last_updated_pos,
unsigned int *d_conditions,
const unsigned int *d_Nmax,
const unsigned int *d_head_list,
const Scalar4 *d_pos,
const unsigned int *d_body,
const Scalar *d_diameter,
const unsigned int N,
const unsigned int *d_cell_size,
const Scalar4 *d_cell_xyzf,
const unsigned int *d_cell_idx,
const Scalar4 *d_cell_tdb,
const unsigned int *d_cell_adj,
const Index3D ci,
const Index2D cli,
const Index2D cadji,
const BoxDim box,
const Scalar *d_r_cut,
const Scalar r_buff,
const unsigned int ntypes,
const Scalar3 ghost_width,
const unsigned int compute_capability,
unsigned int tpp,
bool filter_body,
bool diameter_shift,
unsigned int block_size,
std::pair<unsigned int, unsigned int> range,
bool use_index,
const unsigned int ngpu)
{
// shared memory = r_listsq + Nmax + stuff needed for neighborlist (computed below)
Index2D typpair_idx(ntypes);
unsigned int shared_size = sizeof(Scalar)*typpair_idx.getNumElements() + sizeof(unsigned int)*ntypes;
unsigned int offset = range.first;
unsigned int nwork = range.second - range.first;
if (tpp == cur_tpp && cur_tpp != 0)
{
if (!use_index)
{
if (!diameter_shift && !filter_body)
{
static unsigned int max_block_size = UINT_MAX;
if (max_block_size == UINT_MAX)
max_block_size = get_max_block_size(gpu_compute_nlist_binned_kernel<0,0,cur_tpp>);
if (compute_capability < 35) gpu_nlist_binned_bind_texture(d_cell_xyzf, cli.getNumElements());
block_size = block_size < max_block_size ? block_size : max_block_size;
dim3 grid(nwork / (block_size/tpp) + 1);
gpu_compute_nlist_binned_kernel<0,0,cur_tpp><<<grid, block_size,shared_size>>>(d_nlist,
d_n_neigh,
d_last_updated_pos,
d_conditions,
d_Nmax,
d_head_list,
d_pos,
d_body,
d_diameter,
N,
d_cell_size,
d_cell_xyzf,
d_cell_idx,
d_cell_tdb,
d_cell_adj,
ci,
cli,
cadji,
box,
d_r_cut,
r_buff,
ntypes,
ghost_width,
offset,
nwork,
ngpu);
}
else if (!diameter_shift && filter_body)
{
static unsigned int max_block_size = UINT_MAX;
if (max_block_size == UINT_MAX)
max_block_size = get_max_block_size(gpu_compute_nlist_binned_kernel<1,0,cur_tpp>);
if (compute_capability < 35) gpu_nlist_binned_bind_texture(d_cell_xyzf, cli.getNumElements());
block_size = block_size < max_block_size ? block_size : max_block_size;
dim3 grid(nwork / (block_size/tpp) + 1);
gpu_compute_nlist_binned_kernel<1,0,cur_tpp><<<grid, block_size,shared_size>>>(d_nlist,
d_n_neigh,
d_last_updated_pos,
d_conditions,
d_Nmax,
d_head_list,
d_pos,
d_body,
d_diameter,
N,
d_cell_size,
d_cell_xyzf,
d_cell_idx,
d_cell_tdb,
d_cell_adj,
ci,
cli,
cadji,
box,
d_r_cut,
r_buff,
ntypes,
ghost_width,
offset,
nwork,
ngpu);
}
else if (diameter_shift && !filter_body)
{
static unsigned int max_block_size = UINT_MAX;
if (max_block_size == UINT_MAX)
max_block_size = get_max_block_size(gpu_compute_nlist_binned_kernel<2,0,cur_tpp>);
if (compute_capability < 35) gpu_nlist_binned_bind_texture(d_cell_xyzf, cli.getNumElements());
block_size = block_size < max_block_size ? block_size : max_block_size;
dim3 grid(nwork / (block_size/tpp) + 1);
gpu_compute_nlist_binned_kernel<2,0,cur_tpp><<<grid, block_size,shared_size>>>(d_nlist,
d_n_neigh,
d_last_updated_pos,
d_conditions,
d_Nmax,
d_head_list,
d_pos,
d_body,
d_diameter,
N,
d_cell_size,
d_cell_xyzf,
d_cell_idx,
d_cell_tdb,
d_cell_adj,
ci,
cli,
cadji,
box,
d_r_cut,
r_buff,
ntypes,
ghost_width,
offset,
nwork,
ngpu);
}
else if (diameter_shift && filter_body)
{
static unsigned int max_block_size = UINT_MAX;
if (max_block_size == UINT_MAX)
max_block_size = get_max_block_size(gpu_compute_nlist_binned_kernel<3,0,cur_tpp>);
if (compute_capability < 35) gpu_nlist_binned_bind_texture(d_cell_xyzf, cli.getNumElements());
block_size = block_size < max_block_size ? block_size : max_block_size;
dim3 grid(nwork / (block_size/tpp) + 1);
gpu_compute_nlist_binned_kernel<3,0,cur_tpp><<<grid, block_size,shared_size>>>(d_nlist,
d_n_neigh,
d_last_updated_pos,
d_conditions,
d_Nmax,
d_head_list,
d_pos,
d_body,
d_diameter,
N,
d_cell_size,
d_cell_xyzf,
d_cell_idx,
d_cell_tdb,
d_cell_adj,
ci,
cli,
cadji,
box,
d_r_cut,
r_buff,
ntypes,
ghost_width,
offset,
nwork,
ngpu);
}
}
else // use_index
{
if (!diameter_shift && !filter_body)
{
static unsigned int max_block_size = UINT_MAX;
if (max_block_size == UINT_MAX)
max_block_size = get_max_block_size(gpu_compute_nlist_binned_kernel<0,1,cur_tpp>);
if (compute_capability < 35) gpu_nlist_binned_bind_texture(d_cell_xyzf, cli.getNumElements());
block_size = block_size < max_block_size ? block_size : max_block_size;
dim3 grid(nwork / (block_size/tpp) + 1);
gpu_compute_nlist_binned_kernel<0,1,cur_tpp><<<grid, block_size,shared_size>>>(d_nlist,
d_n_neigh,
d_last_updated_pos,
d_conditions,
d_Nmax,
d_head_list,
d_pos,
d_body,
d_diameter,
N,
d_cell_size,
d_cell_xyzf,
d_cell_idx,
d_cell_tdb,
d_cell_adj,
ci,
cli,
cadji,
box,
d_r_cut,
r_buff,
ntypes,
ghost_width,
offset,
nwork,
ngpu);
}
else if (!diameter_shift && filter_body)
{
static unsigned int max_block_size = UINT_MAX;
if (max_block_size == UINT_MAX)
max_block_size = get_max_block_size(gpu_compute_nlist_binned_kernel<1,1,cur_tpp>);
if (compute_capability < 35) gpu_nlist_binned_bind_texture(d_cell_xyzf, cli.getNumElements());
block_size = block_size < max_block_size ? block_size : max_block_size;
dim3 grid(nwork / (block_size/tpp) + 1);
gpu_compute_nlist_binned_kernel<1,1,cur_tpp><<<grid, block_size,shared_size>>>(d_nlist,
d_n_neigh,
d_last_updated_pos,
d_conditions,
d_Nmax,
d_head_list,
d_pos,
d_body,
d_diameter,
N,
d_cell_size,
d_cell_xyzf,
d_cell_idx,
d_cell_tdb,
d_cell_adj,
ci,
cli,
cadji,
box,
d_r_cut,
r_buff,
ntypes,
ghost_width,
offset,
nwork,
ngpu);
}
else if (diameter_shift && !filter_body)
{
static unsigned int max_block_size = UINT_MAX;
if (max_block_size == UINT_MAX)
max_block_size = get_max_block_size(gpu_compute_nlist_binned_kernel<2,1,cur_tpp>);
if (compute_capability < 35) gpu_nlist_binned_bind_texture(d_cell_xyzf, cli.getNumElements());
block_size = block_size < max_block_size ? block_size : max_block_size;
dim3 grid(nwork / (block_size/tpp) + 1);
gpu_compute_nlist_binned_kernel<2,1,cur_tpp><<<grid, block_size,shared_size>>>(d_nlist,
d_n_neigh,
d_last_updated_pos,
d_conditions,
d_Nmax,
d_head_list,
d_pos,
d_body,
d_diameter,
N,
d_cell_size,
d_cell_xyzf,
d_cell_idx,
d_cell_tdb,
d_cell_adj,
ci,
cli,
cadji,
box,
d_r_cut,
r_buff,
ntypes,
ghost_width,
offset,
nwork,
ngpu);
}
else if (diameter_shift && filter_body)
{
static unsigned int max_block_size = UINT_MAX;
if (max_block_size == UINT_MAX)
max_block_size = get_max_block_size(gpu_compute_nlist_binned_kernel<3,1,cur_tpp>);
if (compute_capability < 35) gpu_nlist_binned_bind_texture(d_cell_xyzf, cli.getNumElements());
block_size = block_size < max_block_size ? block_size : max_block_size;
dim3 grid(nwork / (block_size/tpp) + 1);
gpu_compute_nlist_binned_kernel<3,1,cur_tpp><<<grid, block_size,shared_size>>>(d_nlist,
d_n_neigh,
d_last_updated_pos,
d_conditions,
d_Nmax,
d_head_list,
d_pos,
d_body,
d_diameter,
N,
d_cell_size,
d_cell_xyzf,
d_cell_idx,
d_cell_tdb,
d_cell_adj,
ci,
cli,
cadji,
box,
d_r_cut,
r_buff,
ntypes,
ghost_width,
offset,
nwork,
ngpu);
}
}
}
else
{
launcher<cur_tpp/2>(d_nlist,
d_n_neigh,
d_last_updated_pos,
d_conditions,
d_Nmax,
d_head_list,
d_pos,
d_body,
d_diameter,
N,
d_cell_size,
d_cell_xyzf,
d_cell_idx,
d_cell_tdb,
d_cell_adj,
ci,
cli,
cadji,
box,
d_r_cut,
r_buff,
ntypes,
ghost_width,
compute_capability,
tpp,
filter_body,
diameter_shift,
block_size,
range,
use_index,
ngpu
);
}
}
//! template specialization to terminate recursion
template<>
inline void launcher<min_threads_per_particle/2>(unsigned int *d_nlist,
unsigned int *d_n_neigh,
Scalar4 *d_last_updated_pos,
unsigned int *d_conditions,
const unsigned int *d_Nmax,
const unsigned int *d_head_list,
const Scalar4 *d_pos,
const unsigned int *d_body,
const Scalar *d_diameter,
const unsigned int N,
const unsigned int *d_cell_size,
const Scalar4 *d_cell_xyzf,
const unsigned int *d_cell_idx,
const Scalar4 *d_cell_tdb,
const unsigned int *d_cell_adj,
const Index3D ci,
const Index2D cli,
const Index2D cadji,
const BoxDim box,
const Scalar *d_r_cut,
const Scalar r_buff,
const unsigned int ntypes,
const Scalar3 ghost_width,
const unsigned int compute_capability,
unsigned int tpp,
bool filter_body,
bool diameter_shift,
unsigned int block_size,
std::pair<unsigned int, unsigned int> range,
bool use_index,
const unsigned int ngpu)
{ }
cudaError_t gpu_compute_nlist_binned(unsigned int *d_nlist,
unsigned int *d_n_neigh,
Scalar4 *d_last_updated_pos,
unsigned int *d_conditions,
const unsigned int *d_Nmax,
const unsigned int *d_head_list,
const Scalar4 *d_pos,
const unsigned int *d_body,
const Scalar *d_diameter,
const unsigned int N,
const unsigned int *d_cell_size,
const Scalar4 *d_cell_xyzf,
const unsigned int *d_cell_idx,
const Scalar4 *d_cell_tdb,
const unsigned int *d_cell_adj,
const Index3D& ci,
const Index2D& cli,
const Index2D& cadji,
const BoxDim& box,
const Scalar *d_r_cut,
const Scalar r_buff,
const unsigned int ntypes,
const unsigned int threads_per_particle,
const unsigned int block_size,
bool filter_body,
bool diameter_shift,
const Scalar3& ghost_width,
const unsigned int compute_capability,
const GPUPartition& gpu_partition,
bool use_index)
{
unsigned int ngpu = gpu_partition.getNumActiveGPUs();
// iterate over active GPUs in reverse, to end up on first GPU when returning from this function
for (int idev = gpu_partition.getNumActiveGPUs() - 1; idev >= 0; --idev)
{
auto range = gpu_partition.getRangeAndSetGPU(idev);
launcher<max_threads_per_particle>(d_nlist,
d_n_neigh,
d_last_updated_pos,
d_conditions,
d_Nmax,
d_head_list,
d_pos,
d_body,
d_diameter,
N,
d_cell_size,
d_cell_xyzf,
d_cell_idx,
d_cell_tdb,
d_cell_adj,
ci,
cli,
cadji,
box,
d_r_cut,
r_buff,
ntypes,
ghost_width,
compute_capability,
threads_per_particle,
filter_body,
diameter_shift,
block_size,
range,
use_index,
ngpu
);
}
return cudaSuccess;
}
|
178ff3ee4822b0c59b6c4cea36fc811f7cdd116c.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
#include <cstdio>
#include "sandbox_Cuda.H"
#include "Config.H"
#include "IntVect.H"
#include "Box.H"
#include "CudaFab.H"
__global__ void
testKernel1(Real* a_fab, IntVect a_ivA)
{
__shared__ IntVect s_iv;
if (threadIdx.x < g_SpaceDim)
{
s_iv[threadIdx.x] = a_ivA[threadIdx.x];
}
IntVect r_iv(s_iv);
r_iv += s_iv;
if (threadIdx.x < g_SpaceDim)
{
a_fab[threadIdx.x] = r_iv[threadIdx.x];
printf("r_iv[%d]: %d ", threadIdx.x, r_iv[threadIdx.x]);
}
}
void
testCuda1(SymbolPair<Real> a_fab)
{
IntVect ivA(D_DECL(2, 3, 4));
IntVect ivB(D_DECL(1, 2, 3));
std::cout << ivA << std::endl;
ivB.max(ivA);
hipLaunchKernelGGL(( testKernel1), dim3(1), dim3(16), 0, 0, static_cast<Real*>(a_fab.device), ivA);
}
/*----------------------------------------------------------------------------*/
__global__ void
testKernel2(Real* a_fab, Box a_box)
{
__shared__ Box s_box;
if (threadIdx.x < 2*g_SpaceDim)
{
s_box[threadIdx.x] = a_box[threadIdx.x];
}
Box r_box(s_box);
r_box.grow(1);
// __syncthreads();
if (threadIdx.x < 2*g_SpaceDim)
{
a_fab[threadIdx.x] = r_box[threadIdx.x];
printf("r_box[%d]: %d ", threadIdx.x, r_box[threadIdx.x]);
}
}
void
testCuda2(SymbolPair<Real> a_fab)
{
Box box(IntVect(D_DECL(-1, 0, 0)), IntVect(D_DECL(0, 1, 1)));
box.shift(1, 0);
std::cout << box << std::endl;
hipLaunchKernelGGL(( testKernel2), dim3(1), dim3(16), 0, 0, static_cast<Real*>(a_fab.device), box);
}
/*----------------------------------------------------------------------------*/
__global__ void
testKernel3(Real* a_fab, Box a_fabBox)
{
__shared__ int stride[g_SpaceDim];
__shared__ int offset;
__shared__ int cstride;
if (threadIdx.x == 0)
{
a_fabBox.getStride(stride);
offset = a_fabBox.getOffset(stride);
cstride = a_fabBox.size();
}
__syncthreads();
IntVect idxVec;
a_fabBox.linToVec(threadIdx.x, stride, idxVec);
int idxLin0 = a_fabBox.vecToLin0(idxVec, stride);
if (idxLin0 + offset != threadIdx.x)
{
printf("Conversion failed for thread %2d: vec: (%2d,%2d,%2d) lin: %2d\n",
threadIdx.x, idxVec[0], idxVec[1], idxVec[2], idxLin0);
}
a_fab[idxLin0 + offset ] = (Real)-1.0;
a_fab[idxLin0 + offset + cstride] = (Real)-2.0;
}
void
testCuda3(SymbolPair<Real> a_fab, const Box& a_fabBox)
{
CH_assert(a_fabBox.size() <= CHDEF_SYSTEM_CUDAATTR_MAX_THREADS_PER_BLOCK);
hipLaunchKernelGGL(( testKernel3), dim3(1), dim3(a_fabBox.size()), 0, 0, static_cast<Real*>(a_fab.device),
a_fabBox);
}
/*----------------------------------------------------------------------------*/
__global__ void
testKernel4(Real* a_fab, Box a_fabBox, Box a_workBox)
{
__shared__ int fabStride[g_SpaceDim];
__shared__ int fabCstride;
if (threadIdx.x == 0)
{
a_fabBox.getStride(fabStride);
fabCstride = a_fabBox.size();
}
__syncthreads();
// Get the cell
IntVect idxVec;
{
a_workBox.linToVec(threadIdx.x, idxVec);
}
// Get index into fab
int idxLin0 = a_fabBox.vecToLin0(idxVec, fabStride);
a_fab[idxLin0 ] = (Real)1.0;
a_fab[idxLin0 + fabCstride] = (Real)2.0;
}
void
testCuda4(SymbolPair<Real> a_fab, const Box& a_fabBox, const Box& a_workBox)
{
CH_assert(a_workBox.size() <= CHDEF_SYSTEM_CUDAATTR_MAX_THREADS_PER_BLOCK);
hipLaunchKernelGGL(( testKernel4), dim3(1), dim3(a_workBox.size()), 0, 0,
// Add the offset into the pointer address
static_cast<Real*>(a_fab.device) + a_fabBox.getOffset(),
a_fabBox,
a_workBox);
}
/*----------------------------------------------------------------------------*/
__global__ void
testKernel5(Real* a_fabData, Box a_fabBox, int a_fabNcomp, Box a_workBox)
{
__shared__ CudaFab<Real> fab;
if (threadIdx.x == 0)
{
// Add the offset into the pointer address
fab.define(a_fabData, a_fabBox, a_fabNcomp);
}
__syncthreads();
// Get the cell
IntVect idxVec;
{
a_workBox.linToVec(threadIdx.x, idxVec);
}
// Get index into fab
fab(idxVec, 0) = (Real)3.0;
fab(idxVec, 1) = (Real)4.0;
}
void
testCuda5(SymbolPair<Real> a_fabData, const Box& a_fabBox, const int a_fabNcomp,
const Box& a_workBox)
{
CH_assert(a_workBox.size() <= CHDEF_SYSTEM_CUDAATTR_MAX_THREADS_PER_BLOCK);
hipLaunchKernelGGL(( testKernel5), dim3(1), dim3(a_workBox.size()), 0, 0,
static_cast<Real*>(a_fabData.device),
a_fabBox,
a_fabNcomp,
a_workBox);
}
/*----------------------------------------------------------------------------*/
__global__ void
testKernel6(CudaFab<Real> a_fabA, CudaFab<Real> a_fabB, const Box a_workBox)
{
__shared__ Real s_slabData[2*3*g_blkSizeGhost*g_blkSizeGhost];
__shared__ SlabFab<Real, 3> s_slabFab;
__shared__ CudaFab<Real> s_fabA;
__shared__ CudaFab<Real> s_fabB;
// Load fab meta-data for Fabs A and B.
{
const int numThrCopy = CudaFab<Real>::numThrCopy();
CH_assert(numThrCopy < blockDim.x);
s_fabA.define(a_fabA, 0, numThrCopy);
s_fabB.define(a_fabB, 0, numThrCopy);
}
// Compute (_Ar_ithmetic) index, saved to avoid repeat linear->vector
// conversion
IntVect ivecAr;
a_workBox.linToVec(threadIdx.x, ivecAr);
// _L_oad/_S_tore index, saved to avoid repeat linear->vector conversion
IntVect ivecLS;
// Set up the cache window
{
Box LSbox(a_workBox); // This is the initial cache window
LSbox.hiVect(2) = LSbox.loVect(2);
LSbox.grow(1);
// Initialize the slab Fab cache. Note that it is shifted one cell towards
// the low end so it can be shifted back at the beginning of the first
// iteration.
LSbox.shift(-1, 2);
int locNDEnd = LSbox.hiVect(2); // vec[2] to end loading initial data
int locNDBeg = locNDEnd - 1; // vec[2] to begin loading initial data
s_slabFab.define(s_slabData, // Data
LSbox, // Initial window
2, // Number of components
2, // Normal direction
locNDBeg, locNDEnd, // Initial loading
ivecLS, // Vector index (output)
0, // Start component in source Fab
s_fabA, // Source data
g_blkSizeGhost*g_blkSizeGhost); // # threads for loading
}
// Loop over slabs
for (int iSlab = 0; iSlab != g_blkSize; ++iSlab)
{
// Shift the slab
s_slabFab.shift(1, ivecLS);
if (threadIdx.x < g_blkSize*g_blkSize)
{
IntVect basis(D_DECL(0, 0, 0));
basis[2] = 1;
s_fabB(ivecAr, 0) = (s_slabFab(ivecAr - basis, 0) +
s_slabFab(ivecAr, 0) +
s_slabFab(ivecAr + basis, 0));
s_fabB(ivecAr, 1) = (s_slabFab(ivecAr - basis, 1) +
s_slabFab(ivecAr, 1) +
s_slabFab(ivecAr + basis, 1));
// Shift the arithmetic IntVect for the next slab iteration
ivecAr[2] += 1;
}
}
}
void
testCuda6(BaseFab<Real> *const a_fabA,
BaseFab<Real> *const a_fabB,
const Box& a_workBox)
{
BaseFabData<Real> *const fabA =
reinterpret_cast<BaseFabData<Real>*>(a_fabA);
CudaFab<Real> cudaFabA;
cudaFabA.define(*fabA);
BaseFabData<Real> *const fabB =
reinterpret_cast<BaseFabData<Real>*>(a_fabB);
CudaFab<Real> cudaFabB;
cudaFabB.define(*fabB);
hipLaunchKernelGGL(( testKernel6), dim3(1), dim3(g_blkSizeGhost*g_blkSizeGhost), 0, 0,
cudaFabA,
cudaFabB,
a_workBox);
}
| 178ff3ee4822b0c59b6c4cea36fc811f7cdd116c.cu | #include <cstdio>
#include "sandbox_Cuda.H"
#include "Config.H"
#include "IntVect.H"
#include "Box.H"
#include "CudaFab.H"
__global__ void
testKernel1(Real* a_fab, IntVect a_ivA)
{
__shared__ IntVect s_iv;
if (threadIdx.x < g_SpaceDim)
{
s_iv[threadIdx.x] = a_ivA[threadIdx.x];
}
IntVect r_iv(s_iv);
r_iv += s_iv;
if (threadIdx.x < g_SpaceDim)
{
a_fab[threadIdx.x] = r_iv[threadIdx.x];
printf("r_iv[%d]: %d ", threadIdx.x, r_iv[threadIdx.x]);
}
}
void
testCuda1(SymbolPair<Real> a_fab)
{
IntVect ivA(D_DECL(2, 3, 4));
IntVect ivB(D_DECL(1, 2, 3));
std::cout << ivA << std::endl;
ivB.max(ivA);
testKernel1<<<1, 16>>>(static_cast<Real*>(a_fab.device), ivA);
}
/*----------------------------------------------------------------------------*/
__global__ void
testKernel2(Real* a_fab, Box a_box)
{
__shared__ Box s_box;
if (threadIdx.x < 2*g_SpaceDim)
{
s_box[threadIdx.x] = a_box[threadIdx.x];
}
Box r_box(s_box);
r_box.grow(1);
// __syncthreads();
if (threadIdx.x < 2*g_SpaceDim)
{
a_fab[threadIdx.x] = r_box[threadIdx.x];
printf("r_box[%d]: %d ", threadIdx.x, r_box[threadIdx.x]);
}
}
void
testCuda2(SymbolPair<Real> a_fab)
{
Box box(IntVect(D_DECL(-1, 0, 0)), IntVect(D_DECL(0, 1, 1)));
box.shift(1, 0);
std::cout << box << std::endl;
testKernel2<<<1, 16>>>(static_cast<Real*>(a_fab.device), box);
}
/*----------------------------------------------------------------------------*/
__global__ void
testKernel3(Real* a_fab, Box a_fabBox)
{
__shared__ int stride[g_SpaceDim];
__shared__ int offset;
__shared__ int cstride;
if (threadIdx.x == 0)
{
a_fabBox.getStride(stride);
offset = a_fabBox.getOffset(stride);
cstride = a_fabBox.size();
}
__syncthreads();
IntVect idxVec;
a_fabBox.linToVec(threadIdx.x, stride, idxVec);
int idxLin0 = a_fabBox.vecToLin0(idxVec, stride);
if (idxLin0 + offset != threadIdx.x)
{
printf("Conversion failed for thread %2d: vec: (%2d,%2d,%2d) lin: %2d\n",
threadIdx.x, idxVec[0], idxVec[1], idxVec[2], idxLin0);
}
a_fab[idxLin0 + offset ] = (Real)-1.0;
a_fab[idxLin0 + offset + cstride] = (Real)-2.0;
}
void
testCuda3(SymbolPair<Real> a_fab, const Box& a_fabBox)
{
CH_assert(a_fabBox.size() <= CHDEF_SYSTEM_CUDAATTR_MAX_THREADS_PER_BLOCK);
testKernel3<<<1, a_fabBox.size()>>>(static_cast<Real*>(a_fab.device),
a_fabBox);
}
/*----------------------------------------------------------------------------*/
__global__ void
testKernel4(Real* a_fab, Box a_fabBox, Box a_workBox)
{
__shared__ int fabStride[g_SpaceDim];
__shared__ int fabCstride;
if (threadIdx.x == 0)
{
a_fabBox.getStride(fabStride);
fabCstride = a_fabBox.size();
}
__syncthreads();
// Get the cell
IntVect idxVec;
{
a_workBox.linToVec(threadIdx.x, idxVec);
}
// Get index into fab
int idxLin0 = a_fabBox.vecToLin0(idxVec, fabStride);
a_fab[idxLin0 ] = (Real)1.0;
a_fab[idxLin0 + fabCstride] = (Real)2.0;
}
void
testCuda4(SymbolPair<Real> a_fab, const Box& a_fabBox, const Box& a_workBox)
{
CH_assert(a_workBox.size() <= CHDEF_SYSTEM_CUDAATTR_MAX_THREADS_PER_BLOCK);
testKernel4<<<1, a_workBox.size()>>>(
// Add the offset into the pointer address
static_cast<Real*>(a_fab.device) + a_fabBox.getOffset(),
a_fabBox,
a_workBox);
}
/*----------------------------------------------------------------------------*/
__global__ void
testKernel5(Real* a_fabData, Box a_fabBox, int a_fabNcomp, Box a_workBox)
{
__shared__ CudaFab<Real> fab;
if (threadIdx.x == 0)
{
// Add the offset into the pointer address
fab.define(a_fabData, a_fabBox, a_fabNcomp);
}
__syncthreads();
// Get the cell
IntVect idxVec;
{
a_workBox.linToVec(threadIdx.x, idxVec);
}
// Get index into fab
fab(idxVec, 0) = (Real)3.0;
fab(idxVec, 1) = (Real)4.0;
}
void
testCuda5(SymbolPair<Real> a_fabData, const Box& a_fabBox, const int a_fabNcomp,
const Box& a_workBox)
{
CH_assert(a_workBox.size() <= CHDEF_SYSTEM_CUDAATTR_MAX_THREADS_PER_BLOCK);
testKernel5<<<1, a_workBox.size()>>>(
static_cast<Real*>(a_fabData.device),
a_fabBox,
a_fabNcomp,
a_workBox);
}
/*----------------------------------------------------------------------------*/
__global__ void
testKernel6(CudaFab<Real> a_fabA, CudaFab<Real> a_fabB, const Box a_workBox)
{
__shared__ Real s_slabData[2*3*g_blkSizeGhost*g_blkSizeGhost];
__shared__ SlabFab<Real, 3> s_slabFab;
__shared__ CudaFab<Real> s_fabA;
__shared__ CudaFab<Real> s_fabB;
// Load fab meta-data for Fabs A and B.
{
const int numThrCopy = CudaFab<Real>::numThrCopy();
CH_assert(numThrCopy < blockDim.x);
s_fabA.define(a_fabA, 0, numThrCopy);
s_fabB.define(a_fabB, 0, numThrCopy);
}
// Compute (_Ar_ithmetic) index, saved to avoid repeat linear->vector
// conversion
IntVect ivecAr;
a_workBox.linToVec(threadIdx.x, ivecAr);
// _L_oad/_S_tore index, saved to avoid repeat linear->vector conversion
IntVect ivecLS;
// Set up the cache window
{
Box LSbox(a_workBox); // This is the initial cache window
LSbox.hiVect(2) = LSbox.loVect(2);
LSbox.grow(1);
// Initialize the slab Fab cache. Note that it is shifted one cell towards
// the low end so it can be shifted back at the beginning of the first
// iteration.
LSbox.shift(-1, 2);
int locNDEnd = LSbox.hiVect(2); // vec[2] to end loading initial data
int locNDBeg = locNDEnd - 1; // vec[2] to begin loading initial data
s_slabFab.define(s_slabData, // Data
LSbox, // Initial window
2, // Number of components
2, // Normal direction
locNDBeg, locNDEnd, // Initial loading
ivecLS, // Vector index (output)
0, // Start component in source Fab
s_fabA, // Source data
g_blkSizeGhost*g_blkSizeGhost); // # threads for loading
}
// Loop over slabs
for (int iSlab = 0; iSlab != g_blkSize; ++iSlab)
{
// Shift the slab
s_slabFab.shift(1, ivecLS);
if (threadIdx.x < g_blkSize*g_blkSize)
{
IntVect basis(D_DECL(0, 0, 0));
basis[2] = 1;
s_fabB(ivecAr, 0) = (s_slabFab(ivecAr - basis, 0) +
s_slabFab(ivecAr, 0) +
s_slabFab(ivecAr + basis, 0));
s_fabB(ivecAr, 1) = (s_slabFab(ivecAr - basis, 1) +
s_slabFab(ivecAr, 1) +
s_slabFab(ivecAr + basis, 1));
// Shift the arithmetic IntVect for the next slab iteration
ivecAr[2] += 1;
}
}
}
void
testCuda6(BaseFab<Real> *const a_fabA,
BaseFab<Real> *const a_fabB,
const Box& a_workBox)
{
BaseFabData<Real> *const fabA =
reinterpret_cast<BaseFabData<Real>*>(a_fabA);
CudaFab<Real> cudaFabA;
cudaFabA.define(*fabA);
BaseFabData<Real> *const fabB =
reinterpret_cast<BaseFabData<Real>*>(a_fabB);
CudaFab<Real> cudaFabB;
cudaFabB.define(*fabB);
testKernel6<<<1, g_blkSizeGhost*g_blkSizeGhost>>>(
cudaFabA,
cudaFabB,
a_workBox);
}
|
288722ce15cbd2e38cea2a3d5a2f61d56194d26f.hip | // !!! This is a file automatically generated by hipify!!!
#include <hip/hip_runtime.h>
#include <stdio.h>
__global__ void gpukernel()
{
printf("%d %d %d %d\n", blockIdx.x, threadIdx.x, blockDim.x, gridDim.x);
}
int main(int argc, char** argv)
{
// launch a gpu kernel with 3 blocks and 4 threads in each block.
hipLaunchKernelGGL(( gpukernel), dim3(3),dim3(4), 0, 0, );
// block the cpu for the gpu to finish execution
hipDeviceSynchronize();
return 0;
}
| 288722ce15cbd2e38cea2a3d5a2f61d56194d26f.cu | #include <cuda_runtime.h>
#include <stdio.h>
__global__ void gpukernel()
{
printf("%d %d %d %d\n", blockIdx.x, threadIdx.x, blockDim.x, gridDim.x);
}
int main(int argc, char** argv)
{
// launch a gpu kernel with 3 blocks and 4 threads in each block.
gpukernel<<<3,4>>>();
// block the cpu for the gpu to finish execution
cudaDeviceSynchronize();
return 0;
}
|
9732f94191423c682033c349dfddfdb953fd7239.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
__global__ void kernel_bp_patch_sigma31_sup(float * deriv, float * sigma_imgs, float * imgs, float * F1, float * F2, float * F3, float * FL,
long * output_switches3_x_s31, long * output_switches3_y_s31, long * output_switches2_x_s31, long * output_switches2_y_s31, long * output_switches1_x_s31, long * output_switches1_y_s31,
long * output_switches3_x, long * output_switches3_y, long * output_switches2_x, long * output_switches2_y, long * output_switches1_x, long * output_switches1_y,
int N_IMGS, int N_C, int n0, int n1, int n2, int n3, int s1, int s2, int s3, int max_output_sz3, IND_DTYPE max_output_sz3_max_output_sz3,
IND_DTYPE max_output_sz3_max_output_sz3_n3, IND_DTYPE max_output_sz2_max_output_sz2, IND_DTYPE max_output_sz2_max_output_sz2_n2, IND_DTYPE max_output_sz1_max_output_sz1,
IND_DTYPE max_output_sz1_max_output_sz1_n1, IND_DTYPE img_sz_img_sz_3, IND_DTYPE img_sz_img_sz, int deriv_ind, IND_DTYPE max_output_sz2, IND_DTYPE max_output_sz1, float * pred, int img_sz){
int r = blockIdx.x;
int f1, channel, a1_x, a1_y, f2, a2_x, a2_y, f3, a3_x, a3_y, z1, z2, cat;
int a3_x_global_s31, a3_y_global_s31, a2_x_global_s31, a2_y_global_s31, a1_x_global_s31, a1_y_global_s31;
float F_prod;
IND_DTYPE deriv_in_ind;
int cat_sz = N_C;
int f1_sz = n1;
int f2_sz = n2;
int f3_sz = n3;
int channel_sz = 3;
int a1_x_sz = s1;
int a1_y_sz = s1;
int a2_x_sz = s2;
int a2_y_sz = s2;
int a3_x_sz = s3;
int a3_y_sz = s3;
int z1_sz = max_output_sz3;
int z2_sz = max_output_sz3;
int * cat_i = &cat;
int * f1_i = &f1;
int * f2_i = &f2;
int * f3_i = &f3;
int * channel_i = &channel;
int * a1_x_i = &a1_x;
int * a1_y_i = &a1_y;
int * a2_x_i = &a2_x;
int * a2_y_i = &a2_y;
int * a3_x_i = &a3_x;
int * a3_y_i = &a3_y;
int * z1_i = &z1;
int * z2_i = &z2;
int a3_y_c = threadIdx.x;
a3_y_i = &a3_y_c;
a3_y_sz = 1;
int a3_x_c = threadIdx.y;
a3_x_i = &a3_x_c;
a3_x_sz = 1;
deriv_in_ind = r;
/////////// which loops to unravel across the grid
if(deriv_ind == 1){
int f1_c = r / (3*s1*s1);
r = r % (3*s1*s1);
f1_i = &f1_c;
f1_sz = 1;
int channel_c = r / (s1*s1);
r = r % (s1*s1);
channel_i = &channel_c;
channel_sz = 1;
int a1_x_c = r / s1;
int a1_y_c = r % s1;
a1_x_i = &a1_x_c;
a1_y_i = &a1_y_c;
a1_x_sz = 1;
a1_y_sz = 1;
}else if(deriv_ind == 2){
int f2_c = r / (n1*s2*s2);
r = r % (n1*s2*s2);
f2_i = &f2_c;
f2_sz = 1;
int f1_c = r / (s2*s2);
r = r % (s2*s2);
f1_i = &f1_c;
f1_sz = 1;
int a2_x_c = r / s2;
int a2_y_c = r % s2;
a2_x_i = &a2_x_c;
a2_y_i = &a2_y_c;
a2_x_sz = 1;
a2_y_sz = 1;
}else if(deriv_ind == 3){
int f3_c = r / (n2*s3*s3);
r = r % (n2*s3*s3);
f3_i = &f3_c;
f3_sz = 1;
int f2_c = r / (s3*s3);
r = r % (s3*s3);
f2_i = &f2_c;
f2_sz = 1;
int a3_x_c = r / s3;
int a3_y_c = r % s3;
a3_x_i = &a3_x_c;
a3_y_i = &a3_y_c;
a3_x_sz = 1;
a3_y_sz = 1;
}else if(deriv_ind == 4){
int cat_c = r / (n3*max_output_sz3*max_output_sz3);
r = r % (n3*max_output_sz3*max_output_sz3);
cat_i = &cat_c;
cat_sz = 1;
int f3_c = r / (max_output_sz3*max_output_sz3);
r = r % (max_output_sz3*max_output_sz3);
f3_i = &f3_c;
f3_sz = 1;
int z1_c = r / max_output_sz3;
int z2_c = r % max_output_sz3;
z1_i = &z1_c;
z2_i = &z2_c;
z1_sz = 1;
z2_sz = 1;
}
float temp_deriv = 0;
float F_prod_pred;
float F32, F321;
int switches_3_ind;
int switches_2_ind;
int switches_1_ind;
for(f3=0; f3 < f3_sz; f3++){ for(z1=0; z1 < z1_sz; z1++){ for(z2=0; z2 < z2_sz; z2++){ for(a3_x=0; a3_x < a3_x_sz; a3_x++){ for(a3_y=0; a3_y < a3_y_sz; a3_y++){
for(f2=0; f2 < f2_sz; f2++){ for(a2_x=0; a2_x < a2_x_sz; a2_x++){ for(a2_y=0; a2_y < a2_y_sz; a2_y++){
F32 = F2[F2_IND(*f2_i, *f1_i, *a2_x_i, *a2_y_i)] * F3[F3_IND(*f3_i, *f2_i, *a3_x_i, *a3_y_i)];
for(f1=0; f1 < f1_sz; f1++){ for(a1_x=0; a1_x < a1_x_sz; a1_x++){ for(a1_y=0; a1_y < a1_y_sz; a1_y++){
F321 = F1[F1_IND(*f1_i, *channel_i, *a1_x_i, *a1_y_i)] * F32;
for(cat=0; cat < cat_sz; cat++){
switches_3_ind = O3_IND(*cat_i,*f3_i,*z1_i,*z2_i);
F_prod = F321 * FL[switches_3_ind];
//////////////////////////////////////////////// sup
// pool3 -> conv3
a3_x_global_s31 = output_switches3_x_s31[switches_3_ind] + *a3_x_i;
a3_y_global_s31 = output_switches3_y_s31[switches_3_ind] + *a3_y_i;
// pool2 -> conv2
switches_2_ind = O2_IND(*cat_i,*f2_i,a3_x_global_s31,a3_y_global_s31);
a2_x_global_s31 = output_switches2_x_s31[switches_2_ind] + *a2_x_i;
a2_y_global_s31 = output_switches2_y_s31[switches_2_ind] + *a2_y_i;
switches_1_ind = O1_IND(*cat_i,*f1_i,a2_x_global_s31,a2_y_global_s31);
a1_x_global_s31 = output_switches1_x_s31[switches_1_ind] + *a1_x_i;
a1_y_global_s31 = output_switches1_y_s31[switches_1_ind] + *a1_y_i;
for(channel=0; channel < channel_sz; channel++){
temp_deriv -= N_IMGS * F_prod * sigma_imgs[I_IND(*cat_i, *channel_i,a1_x_global_s31,a1_y_global_s31)];
}/*
}}}}}}}}}}*/
}
}}}}}}}}}}} // FL layer
atomicAdd(&deriv[deriv_in_ind], temp_deriv);
return;
} | 9732f94191423c682033c349dfddfdb953fd7239.cu | __global__ void kernel_bp_patch_sigma31_sup(float * deriv, float * sigma_imgs, float * imgs, float * F1, float * F2, float * F3, float * FL,
long * output_switches3_x_s31, long * output_switches3_y_s31, long * output_switches2_x_s31, long * output_switches2_y_s31, long * output_switches1_x_s31, long * output_switches1_y_s31,
long * output_switches3_x, long * output_switches3_y, long * output_switches2_x, long * output_switches2_y, long * output_switches1_x, long * output_switches1_y,
int N_IMGS, int N_C, int n0, int n1, int n2, int n3, int s1, int s2, int s3, int max_output_sz3, IND_DTYPE max_output_sz3_max_output_sz3,
IND_DTYPE max_output_sz3_max_output_sz3_n3, IND_DTYPE max_output_sz2_max_output_sz2, IND_DTYPE max_output_sz2_max_output_sz2_n2, IND_DTYPE max_output_sz1_max_output_sz1,
IND_DTYPE max_output_sz1_max_output_sz1_n1, IND_DTYPE img_sz_img_sz_3, IND_DTYPE img_sz_img_sz, int deriv_ind, IND_DTYPE max_output_sz2, IND_DTYPE max_output_sz1, float * pred, int img_sz){
int r = blockIdx.x;
int f1, channel, a1_x, a1_y, f2, a2_x, a2_y, f3, a3_x, a3_y, z1, z2, cat;
int a3_x_global_s31, a3_y_global_s31, a2_x_global_s31, a2_y_global_s31, a1_x_global_s31, a1_y_global_s31;
float F_prod;
IND_DTYPE deriv_in_ind;
int cat_sz = N_C;
int f1_sz = n1;
int f2_sz = n2;
int f3_sz = n3;
int channel_sz = 3;
int a1_x_sz = s1;
int a1_y_sz = s1;
int a2_x_sz = s2;
int a2_y_sz = s2;
int a3_x_sz = s3;
int a3_y_sz = s3;
int z1_sz = max_output_sz3;
int z2_sz = max_output_sz3;
int * cat_i = &cat;
int * f1_i = &f1;
int * f2_i = &f2;
int * f3_i = &f3;
int * channel_i = &channel;
int * a1_x_i = &a1_x;
int * a1_y_i = &a1_y;
int * a2_x_i = &a2_x;
int * a2_y_i = &a2_y;
int * a3_x_i = &a3_x;
int * a3_y_i = &a3_y;
int * z1_i = &z1;
int * z2_i = &z2;
int a3_y_c = threadIdx.x;
a3_y_i = &a3_y_c;
a3_y_sz = 1;
int a3_x_c = threadIdx.y;
a3_x_i = &a3_x_c;
a3_x_sz = 1;
deriv_in_ind = r;
/////////// which loops to unravel across the grid
if(deriv_ind == 1){
int f1_c = r / (3*s1*s1);
r = r % (3*s1*s1);
f1_i = &f1_c;
f1_sz = 1;
int channel_c = r / (s1*s1);
r = r % (s1*s1);
channel_i = &channel_c;
channel_sz = 1;
int a1_x_c = r / s1;
int a1_y_c = r % s1;
a1_x_i = &a1_x_c;
a1_y_i = &a1_y_c;
a1_x_sz = 1;
a1_y_sz = 1;
}else if(deriv_ind == 2){
int f2_c = r / (n1*s2*s2);
r = r % (n1*s2*s2);
f2_i = &f2_c;
f2_sz = 1;
int f1_c = r / (s2*s2);
r = r % (s2*s2);
f1_i = &f1_c;
f1_sz = 1;
int a2_x_c = r / s2;
int a2_y_c = r % s2;
a2_x_i = &a2_x_c;
a2_y_i = &a2_y_c;
a2_x_sz = 1;
a2_y_sz = 1;
}else if(deriv_ind == 3){
int f3_c = r / (n2*s3*s3);
r = r % (n2*s3*s3);
f3_i = &f3_c;
f3_sz = 1;
int f2_c = r / (s3*s3);
r = r % (s3*s3);
f2_i = &f2_c;
f2_sz = 1;
int a3_x_c = r / s3;
int a3_y_c = r % s3;
a3_x_i = &a3_x_c;
a3_y_i = &a3_y_c;
a3_x_sz = 1;
a3_y_sz = 1;
}else if(deriv_ind == 4){
int cat_c = r / (n3*max_output_sz3*max_output_sz3);
r = r % (n3*max_output_sz3*max_output_sz3);
cat_i = &cat_c;
cat_sz = 1;
int f3_c = r / (max_output_sz3*max_output_sz3);
r = r % (max_output_sz3*max_output_sz3);
f3_i = &f3_c;
f3_sz = 1;
int z1_c = r / max_output_sz3;
int z2_c = r % max_output_sz3;
z1_i = &z1_c;
z2_i = &z2_c;
z1_sz = 1;
z2_sz = 1;
}
float temp_deriv = 0;
float F_prod_pred;
float F32, F321;
int switches_3_ind;
int switches_2_ind;
int switches_1_ind;
for(f3=0; f3 < f3_sz; f3++){ for(z1=0; z1 < z1_sz; z1++){ for(z2=0; z2 < z2_sz; z2++){ for(a3_x=0; a3_x < a3_x_sz; a3_x++){ for(a3_y=0; a3_y < a3_y_sz; a3_y++){
for(f2=0; f2 < f2_sz; f2++){ for(a2_x=0; a2_x < a2_x_sz; a2_x++){ for(a2_y=0; a2_y < a2_y_sz; a2_y++){
F32 = F2[F2_IND(*f2_i, *f1_i, *a2_x_i, *a2_y_i)] * F3[F3_IND(*f3_i, *f2_i, *a3_x_i, *a3_y_i)];
for(f1=0; f1 < f1_sz; f1++){ for(a1_x=0; a1_x < a1_x_sz; a1_x++){ for(a1_y=0; a1_y < a1_y_sz; a1_y++){
F321 = F1[F1_IND(*f1_i, *channel_i, *a1_x_i, *a1_y_i)] * F32;
for(cat=0; cat < cat_sz; cat++){
switches_3_ind = O3_IND(*cat_i,*f3_i,*z1_i,*z2_i);
F_prod = F321 * FL[switches_3_ind];
//////////////////////////////////////////////// sup
// pool3 -> conv3
a3_x_global_s31 = output_switches3_x_s31[switches_3_ind] + *a3_x_i;
a3_y_global_s31 = output_switches3_y_s31[switches_3_ind] + *a3_y_i;
// pool2 -> conv2
switches_2_ind = O2_IND(*cat_i,*f2_i,a3_x_global_s31,a3_y_global_s31);
a2_x_global_s31 = output_switches2_x_s31[switches_2_ind] + *a2_x_i;
a2_y_global_s31 = output_switches2_y_s31[switches_2_ind] + *a2_y_i;
switches_1_ind = O1_IND(*cat_i,*f1_i,a2_x_global_s31,a2_y_global_s31);
a1_x_global_s31 = output_switches1_x_s31[switches_1_ind] + *a1_x_i;
a1_y_global_s31 = output_switches1_y_s31[switches_1_ind] + *a1_y_i;
for(channel=0; channel < channel_sz; channel++){
temp_deriv -= N_IMGS * F_prod * sigma_imgs[I_IND(*cat_i, *channel_i,a1_x_global_s31,a1_y_global_s31)];
}/*
}}}}}}}}}}*/
}
}}}}}}}}}}} // FL layer
atomicAdd(&deriv[deriv_in_ind], temp_deriv);
return;
} |
15d999f926a1990bae2193b758392f46a736aeea.hip | // !!! This is a file automatically generated by hipify!!!
#include <stdio.h>
#include <stdlib.h>
#include <math.h>
#include "hip/hip_runtime.h"
#include "device_launch_parameters.h"
#include "../common/CudaSafeCall.h"
#include "TNNPglobalVariables.cuh"
#include "typedefSparse.h"
#include "sparsePrototypes.cuh"
#include "typedefTNNP.h"
#include "TNNPhostPrototypes.h"
#include "TNNPdevicePrototypes.cuh"
__device__ real TNNP_GNa = TNNP_GNa_0;
__device__ real TNNP_GCaL = TNNP_GCaL_0;
__device__ real TNNP_Gto = TNNP_Gto_0;
__device__ real TNNP_Gkr = TNNP_Gkr_0;
__device__ real TNNP_Gks = TNNP_Gks_0;
__device__ real TNNP_GK1 = TNNP_GK1_0;
__device__ real TNNP_GpCa = TNNP_GpCa_0;
__device__ real TNNP_GpK = TNNP_GpK_0;
__device__ real TNNP_GbNa = TNNP_GbNa_0;
__device__ real TNNP_GbCa = TNNP_GbCa_0;
real TNNP_RestVoltage = TNNP_RestVoltage_0;
void TNNP_meminit(char** res) {
rword resources[] = {
{ "TNNP_Gna", 1001 },
{ "TNNP_GCaL", 1002 },
{ "TNNP_Gto", 1003 },
{ "TNNP_Gkr", 1004 },
{ "TNNP_Gks", 1005 },
{ "TNNP_GK1", 1006 },
{ "TNNP_GpCa", 1007 },
{ "TNNP_GpK", 1008 },
{ "TNNP_GbNa", 1009 },
{ "TNNP_GbCa", 1010 },
{ "TNNP_IV", 1011 },
{ "TNNP_Node", 1100 },
{ "TNNP_Nodetype",1100 },
{ "TNNP_Patch", 1011 },
{ "TNNP_Type", 1100 },
{ "TNNP_Vr", 1012 },
{ "TNNP_Vrest", 1012 },
{ "TNNP_DT", 1013 },
{ NULL, 0 }
};
int i, j, c;
int cmd;
real temp;
i = 0;
while (res[i] != NULL) {
cmd = FindCommand(resources, res[i]);
switch (cmd) {
case 1001:
temp = GetRealValue(res[i]);
hipMemcpyToSymbol(TNNP_GNa, (void *)&temp, sizeof(real), 0, hipMemcpyHostToDevice);
break;
case 1002:
temp = GetRealValue(res[i]);
hipMemcpyToSymbol(TNPP_GCaL, (void *)&temp, sizeof(real), 0, hipMemcpyHostToDevice);
break;
case 1003:
temp = GetRealValue(res[i]);
hipMemcpyToSymbol(TNNP_Gto, (void *)&temp, sizeof(real), 0, hipMemcpyHostToDevice);
break;
case 1004:
temp = GetRealValue(res[i]);
hipMemcpyToSymbol(TNNP_Gkr, (void *)&temp, sizeof(real), 0, hipMemcpyHostToDevice);
break;
case 1005:
temp = GetRealValue(res[i]);
hipMemcpyToSymbol(TNNP_Gks, (void *)&temp, sizeof(real), 0, hipMemcpyHostToDevice);
break;
case 1006:
temp = GetRealValue(res[i]);
hipMemcpyToSymbol(TNNP_GK1, (void *)&temp, sizeof(real), 0, hipMemcpyHostToDevice);
break;
case 1007:
temp = GetRealValue(res[i]);
hipMemcpyToSymbol(TNNP_GpCa, (void *)&temp, sizeof(real), 0, hipMemcpyHostToDevice);
break;
case 1008:
temp = GetRealValue(res[i]);
hipMemcpyToSymbol(TNNP_GpK, (void *)&temp, sizeof(real), 0, hipMemcpyHostToDevice);
break;
case 1009:
temp = GetRealValue(res[i]);
hipMemcpyToSymbol(TNNP_GbNa, (void *)&temp, sizeof(real), 0, hipMemcpyHostToDevice);
break;
case 1010:
temp = GetRealValue(res[i]);
hipMemcpyToSymbol(TNNP_GbCa, (void *)&temp, sizeof(real), 0, hipMemcpyHostToDevice);
break;
case 1011:
/*iv = GetRealArray(res[i]);
p = (real*)(&TNNP_RestPatch);
c = GetNumValues(res[i]);
if (c > TNNP_PatchSize) {
c = TNNP_PatchSize;
}
for (j = 0; j<c; j++) {
p[j] = iv[j];
}*/
break;
case 1012:
TNNP_RestVoltage = GetRealValue(res[i]);
break;
case 1013:
// TNNP_DT = GetRealValue(res[i]);
break;
case 1100:
//TNNP_NodeType = GetByteValue(res[i]);
break;
}
i++;
}
}
void TNNP_gateinit(int memSize, size_t* pitch, gateType* gate_h, gateType* gate_dev, gateType* gate_devF) {
hipHostMalloc((void**)&(gate_h->vm), memSize, 0);
hipHostMalloc((void**)&(gate_h->Cai), memSize, 0);
hipHostMalloc((void**)&(gate_h->CaSR), memSize, 0);
hipHostMalloc((void**)&(gate_h->Nai), memSize, 0);
hipHostMalloc((void**)&(gate_h->Ki), memSize, 0);
hipHostMalloc((void**)&(gate_h->m), memSize, 0);
hipHostMalloc((void**)&(gate_h->h), memSize, 0);
hipHostMalloc((void**)&(gate_h->j), memSize, 0);
hipHostMalloc((void**)&(gate_h->xr1), memSize, 0);
hipHostMalloc((void**)&(gate_h->xr2), memSize, 0);
hipHostMalloc((void**)&(gate_h->xs), memSize, 0);
hipHostMalloc((void**)&(gate_h->r), memSize, 0);
hipHostMalloc((void**)&(gate_h->s), memSize, 0);
hipHostMalloc((void**)&(gate_h->d), memSize, 0);
hipHostMalloc((void**)&(gate_h->f), memSize, 0);
hipHostMalloc((void**)&(gate_h->fca), memSize, 0);
hipHostMalloc((void**)&(gate_h->g), memSize, 0);
// Allocate device memory arrays
CudaSafeCall(hipMallocPitch((void **)&gate_dev->vm, pitch,
memSize, 1));
CudaSafeCall(hipMallocPitch((void **)&gate_dev->Cai, pitch,
memSize, 1));
CudaSafeCall(hipMallocPitch((void **)&gate_dev->CaSR, pitch,
memSize, 1));
CudaSafeCall(hipMallocPitch((void **)&gate_dev->Nai, pitch,
memSize, 1));
CudaSafeCall(hipMallocPitch((void **)&gate_dev->Ki, pitch,
memSize, 1));
CudaSafeCall(hipMallocPitch((void **)&gate_dev->m, pitch,
memSize, 1));
CudaSafeCall(hipMallocPitch((void **)&gate_dev->h, pitch,
memSize, 1));
CudaSafeCall(hipMallocPitch((void **)&gate_dev->j, pitch,
memSize, 1));
CudaSafeCall(hipMallocPitch((void **)&gate_dev->xr1, pitch,
memSize, 1));
CudaSafeCall(hipMallocPitch((void **)&gate_dev->xr2, pitch,
memSize, 1));
CudaSafeCall(hipMallocPitch((void **)&gate_dev->xs, pitch,
memSize, 1));
CudaSafeCall(hipMallocPitch((void **)&gate_dev->r, pitch,
memSize, 1));
CudaSafeCall(hipMallocPitch((void **)&gate_dev->s, pitch,
memSize, 1));
CudaSafeCall(hipMallocPitch((void **)&gate_dev->d, pitch,
memSize, 1));
CudaSafeCall(hipMallocPitch((void **)&gate_dev->f, pitch,
memSize, 1));
CudaSafeCall(hipMallocPitch((void **)&gate_dev->fca, pitch,
memSize, 1));
CudaSafeCall(hipMallocPitch((void **)&gate_dev->g, pitch,
memSize, 1));
// Allocate device forward memory arrays
CudaSafeCall(hipMallocPitch((void **)&gate_devF->vm, pitch,
memSize, 1));
CudaSafeCall(hipMallocPitch((void **)&gate_devF->Cai, pitch,
memSize, 1));
CudaSafeCall(hipMallocPitch((void **)&gate_devF->CaSR, pitch,
memSize, 1));
CudaSafeCall(hipMallocPitch((void **)&gate_devF->Nai, pitch,
memSize, 1));
CudaSafeCall(hipMallocPitch((void **)&gate_devF->Ki, pitch,
memSize, 1));
CudaSafeCall(hipMallocPitch((void **)&gate_devF->m, pitch,
memSize, 1));
CudaSafeCall(hipMallocPitch((void **)&gate_devF->h, pitch,
memSize, 1));
CudaSafeCall(hipMallocPitch((void **)&gate_devF->j, pitch,
memSize, 1));
CudaSafeCall(hipMallocPitch((void **)&gate_devF->xr1, pitch,
memSize, 1));
CudaSafeCall(hipMallocPitch((void **)&gate_devF->xr2, pitch,
memSize, 1));
CudaSafeCall(hipMallocPitch((void **)&gate_devF->xs, pitch,
memSize, 1));
CudaSafeCall(hipMallocPitch((void **)&gate_devF->r, pitch,
memSize, 1));
CudaSafeCall(hipMallocPitch((void **)&gate_devF->s, pitch,
memSize, 1));
CudaSafeCall(hipMallocPitch((void **)&gate_devF->d, pitch,
memSize, 1));
CudaSafeCall(hipMallocPitch((void **)&gate_devF->f, pitch,
memSize, 1));
CudaSafeCall(hipMallocPitch((void **)&gate_devF->fca, pitch,
memSize, 1));
CudaSafeCall(hipMallocPitch((void **)&gate_devF->g, pitch,
memSize, 1));
puts("\nFinished allocating device arrays\n");
int totpoints = (int)memSize / sizeof(real);
for (int idx = 0; idx < totpoints; idx++) {
gate_h->vm[idx] = TNNP_RestVoltage;
gate_h->Cai[idx] = 0.0002;
gate_h->CaSR[idx] = 0.2;
gate_h->Nai[idx] = 11.6;
gate_h->Ki[idx] = 138.3;
gate_h->m[idx] = 0.0;
gate_h->h[idx] = 0.75;
gate_h->j[idx] = 0.75;
gate_h->xr1[idx] = 0.0;
gate_h->xr2[idx] = 1.0;
gate_h->xs[idx] = 0.0;
gate_h->r[idx] = 0.0;
gate_h->s[idx] = 1.0;
gate_h->d[idx] = 0.0;
gate_h->f[idx] = 1.0;
gate_h->fca[idx] = 1.0;
gate_h->g)[idx] = 1.0;
}
CudaSafeCall(hipMemcpy2D((void *)gate_dev->vm, *pitch, (void *)gate_h->vm,
memSize, memSize, 1, hipMemcpyHostToDevice));
CudaSafeCall(hipMemcpy2D((void *)gate_dev->Cai, *pitch, (void *)gate_h->Cai,
memSize, memSize, 1, hipMemcpyHostToDevice));
CudaSafeCall(hipMemcpy2D((void *)gate_dev->CaSR, *pitch, (void *)gate_h->CaSR,
memSize, memSize, 1, hipMemcpyHostToDevice));
CudaSafeCall(hipMemcpy2D((void *)gate_dev->Nai, *pitch, (void *)gate_h->Nai,
memSize, memSize, 1, hipMemcpyHostToDevice));
CudaSafeCall(hipMemcpy2D((void *)gate_dev->Ki, *pitch, (void *)gate_h->Ki,
memSize, memSize, 1, hipMemcpyHostToDevice));
CudaSafeCall(hipMemcpy2D((void *)gate_dev->m, *pitch, (void *)gate_h->m,
memSize, memSize, 1, hipMemcpyHostToDevice));
CudaSafeCall(hipMemcpy2D((void *)gate_dev->h, *pitch, (void *)gate_h->h,
memSize, memSize, 1, hipMemcpyHostToDevice));
CudaSafeCall(hipMemcpy2D((void *)gate_dev->j, *pitch, (void *)gate_h->j,
memSize, memSize, 1, hipMemcpyHostToDevice));
CudaSafeCall(hipMemcpy2D((void *)gate_dev->xr1, *pitch, (void *)gate_h->xr1,
memSize, memSize, 1, hipMemcpyHostToDevice));
CudaSafeCall(hipMemcpy2D((void *)gate_dev->xr2, *pitch, (void *)gate_h->xr2,
memSize, memSize, 1, hipMemcpyHostToDevice));
CudaSafeCall(hipMemcpy2D((void *)gate_dev->xs, *pitch, (void *)gate_h->xs,
memSize, memSize, 1, hipMemcpyHostToDevice));
CudaSafeCall(hipMemcpy2D((void *)gate_dev->r, *pitch, (void *)gate_h->r,
memSize, memSize, 1, hipMemcpyHostToDevice));
CudaSafeCall(hipMemcpy2D((void *)gate_dev->s, *pitch, (void *)gate_h->s,
memSize, memSize, 1, hipMemcpyHostToDevice));
CudaSafeCall(hipMemcpy2D((void *)gate_dev->d, *pitch, (void *)gate_h->d,
memSize, memSize, 1, hipMemcpyHostToDevice));
CudaSafeCall(hipMemcpy2D((void *)gate_dev->,f *pitch, (void *)gate_h->f,
memSize, memSize, 1, hipMemcpyHostToDevice));
CudaSafeCall(hipMemcpy2D((void *)gate_dev->fca, *pitch, (void *)gate_h->fca,
memSize, memSize, 1, hipMemcpyHostToDevice));
CudaSafeCall(hipMemcpy2D((void *)gate_dev->g, *pitch, (void *)gate_h->g,
memSize, memSize, 1, hipMemcpyHostToDevice));
CudaSafeCall(hipMemcpy2D((void *)gate_devF->vm, *pitch, (void *)gate_h->vm,
memSize, memSize, 1, hipMemcpyHostToDevice));
CudaSafeCall(hipMemcpy2D((void *)gate_devF->Cai, *pitch, (void *)gate_h->Cai,
memSize, memSize, 1, hipMemcpyHostToDevice));
CudaSafeCall(hipMemcpy2D((void *)gate_devF->CaSR, *pitch, (void *)gate_h->CaSR,
memSize, memSize, 1, hipMemcpyHostToDevice));
CudaSafeCall(hipMemcpy2D((void *)gate_devF->Nai, *pitch, (void *)gate_h->Nai,
memSize, memSize, 1, hipMemcpyHostToDevice));
CudaSafeCall(hipMemcpy2D((void *)gate_devF->Ki, *pitch, (void *)gate_h->Ki,
memSize, memSize, 1, hipMemcpyHostToDevice));
CudaSafeCall(hipMemcpy2D((void *)gate_devF->m, *pitch, (void *)gate_h->m,
memSize, memSize, 1, hipMemcpyHostToDevice));
CudaSafeCall(hipMemcpy2D((void *)gate_devF->h, *pitch, (void *)gate_h->h,
memSize, memSize, 1, hipMemcpyHostToDevice));
CudaSafeCall(hipMemcpy2D((void *)gate_devF->j, *pitch, (void *)gate_h->j,
memSize, memSize, 1, hipMemcpyHostToDevice));
CudaSafeCall(hipMemcpy2D((void *)gate_devF->xr1, *pitch, (void *)gate_h->xr1,
memSize, memSize, 1, hipMemcpyHostToDevice));
CudaSafeCall(hipMemcpy2D((void *)gate_devF->xr2, *pitch, (void *)gate_h->xr2,
memSize, memSize, 1, hipMemcpyHostToDevice));
CudaSafeCall(hipMemcpy2D((void *)gate_devF->xs, *pitch, (void *)gate_h->xs,
memSize, memSize, 1, hipMemcpyHostToDevice));
CudaSafeCall(hipMemcpy2D((void *)gate_devF->r, *pitch, (void *)gate_h->r,
memSize, memSize, 1, hipMemcpyHostToDevice));
CudaSafeCall(hipMemcpy2D((void *)gate_devF->s, *pitch, (void *)gate_h->s,
memSize, memSize, 1, hipMemcpyHostToDevice));
CudaSafeCall(hipMemcpy2D((void *)gate_devF->d, *pitch, (void *)gate_h->d,
memSize, memSize, 1, hipMemcpyHostToDevice));
CudaSafeCall(hipMemcpy2D((void *)gate_devF->,f *pitch, (void *)gate_h->f,
memSize, memSize, 1, hipMemcpyHostToDevice));
CudaSafeCall(hipMemcpy2D((void *)gate_devF->fca, *pitch, (void *)gate_h->fca,
memSize, memSize, 1, hipMemcpyHostToDevice));
CudaSafeCall(hipMemcpy2D((void *)gate_devF->g, *pitch, (void *)gate_h->g,
memSize, memSize, 1, hipMemcpyHostToDevice));
real** qpH = (real**)malloc(sizeof(real *)*gate_h->qpl);
int i = 0;
qpH[i++] = gate_devF->Cai;
qpH[i++] = gate_devF->CaSR;
qpH[i++] = gate_devF->Nai;
qpH[i++] = gate_devF->Ki;
qpH[i++] = gate_devF->m;
qpH[i++] = gate_devF->h;
qpH[i++] = gate_devF->j;
qpH[i++] = gate_devF->xr1;
qpH[i++] = gate_devF->xr2;
qpH[i++] = gate_devF->xs;
qpH[i++] = gate_devF->r;
qpH[i++] = gate_devF->s;
qpH[i++] = gate_devF->d;
qpH[i++] = gate_devF->f;
qpH[i++] = gate_devF->fca;
qpH[i++] = gate_devF->g;
CudaSafeCall(hipMemcpy((void *)gate_devF->qp, (void*)qpH, sizeof(real *)*gate_h->qpl, hipMemcpyHostToDevice));
i = 0;
qpH[i++] = gate_dev->Cai;
qpH[i++] = gate_dev->CaSR;
qpH[i++] = gate_dev->Nai;
qpH[i++] = gate_dev->Ki;
qpH[i++] = gate_dev->m;
qpH[i++] = gate_dev->h;
qpH[i++] = gate_dev->j;
qpH[i++] = gate_dev->xr1;
qpH[i++] = gate_dev->xr2;
qpH[i++] = gate_dev->xs;
qpH[i++] = gate_dev->r;
qpH[i++] = gate_dev->s;
qpH[i++] = gate_dev->d;
qpH[i++] = gate_dev->f;
qpH[i++] = gate_dev->fca;
qpH[i++] = gate_dev->g;
CudaSafeCall(hipMemcpy((void *)gate_dev->qp, (void*)qpH, sizeof(real *)*gate_h->qpl, hipMemcpyHostToDevice));
CudaCheckError();
puts("\nFinished initializing device arrays\n");
}
void TNNP_sync(int memSize, size_t pitch, gateType* gate_h, gateType* gate_dev) {
CudaSafeCall(hipMemcpy2D((void *)gate_h->vm, *pitch, (void *)gate_dev->vm,
memSize, memSize, 1, hipMemcpyDeviceToHost));
CudaSafeCall(hipMemcpy2D((void *)gate_h->Cai, *pitch, (void *)gate_dev->Cai,
memSize, memSize, 1, hipMemcpyDeviceToHost));
CudaSafeCall(hipMemcpy2D((void *)gate_h->CaSR, *pitch, (void *)gate_dev->CaSR,
memSize, memSize, 1, hipMemcpyDeviceToHost));
CudaSafeCall(hipMemcpy2D((void *)gate_h->Nai, *pitch, (void *)gate_dev->Nai,
memSize, memSize, 1, hipMemcpyDeviceToHost));
CudaSafeCall(hipMemcpy2D((void *)gate_h->Ki, *pitch, (void *)gate_dev->Ki,
memSize, memSize, 1, hipMemcpyDeviceToHost));
CudaSafeCall(hipMemcpy2D((void *)gate_h->m, *pitch, (void *)gate_dev->m,
memSize, memSize, 1, hipMemcpyDeviceToHost));
CudaSafeCall(hipMemcpy2D((void *)gate_h->h, *pitch, (void *)gate_dev->h,
memSize, memSize, 1, hipMemcpyDeviceToHost));
CudaSafeCall(hipMemcpy2D((void *)gate_h->j, *pitch, (void *)gate_dev->j,
memSize, memSize, 1, hipMemcpyDeviceToHost));
CudaSafeCall(hipMemcpy2D((void *)gate_h->xr1, *pitch, (void *)gate_dev->xr1,
memSize, memSize, 1, hipMemcpyDeviceToHost));
CudaSafeCall(hipMemcpy2D((void *)gate_h->xr2, *pitch, (void *)gate_dev->xr2,
memSize, memSize, 1, hipMemcpyDeviceToHost));
CudaSafeCall(hipMemcpy2D((void *)gate_h->xs, *pitch, (void *)gate_dev->xs,
memSize, memSize, 1, hipMemcpyDeviceToHost));
CudaSafeCall(hipMemcpy2D((void *)gate_h->r, *pitch, (void *)gate_dev->r,
memSize, memSize, 1, hipMemcpyDeviceToHost));
CudaSafeCall(hipMemcpy2D((void *)gate_h->s, *pitch, (void *)gate_dev->s,
memSize, memSize, 1, hipMemcpyDeviceToHost));
CudaSafeCall(hipMemcpy2D((void *)gate_h->d, *pitch, (void *)gate_dev->d,
memSize, memSize, 1, hipMemcpyDeviceToHost));
CudaSafeCall(hipMemcpy2D((void *)gate_h->,f *pitch, (void *)gate_dev->f,
memSize, memSize, 1, hipMemcpyDeviceToHost));
CudaSafeCall(hipMemcpy2D((void *)gate_h->fca, *pitch, (void *)gate_dev->fca,
memSize, memSize, 1, hipMemcpyDeviceToHost));
CudaSafeCall(hipMemcpy2D((void *)gate_h->g, *pitch, (void *)gate_dev->g,
memSize, memSize, 1, hipMemcpyDeviceToHost));
}
// Tear down everything TNNP_gateinit and the matrix setup allocated: the
// pinned host mirror, both device state structures and the sparse-matrix
// arrays.  memSize and pitch are unused here; they stay in the signature so
// all model *_exit entry points match.  MatrixINT is host-owned and is not
// released here (same as before).
void TNNP_exit(int memSize, size_t pitch, gateType* gate_h, gateType* gate_dev, gateType* gate_devF, sparse* MatrixINT, cudasparse* cudaMatrixINT){
  // Pinned host buffers.
  hipHostFree(gate_h->vm);  hipHostFree(gate_h->Cai); hipHostFree(gate_h->CaSR); hipHostFree(gate_h->Nai);
  hipHostFree(gate_h->Ki);  hipHostFree(gate_h->m);   hipHostFree(gate_h->h);    hipHostFree(gate_h->j);
  hipHostFree(gate_h->xr1); hipHostFree(gate_h->xr2); hipHostFree(gate_h->xs);   hipHostFree(gate_h->r);
  hipHostFree(gate_h->s);   hipHostFree(gate_h->d);   hipHostFree(gate_h->f);    hipHostFree(gate_h->fca);
  hipHostFree(gate_h->g);   hipHostFree(gate_h->qp);
  // Device state arrays.
  hipFree(gate_dev->vm);  hipFree(gate_dev->Cai); hipFree(gate_dev->CaSR); hipFree(gate_dev->Nai);
  hipFree(gate_dev->Ki);  hipFree(gate_dev->m);   hipFree(gate_dev->h);    hipFree(gate_dev->j);
  hipFree(gate_dev->xr1); hipFree(gate_dev->xr2); hipFree(gate_dev->xs);   hipFree(gate_dev->r);
  hipFree(gate_dev->s);   hipFree(gate_dev->d);   hipFree(gate_dev->f);    hipFree(gate_dev->fca);
  hipFree(gate_dev->g);   hipFree(gate_dev->qp);
  // Device forward/derivative arrays.
  hipFree(gate_devF->vm);  hipFree(gate_devF->Cai); hipFree(gate_devF->CaSR); hipFree(gate_devF->Nai);
  hipFree(gate_devF->Ki);  hipFree(gate_devF->m);   hipFree(gate_devF->h);    hipFree(gate_devF->j);
  hipFree(gate_devF->xr1); hipFree(gate_devF->xr2); hipFree(gate_devF->xs);   hipFree(gate_devF->r);
  hipFree(gate_devF->s);   hipFree(gate_devF->d);   hipFree(gate_devF->f);    hipFree(gate_devF->fca);
  hipFree(gate_devF->g);   hipFree(gate_devF->qp);
  // Sparse-matrix device arrays.
  hipFree(cudaMatrixINT->type);
  hipFree(cudaMatrixINT->rows);
  hipFree(cudaMatrixINT->maxnz);
  hipFree(cudaMatrixINT->csep);
  hipFree(cudaMatrixINT->jcoef);
  hipFree(cudaMatrixINT->coef);
}
// Right-hand side of the ten Tusscher-Noble-Noble-Panfilov (TNNP) ventricular
// cell model.  One thread serves one mesh point i2d: the current state is read
// from g_dev and the per-dt forward increments (effective time derivatives)
// are written into g_devF.  pitch, beta, Cm, t and rx are unused but kept so
// every GetFDev_* model kernel shares one signature.
// Fix vs. the original: the state variables (V, Cai, CaSR, Nai, Ki, m, h, j,
// xr1, xr2, xs, r, s, d, f, fca, g) were declared twice -- once in the bulk
// declaration lists and again with initialisers -- a C++ redefinition error.
// They are now declared exactly once, at the point of initialisation.
void __device__ GetFDev_TNNP(int i2d, int pitch, real beta, real Cm, real t, real dt, int totpoints, real rx, gateType g_dev, gateType g_devF) {
  /*------------------------------------------------------------------------
   * return if outside domain
   *------------------------------------------------------------------------
   */
  if (i2d >= totpoints) {
    return;
  }
  real Ek,Ena,Eks,Eca;                                                // reversal potentials
  real Iion,INa,ICaL,Ito,IKr,IKs,IK1,INaCa,INaK,IpCa,IpK,IbNa,IbCa;  // membrane currents
  real Caisquare,CaSRsquare,CaCurrent,A,Irel,Ileak,SERCA,CaSRCurrent; // Ca handling
  real CaCSQN,dCaSR,bjsr,cjsr,CaBuf,dCai,bc,cc;
  real AM,BM,TAU_M,M_INF,AH_1,BH_1,TAU_H,AH_2,BH_2,H_INF,AJ_1,BJ_1;  // gate kinetics
  real TAU_J,AJ_2,BJ_2,J_INF,Xr1_INF,axr1,bxr1,TAU_Xr1,Xr2_INF,axr2,bxr2;
  real TAU_Xr2,Xs_INF,Axs,Bxs,TAU_Xs,R_INF,S_INF,TAU_R,TAU_S,D_INF,Ad,Bd,Cd;
  real TAU_D,F_INF,TAU_F,FCa_INF,G_INF,fcaold,gold;
  real rec_iK1,rec_ipK,rec_iNaK,Ak1,Bk1;
  real CaiNew,CaSRNew,fcatemp,gtemp;
  /*------------------------------------------------------------------------
   * load the local state (declared once, see header note)
   *------------------------------------------------------------------------
   */
  real V    = g_dev.vm[i2d];
  real Cai  = g_dev.Cai[i2d];
  real CaSR = g_dev.CaSR[i2d];
  real Nai  = g_dev.Nai[i2d];
  real Ki   = g_dev.Ki[i2d];
  real m    = g_dev.m[i2d];
  real h    = g_dev.h[i2d];
  real j    = g_dev.j[i2d];
  real xr1  = g_dev.xr1[i2d];
  real xr2  = g_dev.xr2[i2d];
  real xs   = g_dev.xs[i2d];
  real r    = g_dev.r[i2d];
  real s    = g_dev.s[i2d];
  real d    = g_dev.d[i2d];
  real f    = g_dev.f[i2d];
  real fca  = g_dev.fca[i2d];
  real g    = g_dev.g[i2d];
  real fv = g_devF.vm[i2d];   // dV/dt accumulated so far (diffusion part)
  // Nernst reversal potentials.
  Ek=RTONF*(log((Ko/Ki)));
  Ena=RTONF*(log((Nao/Nai)));
  Eks=RTONF*(log((Ko+pKNa*Nao)/(Ki+pKNa*Nai)));
  Eca=0.5*RTONF*(log((Cao/Cai)));
  // Voltage-dependent rectification factors.
  Ak1=0.1/(1.+exp(0.06*(V-Ek-200)));
  Bk1=(3.*exp(0.0002*(V-Ek+100))+exp(0.1*(V-Ek-10)))/(1.+exp(-0.5*(V-Ek)));
  rec_iK1=Ak1/(Ak1+Bk1);
  rec_iNaK=(1./(1.+0.1245*exp(-0.1*V*F/(R*T))+0.0353*exp(-V*F/(R*T))));
  rec_ipK=1./(1.+exp((25-V)/5.98));
  // Membrane currents.
  INa=TNNP_GNa*m*m*m*h*j*(V-Ena);
  ICaL=TNNP_GCaL*d*f*fca*4*V*(F*F/(R*T))*
    (exp(2*V*F/(R*T))*Cai-0.341*Cao)/(exp(2*V*F/(R*T))-1.);
  Ito=TNNP_Gto*r*s*(V-Ek);
  IKr=TNNP_Gkr*sqrt(Ko/5.4)*xr1*xr2*(V-Ek);
  IKs=TNNP_Gks*xs*xs*(V-Eks);
  IK1=TNNP_GK1*rec_iK1*(V-Ek);
  INaCa=knaca*(1./(KmNai*KmNai*KmNai+Nao*Nao*Nao))*(1./(KmCa+Cao))*
    (1./(1+ksat*exp((n-1)*V*F/(R*T))))*
    (exp(n*V*F/(R*T))*Nai*Nai*Nai*Cao-
     exp((n-1)*V*F/(R*T))*Nao*Nao*Nao*Cai*2.5);
  INaK=knak*(Ko/(Ko+KmK))*(Nai/(Nai+KmNa))*rec_iNaK;
  IpCa=TNNP_GpCa*Cai/(KpCa+Cai);
  IpK=TNNP_GpK*rec_ipK*(V-Ek);
  IbNa=TNNP_GbNa*(V-Ena);
  IbCa=TNNP_GbCa*(V-Eca);
  Iion = IKr+IKs+IK1+Ito+INa+IbNa+ICaL+IbCa+INaK+INaCa+IpCa+IpK;
  /* Update Auxilliary Variables */
  Caisquare=Cai*Cai;
  CaSRsquare=CaSR*CaSR;
  CaCurrent=-(ICaL+IbCa+IpCa-2*INaCa)*inverseVcF2*CAPACITANCE;
  A=0.016464*CaSRsquare/(0.0625+CaSRsquare)+0.008232;
  Irel=A*d*g;
  Ileak=0.00008*(CaSR-Cai);
  SERCA=Vmaxup/(1.+(Kupsquare/Caisquare));
  CaSRCurrent=SERCA-Irel-Ileak;
  // SR calcium with calsequestrin buffering (analytic quadratic solve).
  CaCSQN=Bufsr*CaSR/(CaSR+Kbufsr);
  dCaSR=dt*(Vc/Vsr)*CaSRCurrent;
  bjsr=Bufsr-CaCSQN-dCaSR-CaSR+Kbufsr;
  cjsr=Kbufsr*(CaCSQN+dCaSR+CaSR);
  CaSRNew=(sqrt(bjsr*bjsr+4*cjsr)-bjsr)/2;
  // Cytosolic calcium with buffering (same quadratic form).
  CaBuf=Bufc*Cai/(Cai+Kbufc);
  dCai=dt*(CaCurrent-CaSRCurrent);
  bc=Bufc-CaBuf-dCai-Cai+Kbufc;
  cc=Kbufc*(CaBuf+dCai+Cai);
  CaiNew=(sqrt(bc*bc+4*cc)-bc)/2;
  // Gate steady states and time constants.
  AM=1./(1.+exp((-60.-V)/5.));
  BM=0.1/(1.+exp((V+35.)/5.))+0.10/(1.+exp((V-50.)/200.));
  TAU_M=AM*BM;
  M_INF=1./((1.+exp((-56.86-V)/9.03))*(1.+exp((-56.86-V)/9.03)));
  if (V>=-40.){
    AH_1=0.;
    BH_1=(0.77/(0.13*(1.+exp(-(V+10.66)/11.1))));
    TAU_H= 1.0/(AH_1+BH_1);
  }
  else{
    AH_2=(0.057*exp(-(V+80.)/6.8));
    BH_2=(2.7*exp(0.079*V)+(3.1e5)*exp(0.3485*V));
    TAU_H=1.0/(AH_2+BH_2);
  }
  H_INF=1./((1.+exp((V+71.55)/7.43))*(1.+exp((V+71.55)/7.43)));
  if(V>=-40.){
    AJ_1=0.;
    BJ_1=(0.6*exp((0.057)*V)/(1.+exp(-0.1*(V+32.))));
    TAU_J= 1.0/(AJ_1+BJ_1);
  }
  else{
    AJ_2=(((-2.5428e4)*exp(0.2444*V)-(6.948e-6)*
           exp(-0.04391*V))*(V+37.78)/
          (1.+exp(0.311*(V+79.23))));
    BJ_2=(0.02424*exp(-0.01052*V)/(1.+exp(-0.1378*(V+40.14))));
    TAU_J= 1.0/(AJ_2+BJ_2);
  }
  J_INF=H_INF;
  Xr1_INF=1./(1.+exp((-26.-V)/7.));
  axr1=450./(1.+exp((-45.-V)/10.));
  bxr1=6./(1.+exp((V-(-30.))/11.5));
  TAU_Xr1=axr1*bxr1;
  Xr2_INF=1./(1.+exp((V-(-88.))/24.));
  axr2=3./(1.+exp((-60.-V)/20.));
  bxr2=1.12/(1.+exp((V-60.)/20.));
  TAU_Xr2=axr2*bxr2;
  Xs_INF=1./(1.+exp((-5.-V)/14.));
  Axs=1100./(sqrt(1.+exp((-10.-V)/6)));
  Bxs=1./(1.+exp((V-60.)/20.));
  TAU_Xs=Axs*Bxs;
  R_INF=1./(1.+exp((20-V)/6.));
  S_INF=1./(1.+exp((V+20)/5.));
  TAU_R=9.5*exp(-(V+40.)*(V+40.)/1800.)+0.8;
  TAU_S=85.*exp(-(V+45.)*(V+45.)/320.)+5./(1.+exp((V-20.)/5.))+3.;
  D_INF=1./(1.+exp((-5-V)/7.5));
  Ad=1.4/(1.+exp((-35-V)/13))+0.25;
  Bd=1.4/(1.+exp((V+5)/5));
  Cd=1./(1.+exp((50-V)/20));
  TAU_D=Ad*Bd+Cd;
  F_INF=1./(1.+exp((V+20)/7));
  TAU_F=1125*exp(-(V+27)*(V+27)/240)+80+165/(1.+exp((25-V)/10));
  FCa_INF=(1./(1.+pow((CaiNew/0.000325),8))+
           0.1/(1.+exp((CaiNew-0.0005)/0.0001))+
           0.20/(1.+exp((CaiNew-0.00075)/0.0008))+
           0.23 )/1.46;
  if(CaiNew<0.00035) G_INF=1./(1.+pow((CaiNew/0.00035),6));
  else G_INF=1./(1.+pow((CaiNew/0.00035),16));
  // Write back the derivatives; gates use the Rush-Larsen exponential step
  // recast as an increment divided by dt.
  fv -= Iion;
  g_devF.vm[i2d] = fv;
  g_devF.Cai[i2d] = (CaiNew-Cai)/dt;
  g_devF.CaSR[i2d] = (CaSRNew-CaSR)/dt;
  g_devF.Nai[i2d] = -(INa+IbNa+3*INaK+3*INaCa)*inverseVcF*CAPACITANCE;
  g_devF.Ki[i2d] = -(IK1+Ito+IKr+IKs-2*INaK+IpK)*inverseVcF*CAPACITANCE;
  g_devF.m[i2d] = (M_INF-(M_INF-m)*exp(-dt/TAU_M)-m)/dt;
  g_devF.h[i2d] = (H_INF-(H_INF-h)*exp(-dt/TAU_H)-h)/dt;
  g_devF.j[i2d] = (J_INF-(J_INF-j)*exp(-dt/TAU_J)-j)/dt;
  g_devF.xr1[i2d] = (Xr1_INF-(Xr1_INF-xr1)*exp(-dt/TAU_Xr1)-xr1)/dt;
  g_devF.xr2[i2d] = (Xr2_INF-(Xr2_INF-xr2)*exp(-dt/TAU_Xr2)-xr2)/dt;
  g_devF.xs[i2d] = (Xs_INF-(Xs_INF-xs)*exp(-dt/TAU_Xs)-xs)/dt;
  g_devF.s[i2d] = (S_INF-(S_INF-s)*exp(-dt/TAU_S)-s)/dt;
  g_devF.r[i2d] = (R_INF-(R_INF-r)*exp(-dt/TAU_R)-r)/dt;
  g_devF.d[i2d] = (D_INF-(D_INF-d)*exp(-dt/TAU_D)-d)/dt;
  g_devF.f[i2d] = (F_INF-(F_INF-f)*exp(-dt/TAU_F)-f)/dt;
  // fca and g gates must not increase while the cell is depolarised
  // (V > -60): hold the gate there (zero derivative, since fcaold == fca).
  fcaold = fca;
  fcatemp = FCa_INF-(FCa_INF-fca)*exp(-dt/taufca);
  if(fcatemp>fcaold && (V)>-60){
    g_devF.fca[i2d] =(fcaold-fca)/dt;
  }
  else{
    g_devF.fca[i2d] =(fcatemp-fca)/dt;
  }
  gold = g;
  gtemp = G_INF-(G_INF-g)*exp(-dt/taug);
  if(gtemp>gold && (V)>-60){
    g_devF.g[i2d] = (gold-g)/dt;
  }
  else{
    g_devF.g[i2d]= (gtemp-g)/dt;
  }
} | 15d999f926a1990bae2193b758392f46a736aeea.cu | #include <stdio.h>
#include <stdlib.h>
#include <math.h>
#include "cuda_runtime.h"
#include "device_launch_parameters.h"
#include "../common/CudaSafeCall.h"
#include "TNNPglobalVariables.cuh"
#include "typedefSparse.h"
#include "sparsePrototypes.cuh"
#include "typedefTNNP.h"
#include "TNNPhostPrototypes.h"
#include "TNNPdevicePrototypes.cuh"
// Maximal conductances of the TNNP ionic currents, kept as mutable __device__
// globals (initialised from the *_0 compile-time defaults) so that
// TNNP_meminit can override them at run time with cudaMemcpyToSymbol.
__device__ real TNNP_GNa = TNNP_GNa_0;    // fast Na+ current (INa)
__device__ real TNNP_GCaL = TNNP_GCaL_0;  // L-type Ca2+ current (ICaL)
__device__ real TNNP_Gto = TNNP_Gto_0;    // transient outward K+ current (Ito)
__device__ real TNNP_Gkr = TNNP_Gkr_0;    // rapid delayed rectifier (IKr)
__device__ real TNNP_Gks = TNNP_Gks_0;    // slow delayed rectifier (IKs)
__device__ real TNNP_GK1 = TNNP_GK1_0;    // inward rectifier (IK1)
__device__ real TNNP_GpCa = TNNP_GpCa_0;  // plateau Ca2+ current (IpCa)
__device__ real TNNP_GpK = TNNP_GpK_0;    // plateau K+ current (IpK)
__device__ real TNNP_GbNa = TNNP_GbNa_0;  // background Na+ current (IbNa)
__device__ real TNNP_GbCa = TNNP_GbCa_0;  // background Ca2+ current (IbCa)
// Host-side resting potential used by TNNP_gateinit to fill the initial vm.
real TNNP_RestVoltage = TNNP_RestVoltage_0;
// Apply model configuration from the NULL-terminated list of resource strings
// 'res'.  Each string is matched against the rword table below with
// FindCommand; conductance overrides are pushed into the __device__ globals
// via cudaMemcpyToSymbol, TNNP_Vr(est) sets the host-side rest voltage.
// (FindCommand/GetRealValue are project helpers -- presumably keyword lookup
// and numeric parsing; confirm against their definitions.)
// Fix vs. the original: case 1002 wrote to the misspelled symbol 'TNPP_GCaL',
// which is not declared anywhere; it now targets TNNP_GCaL.  The unused
// locals 'j' and 'c' were removed.
void TNNP_meminit(char** res) {
  rword resources[] = {
    { "TNNP_Gna",      1001 },
    { "TNNP_GCaL",     1002 },
    { "TNNP_Gto",      1003 },
    { "TNNP_Gkr",      1004 },
    { "TNNP_Gks",      1005 },
    { "TNNP_GK1",      1006 },
    { "TNNP_GpCa",     1007 },
    { "TNNP_GpK",      1008 },
    { "TNNP_GbNa",     1009 },
    { "TNNP_GbCa",     1010 },
    { "TNNP_IV",       1011 },
    { "TNNP_Node",     1100 },
    { "TNNP_Nodetype", 1100 },
    { "TNNP_Patch",    1011 },
    { "TNNP_Type",     1100 },
    { "TNNP_Vr",       1012 },
    { "TNNP_Vrest",    1012 },
    { "TNNP_DT",       1013 },
    { NULL, 0 }
  };
  int i;
  int cmd;
  real temp;
  i = 0;
  while (res[i] != NULL) {
    cmd = FindCommand(resources, res[i]);
    switch (cmd) {
    case 1001:
      temp = GetRealValue(res[i]);
      cudaMemcpyToSymbol(TNNP_GNa, (void *)&temp, sizeof(real), 0, cudaMemcpyHostToDevice);
      break;
    case 1002:  // fixed: was 'TNPP_GCaL' (undeclared identifier)
      temp = GetRealValue(res[i]);
      cudaMemcpyToSymbol(TNNP_GCaL, (void *)&temp, sizeof(real), 0, cudaMemcpyHostToDevice);
      break;
    case 1003:
      temp = GetRealValue(res[i]);
      cudaMemcpyToSymbol(TNNP_Gto, (void *)&temp, sizeof(real), 0, cudaMemcpyHostToDevice);
      break;
    case 1004:
      temp = GetRealValue(res[i]);
      cudaMemcpyToSymbol(TNNP_Gkr, (void *)&temp, sizeof(real), 0, cudaMemcpyHostToDevice);
      break;
    case 1005:
      temp = GetRealValue(res[i]);
      cudaMemcpyToSymbol(TNNP_Gks, (void *)&temp, sizeof(real), 0, cudaMemcpyHostToDevice);
      break;
    case 1006:
      temp = GetRealValue(res[i]);
      cudaMemcpyToSymbol(TNNP_GK1, (void *)&temp, sizeof(real), 0, cudaMemcpyHostToDevice);
      break;
    case 1007:
      temp = GetRealValue(res[i]);
      cudaMemcpyToSymbol(TNNP_GpCa, (void *)&temp, sizeof(real), 0, cudaMemcpyHostToDevice);
      break;
    case 1008:
      temp = GetRealValue(res[i]);
      cudaMemcpyToSymbol(TNNP_GpK, (void *)&temp, sizeof(real), 0, cudaMemcpyHostToDevice);
      break;
    case 1009:
      temp = GetRealValue(res[i]);
      cudaMemcpyToSymbol(TNNP_GbNa, (void *)&temp, sizeof(real), 0, cudaMemcpyHostToDevice);
      break;
    case 1010:
      temp = GetRealValue(res[i]);
      cudaMemcpyToSymbol(TNNP_GbCa, (void *)&temp, sizeof(real), 0, cudaMemcpyHostToDevice);
      break;
    case 1011:
      // Legacy rest-patch loading, kept disabled for reference:
      /*iv = GetRealArray(res[i]);
      p = (real*)(&TNNP_RestPatch);
      c = GetNumValues(res[i]);
      if (c > TNNP_PatchSize) {
      c = TNNP_PatchSize;
      }
      for (j = 0; j<c; j++) {
      p[j] = iv[j];
      }*/
      break;
    case 1012:
      TNNP_RestVoltage = GetRealValue(res[i]);
      break;
    case 1013:
      // TNNP_DT = GetRealValue(res[i]);
      break;
    case 1100:
      //TNNP_NodeType = GetByteValue(res[i]);
      break;
    }
    i++;
  }
}
// Allocate and initialise the TNNP state arrays.
//   gate_h    pinned host mirror of the state (memSize bytes per variable),
//   gate_dev  device state arrays, gate_devF device forward/derivative arrays;
//             both are allocated with cudaMallocPitch (common pitch returned
//             through *pitch).
// The host arrays are filled with the single-cell rest state, uploaded into
// both device copies, and the qp indirection tables (device pointers to every
// variable except vm, in fixed solver order) are written to gate_dev->qp /
// gate_devF->qp (those qp buffers are assumed to be allocated elsewhere --
// confirm against the caller).
// Fixes vs. the original: stray ')' in the g[] initialisation, the '->,f'
// argument garbling in two cudaMemcpy2D calls, and the temporary qpH table
// was leaked.
void TNNP_gateinit(int memSize, size_t* pitch, gateType* gate_h, gateType* gate_dev, gateType* gate_devF) {
  const int NF = 17;  // vm + 16 gating/concentration variables
  // Per-variable slots of the three structures, all in the same fixed order.
  real** h_f[NF]  = { &gate_h->vm, &gate_h->Cai, &gate_h->CaSR, &gate_h->Nai, &gate_h->Ki,
                      &gate_h->m, &gate_h->h, &gate_h->j, &gate_h->xr1, &gate_h->xr2,
                      &gate_h->xs, &gate_h->r, &gate_h->s, &gate_h->d, &gate_h->f,
                      &gate_h->fca, &gate_h->g };
  real** d_f[NF]  = { &gate_dev->vm, &gate_dev->Cai, &gate_dev->CaSR, &gate_dev->Nai, &gate_dev->Ki,
                      &gate_dev->m, &gate_dev->h, &gate_dev->j, &gate_dev->xr1, &gate_dev->xr2,
                      &gate_dev->xs, &gate_dev->r, &gate_dev->s, &gate_dev->d, &gate_dev->f,
                      &gate_dev->fca, &gate_dev->g };
  real** dF_f[NF] = { &gate_devF->vm, &gate_devF->Cai, &gate_devF->CaSR, &gate_devF->Nai, &gate_devF->Ki,
                      &gate_devF->m, &gate_devF->h, &gate_devF->j, &gate_devF->xr1, &gate_devF->xr2,
                      &gate_devF->xs, &gate_devF->r, &gate_devF->s, &gate_devF->d, &gate_devF->f,
                      &gate_devF->fca, &gate_devF->g };
  // Single-cell rest state, same order as the tables above.
  const real init[NF] = { TNNP_RestVoltage, 0.0002, 0.2, 11.6, 138.3,
                          0.0, 0.75, 0.75, 0.0, 1.0,
                          0.0, 0.0, 1.0, 0.0, 1.0,
                          1.0, 1.0 };
  for (int k = 0; k < NF; k++) {
    cudaHostAlloc((void **)h_f[k], memSize, 0);                           // pinned host buffer
    CudaSafeCall(cudaMallocPitch((void **)d_f[k],  pitch, memSize, 1));   // device state row
    CudaSafeCall(cudaMallocPitch((void **)dF_f[k], pitch, memSize, 1));   // device derivative row
  }
  puts("\nFinished allocating device arrays\n");
  int totpoints = (int)memSize / sizeof(real);
  // Fill every host array with its rest value (fix: 'gate_h->g)[idx]' typo).
  for (int k = 0; k < NF; k++)
    for (int idx = 0; idx < totpoints; idx++)
      (*h_f[k])[idx] = init[k];
  // Upload the initial state into both device copies (host rows are packed,
  // device rows use the allocation pitch).
  for (int k = 0; k < NF; k++) {
    CudaSafeCall(cudaMemcpy2D((void *)*d_f[k],  *pitch, (void *)*h_f[k],
                              memSize, memSize, 1, cudaMemcpyHostToDevice));
    CudaSafeCall(cudaMemcpy2D((void *)*dF_f[k], *pitch, (void *)*h_f[k],
                              memSize, memSize, 1, cudaMemcpyHostToDevice));
  }
  // qp tables: device pointers to every variable except vm (index 0).
  real** qpH = (real**)malloc(sizeof(real *)*gate_h->qpl);
  for (int k = 1; k < NF; k++) qpH[k-1] = *dF_f[k];
  CudaSafeCall(cudaMemcpy((void *)gate_devF->qp, (void*)qpH, sizeof(real *)*gate_h->qpl, cudaMemcpyHostToDevice));
  for (int k = 1; k < NF; k++) qpH[k-1] = *d_f[k];
  CudaSafeCall(cudaMemcpy((void *)gate_dev->qp, (void*)qpH, sizeof(real *)*gate_h->qpl, cudaMemcpyHostToDevice));
  free(qpH);  // fix: this scratch table was leaked
  CudaCheckError();
  puts("\nFinished initializing device arrays\n");
}
// Copy the full device state back into the pinned host mirror
// (device -> host), one cudaMemcpy2D per state variable.
// Fixes vs. the original: 'pitch' is a plain size_t here (unlike the size_t*
// in TNNP_gateinit) and must not be dereferenced; the '->,f' garbling in the
// f-gate copy; and the destination/source pitches were swapped with respect
// to the cudaMemcpy2D contract (harmless for height==1, but the host rows
// are packed -- pitch memSize -- while the device rows use the allocation
// pitch).
void TNNP_sync(int memSize, size_t pitch, gateType* gate_h, gateType* gate_dev) {
  const int NF = 17;
  real* dst[NF] = { gate_h->vm, gate_h->Cai, gate_h->CaSR, gate_h->Nai, gate_h->Ki,
                    gate_h->m, gate_h->h, gate_h->j, gate_h->xr1, gate_h->xr2,
                    gate_h->xs, gate_h->r, gate_h->s, gate_h->d, gate_h->f,
                    gate_h->fca, gate_h->g };
  real* src[NF] = { gate_dev->vm, gate_dev->Cai, gate_dev->CaSR, gate_dev->Nai, gate_dev->Ki,
                    gate_dev->m, gate_dev->h, gate_dev->j, gate_dev->xr1, gate_dev->xr2,
                    gate_dev->xs, gate_dev->r, gate_dev->s, gate_dev->d, gate_dev->f,
                    gate_dev->fca, gate_dev->g };
  for (int k = 0; k < NF; k++)
    CudaSafeCall(cudaMemcpy2D((void *)dst[k], memSize, (void *)src[k], pitch,
                              memSize, 1, cudaMemcpyDeviceToHost));
}
// Tear down everything TNNP_gateinit and the matrix setup allocated: the
// pinned host mirror, both device state structures and the sparse-matrix
// arrays.  memSize and pitch are unused here; they stay in the signature so
// all model *_exit entry points match.  MatrixINT is host-owned and is not
// released here (same as before).
void TNNP_exit(int memSize, size_t pitch, gateType* gate_h, gateType* gate_dev, gateType* gate_devF, sparse* MatrixINT, cudasparse* cudaMatrixINT){
  // Pinned host buffers.
  cudaFreeHost(gate_h->vm);  cudaFreeHost(gate_h->Cai); cudaFreeHost(gate_h->CaSR); cudaFreeHost(gate_h->Nai);
  cudaFreeHost(gate_h->Ki);  cudaFreeHost(gate_h->m);   cudaFreeHost(gate_h->h);    cudaFreeHost(gate_h->j);
  cudaFreeHost(gate_h->xr1); cudaFreeHost(gate_h->xr2); cudaFreeHost(gate_h->xs);   cudaFreeHost(gate_h->r);
  cudaFreeHost(gate_h->s);   cudaFreeHost(gate_h->d);   cudaFreeHost(gate_h->f);    cudaFreeHost(gate_h->fca);
  cudaFreeHost(gate_h->g);   cudaFreeHost(gate_h->qp);
  // Device state arrays.
  cudaFree(gate_dev->vm);  cudaFree(gate_dev->Cai); cudaFree(gate_dev->CaSR); cudaFree(gate_dev->Nai);
  cudaFree(gate_dev->Ki);  cudaFree(gate_dev->m);   cudaFree(gate_dev->h);    cudaFree(gate_dev->j);
  cudaFree(gate_dev->xr1); cudaFree(gate_dev->xr2); cudaFree(gate_dev->xs);   cudaFree(gate_dev->r);
  cudaFree(gate_dev->s);   cudaFree(gate_dev->d);   cudaFree(gate_dev->f);    cudaFree(gate_dev->fca);
  cudaFree(gate_dev->g);   cudaFree(gate_dev->qp);
  // Device forward/derivative arrays.
  cudaFree(gate_devF->vm);  cudaFree(gate_devF->Cai); cudaFree(gate_devF->CaSR); cudaFree(gate_devF->Nai);
  cudaFree(gate_devF->Ki);  cudaFree(gate_devF->m);   cudaFree(gate_devF->h);    cudaFree(gate_devF->j);
  cudaFree(gate_devF->xr1); cudaFree(gate_devF->xr2); cudaFree(gate_devF->xs);   cudaFree(gate_devF->r);
  cudaFree(gate_devF->s);   cudaFree(gate_devF->d);   cudaFree(gate_devF->f);    cudaFree(gate_devF->fca);
  cudaFree(gate_devF->g);   cudaFree(gate_devF->qp);
  // Sparse-matrix device arrays.
  cudaFree(cudaMatrixINT->type);
  cudaFree(cudaMatrixINT->rows);
  cudaFree(cudaMatrixINT->maxnz);
  cudaFree(cudaMatrixINT->csep);
  cudaFree(cudaMatrixINT->jcoef);
  cudaFree(cudaMatrixINT->coef);
}
// Right-hand side of the ten Tusscher-Noble-Noble-Panfilov (TNNP) ventricular
// cell model.  One thread serves one mesh point i2d: the current state is read
// from g_dev and the per-dt forward increments (effective time derivatives)
// are written into g_devF.  pitch, beta, Cm, t and rx are unused but kept so
// every GetFDev_* model kernel shares one signature.
// Fix vs. the original: the state variables (V, Cai, CaSR, Nai, Ki, m, h, j,
// xr1, xr2, xs, r, s, d, f, fca, g) were declared twice -- once in the bulk
// declaration lists and again with initialisers -- a C++ redefinition error.
// They are now declared exactly once, at the point of initialisation.
void __device__ GetFDev_TNNP(int i2d, int pitch, real beta, real Cm, real t, real dt, int totpoints, real rx, gateType g_dev, gateType g_devF) {
  /*------------------------------------------------------------------------
   * return if outside domain
   *------------------------------------------------------------------------
   */
  if (i2d >= totpoints) {
    return;
  }
  real Ek,Ena,Eks,Eca;                                                // reversal potentials
  real Iion,INa,ICaL,Ito,IKr,IKs,IK1,INaCa,INaK,IpCa,IpK,IbNa,IbCa;  // membrane currents
  real Caisquare,CaSRsquare,CaCurrent,A,Irel,Ileak,SERCA,CaSRCurrent; // Ca handling
  real CaCSQN,dCaSR,bjsr,cjsr,CaBuf,dCai,bc,cc;
  real AM,BM,TAU_M,M_INF,AH_1,BH_1,TAU_H,AH_2,BH_2,H_INF,AJ_1,BJ_1;  // gate kinetics
  real TAU_J,AJ_2,BJ_2,J_INF,Xr1_INF,axr1,bxr1,TAU_Xr1,Xr2_INF,axr2,bxr2;
  real TAU_Xr2,Xs_INF,Axs,Bxs,TAU_Xs,R_INF,S_INF,TAU_R,TAU_S,D_INF,Ad,Bd,Cd;
  real TAU_D,F_INF,TAU_F,FCa_INF,G_INF,fcaold,gold;
  real rec_iK1,rec_ipK,rec_iNaK,Ak1,Bk1;
  real CaiNew,CaSRNew,fcatemp,gtemp;
  /*------------------------------------------------------------------------
   * load the local state (declared once, see header note)
   *------------------------------------------------------------------------
   */
  real V    = g_dev.vm[i2d];
  real Cai  = g_dev.Cai[i2d];
  real CaSR = g_dev.CaSR[i2d];
  real Nai  = g_dev.Nai[i2d];
  real Ki   = g_dev.Ki[i2d];
  real m    = g_dev.m[i2d];
  real h    = g_dev.h[i2d];
  real j    = g_dev.j[i2d];
  real xr1  = g_dev.xr1[i2d];
  real xr2  = g_dev.xr2[i2d];
  real xs   = g_dev.xs[i2d];
  real r    = g_dev.r[i2d];
  real s    = g_dev.s[i2d];
  real d    = g_dev.d[i2d];
  real f    = g_dev.f[i2d];
  real fca  = g_dev.fca[i2d];
  real g    = g_dev.g[i2d];
  real fv = g_devF.vm[i2d];   // dV/dt accumulated so far (diffusion part)
  // Nernst reversal potentials.
  Ek=RTONF*(log((Ko/Ki)));
  Ena=RTONF*(log((Nao/Nai)));
  Eks=RTONF*(log((Ko+pKNa*Nao)/(Ki+pKNa*Nai)));
  Eca=0.5*RTONF*(log((Cao/Cai)));
  // Voltage-dependent rectification factors.
  Ak1=0.1/(1.+exp(0.06*(V-Ek-200)));
  Bk1=(3.*exp(0.0002*(V-Ek+100))+exp(0.1*(V-Ek-10)))/(1.+exp(-0.5*(V-Ek)));
  rec_iK1=Ak1/(Ak1+Bk1);
  rec_iNaK=(1./(1.+0.1245*exp(-0.1*V*F/(R*T))+0.0353*exp(-V*F/(R*T))));
  rec_ipK=1./(1.+exp((25-V)/5.98));
  // Membrane currents.
  INa=TNNP_GNa*m*m*m*h*j*(V-Ena);
  ICaL=TNNP_GCaL*d*f*fca*4*V*(F*F/(R*T))*
    (exp(2*V*F/(R*T))*Cai-0.341*Cao)/(exp(2*V*F/(R*T))-1.);
  Ito=TNNP_Gto*r*s*(V-Ek);
  IKr=TNNP_Gkr*sqrt(Ko/5.4)*xr1*xr2*(V-Ek);
  IKs=TNNP_Gks*xs*xs*(V-Eks);
  IK1=TNNP_GK1*rec_iK1*(V-Ek);
  INaCa=knaca*(1./(KmNai*KmNai*KmNai+Nao*Nao*Nao))*(1./(KmCa+Cao))*
    (1./(1+ksat*exp((n-1)*V*F/(R*T))))*
    (exp(n*V*F/(R*T))*Nai*Nai*Nai*Cao-
     exp((n-1)*V*F/(R*T))*Nao*Nao*Nao*Cai*2.5);
  INaK=knak*(Ko/(Ko+KmK))*(Nai/(Nai+KmNa))*rec_iNaK;
  IpCa=TNNP_GpCa*Cai/(KpCa+Cai);
  IpK=TNNP_GpK*rec_ipK*(V-Ek);
  IbNa=TNNP_GbNa*(V-Ena);
  IbCa=TNNP_GbCa*(V-Eca);
  Iion = IKr+IKs+IK1+Ito+INa+IbNa+ICaL+IbCa+INaK+INaCa+IpCa+IpK;
  /* Update Auxilliary Variables */
  Caisquare=Cai*Cai;
  CaSRsquare=CaSR*CaSR;
  CaCurrent=-(ICaL+IbCa+IpCa-2*INaCa)*inverseVcF2*CAPACITANCE;
  A=0.016464*CaSRsquare/(0.0625+CaSRsquare)+0.008232;
  Irel=A*d*g;
  Ileak=0.00008*(CaSR-Cai);
  SERCA=Vmaxup/(1.+(Kupsquare/Caisquare));
  CaSRCurrent=SERCA-Irel-Ileak;
  // SR calcium with calsequestrin buffering (analytic quadratic solve).
  CaCSQN=Bufsr*CaSR/(CaSR+Kbufsr);
  dCaSR=dt*(Vc/Vsr)*CaSRCurrent;
  bjsr=Bufsr-CaCSQN-dCaSR-CaSR+Kbufsr;
  cjsr=Kbufsr*(CaCSQN+dCaSR+CaSR);
  CaSRNew=(sqrt(bjsr*bjsr+4*cjsr)-bjsr)/2;
  // Cytosolic calcium with buffering (same quadratic form).
  CaBuf=Bufc*Cai/(Cai+Kbufc);
  dCai=dt*(CaCurrent-CaSRCurrent);
  bc=Bufc-CaBuf-dCai-Cai+Kbufc;
  cc=Kbufc*(CaBuf+dCai+Cai);
  CaiNew=(sqrt(bc*bc+4*cc)-bc)/2;
  // Gate steady states and time constants.
  AM=1./(1.+exp((-60.-V)/5.));
  BM=0.1/(1.+exp((V+35.)/5.))+0.10/(1.+exp((V-50.)/200.));
  TAU_M=AM*BM;
  M_INF=1./((1.+exp((-56.86-V)/9.03))*(1.+exp((-56.86-V)/9.03)));
  if (V>=-40.){
    AH_1=0.;
    BH_1=(0.77/(0.13*(1.+exp(-(V+10.66)/11.1))));
    TAU_H= 1.0/(AH_1+BH_1);
  }
  else{
    AH_2=(0.057*exp(-(V+80.)/6.8));
    BH_2=(2.7*exp(0.079*V)+(3.1e5)*exp(0.3485*V));
    TAU_H=1.0/(AH_2+BH_2);
  }
  H_INF=1./((1.+exp((V+71.55)/7.43))*(1.+exp((V+71.55)/7.43)));
  if(V>=-40.){
    AJ_1=0.;
    BJ_1=(0.6*exp((0.057)*V)/(1.+exp(-0.1*(V+32.))));
    TAU_J= 1.0/(AJ_1+BJ_1);
  }
  else{
    AJ_2=(((-2.5428e4)*exp(0.2444*V)-(6.948e-6)*
           exp(-0.04391*V))*(V+37.78)/
          (1.+exp(0.311*(V+79.23))));
    BJ_2=(0.02424*exp(-0.01052*V)/(1.+exp(-0.1378*(V+40.14))));
    TAU_J= 1.0/(AJ_2+BJ_2);
  }
  J_INF=H_INF;
  Xr1_INF=1./(1.+exp((-26.-V)/7.));
  axr1=450./(1.+exp((-45.-V)/10.));
  bxr1=6./(1.+exp((V-(-30.))/11.5));
  TAU_Xr1=axr1*bxr1;
  Xr2_INF=1./(1.+exp((V-(-88.))/24.));
  axr2=3./(1.+exp((-60.-V)/20.));
  bxr2=1.12/(1.+exp((V-60.)/20.));
  TAU_Xr2=axr2*bxr2;
  Xs_INF=1./(1.+exp((-5.-V)/14.));
  Axs=1100./(sqrt(1.+exp((-10.-V)/6)));
  Bxs=1./(1.+exp((V-60.)/20.));
  TAU_Xs=Axs*Bxs;
  R_INF=1./(1.+exp((20-V)/6.));
  S_INF=1./(1.+exp((V+20)/5.));
  TAU_R=9.5*exp(-(V+40.)*(V+40.)/1800.)+0.8;
  TAU_S=85.*exp(-(V+45.)*(V+45.)/320.)+5./(1.+exp((V-20.)/5.))+3.;
  D_INF=1./(1.+exp((-5-V)/7.5));
  Ad=1.4/(1.+exp((-35-V)/13))+0.25;
  Bd=1.4/(1.+exp((V+5)/5));
  Cd=1./(1.+exp((50-V)/20));
  TAU_D=Ad*Bd+Cd;
  F_INF=1./(1.+exp((V+20)/7));
  TAU_F=1125*exp(-(V+27)*(V+27)/240)+80+165/(1.+exp((25-V)/10));
  FCa_INF=(1./(1.+pow((CaiNew/0.000325),8))+
           0.1/(1.+exp((CaiNew-0.0005)/0.0001))+
           0.20/(1.+exp((CaiNew-0.00075)/0.0008))+
           0.23 )/1.46;
  if(CaiNew<0.00035) G_INF=1./(1.+pow((CaiNew/0.00035),6));
  else G_INF=1./(1.+pow((CaiNew/0.00035),16));
  // Write back the derivatives; gates use the Rush-Larsen exponential step
  // recast as an increment divided by dt.
  fv -= Iion;
  g_devF.vm[i2d] = fv;
  g_devF.Cai[i2d] = (CaiNew-Cai)/dt;
  g_devF.CaSR[i2d] = (CaSRNew-CaSR)/dt;
  g_devF.Nai[i2d] = -(INa+IbNa+3*INaK+3*INaCa)*inverseVcF*CAPACITANCE;
  g_devF.Ki[i2d] = -(IK1+Ito+IKr+IKs-2*INaK+IpK)*inverseVcF*CAPACITANCE;
  g_devF.m[i2d] = (M_INF-(M_INF-m)*exp(-dt/TAU_M)-m)/dt;
  g_devF.h[i2d] = (H_INF-(H_INF-h)*exp(-dt/TAU_H)-h)/dt;
  g_devF.j[i2d] = (J_INF-(J_INF-j)*exp(-dt/TAU_J)-j)/dt;
  g_devF.xr1[i2d] = (Xr1_INF-(Xr1_INF-xr1)*exp(-dt/TAU_Xr1)-xr1)/dt;
  g_devF.xr2[i2d] = (Xr2_INF-(Xr2_INF-xr2)*exp(-dt/TAU_Xr2)-xr2)/dt;
  g_devF.xs[i2d] = (Xs_INF-(Xs_INF-xs)*exp(-dt/TAU_Xs)-xs)/dt;
  g_devF.s[i2d] = (S_INF-(S_INF-s)*exp(-dt/TAU_S)-s)/dt;
  g_devF.r[i2d] = (R_INF-(R_INF-r)*exp(-dt/TAU_R)-r)/dt;
  g_devF.d[i2d] = (D_INF-(D_INF-d)*exp(-dt/TAU_D)-d)/dt;
  g_devF.f[i2d] = (F_INF-(F_INF-f)*exp(-dt/TAU_F)-f)/dt;
  // fca and g gates must not increase while the cell is depolarised
  // (V > -60): hold the gate there (zero derivative, since fcaold == fca).
  fcaold = fca;
  fcatemp = FCa_INF-(FCa_INF-fca)*exp(-dt/taufca);
  if(fcatemp>fcaold && (V)>-60){
    g_devF.fca[i2d] =(fcaold-fca)/dt;
  }
  else{
    g_devF.fca[i2d] =(fcatemp-fca)/dt;
  }
  gold = g;
  gtemp = G_INF-(G_INF-g)*exp(-dt/taug);
  if(gtemp>gold && (V)>-60){
    g_devF.g[i2d] = (gold-g)/dt;
  }
  else{
    g_devF.g[i2d]= (gtemp-g)/dt;
  }
} |
23a5f587d2c36a71e0b23752711703e999a4ee64.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
//im3D ,
// : bzr checkout bzr+ssh://photon/Save/BZR-for-all/lev/im3D
//: VadimLevchenko@mail.ru
// : ./im3D <--> [<--> ...]
// 100 1500
//, aivlib- drp
#include "cuda_math.h"
#include "fpal.h"
#include "im2D.h"
#include "im3D.hpp"
// --- im3D viewer: global host/device state ---------------------------------
image2D im2D;                     // 2D output image wrapper
image_pars imHost; __constant__ image_pars im;   // palette/render params: host copy + device-constant copy
__constant__ im3D_pars im3D;      // 3D view parameters (device-constant copy)
float runTime=0.0, SmoothFPS=0.0; // render timing; reset_title suggests fps ~ 1000/runTime (ms)
bool recalc_at_once=true, recalc_always=false, save_anim_flag=false, draw_edges_flag=false;
int anim_acc=0, render_type=3;
// 3D data textures over the volume: floatT4im voxels and a short2 variant
// bound to the *surf arrays below.
texture<floatT4im, hipTextureType3D> data3D_tex;
hipArray* data3D_texArray=0;
texture<short2, hipTextureType3D> data3Dsurf_tex;
hipArray* data3Dsurf_texArray=0;
const char* optfName="im3DI.opt";// options file name (original note lost in transcoding)
FILE* gpPipe=0;                   // presumably a pipe to gnuplot (name) -- TODO confirm
int sec1Daxis=0;                  // axis index for the 1D section -- TODO confirm semantics
//#include <string.h>
#include <fcntl.h>
#include <unistd.h>
#include <sys/types.h>
//#include <time.h>
#include <malloc.h>
// Window-title state consumed by im3D_pars::reset_title().
char WinTitle[1024], addTitleStr[5]; int TitleStrInd=0;
const char* baseTitleStr="2"; int baseTitleFlag=1;
int optfid=-1; int im3DIopt_shift=0;  // option-file descriptor/offset (shadowed locally in load_from_file)
// Load the palette (imHost) and the saved viewer state (*this) from the
// binary options file fn.  Expected layout:
//   [int size][fpal_pars bytes][int size][im3D_pars4save bytes]
// The file is created if missing (O_CREAT), so a first run produces a
// harmless "Illegal Drop format" message.  The local 'optfid' deliberately
// shadows the global of the same name (behaviour kept from the original).
// Fixes vs. the original: the descriptor is now closed on every early-exit
// path (it was leaked on both format errors) and a short/failed size read is
// rejected instead of comparing an uninitialised 'sz'.
void im3D_pars4save::load_from_file(const char* fn) {
  int optfid=open(fn, O_RDWR|O_CREAT, 0644);
  if(optfid<0) { printf(" %s, \n", fn); return; }
  int sz=0,rs;
  rs = read(optfid, &sz, sizeof(sz));
  if(rs != (int)sizeof(sz) || sz<=0 || sz>(int)sizeof(fpal_pars)) { printf("Illegal Drop format\n"); close(optfid); return; }
  rs=read(optfid, &imHost, sz); printf("Load %dB fpal of %ldB", rs, sizeof(fpal_pars));
  rs = read(optfid, &sz, sizeof(sz));
  if(rs != (int)sizeof(sz) || sz<=0 || sz>(int)sizeof(im3D_pars4save)) { printf("Illegal Drop format\n"); close(optfid); return; }
  rs=read(optfid, this, sz); printf(" & %dB im3D of %ldB\n", rs, sizeof(im3D_pars4save));
  close(optfid);
}
char* im3D_pars::reset_title() {
  // Rebuilds WinTitle from the pieces selected by the characters in TitleStr
  // (the base mode string plus the last few pressed keys): file names, grid
  // sizes, value limits, palette, density/opacity, timing, transparency.
  char* pTitle=WinTitle, TitleStr[20];
  TitleStr[0] = 0;   // BUG FIX: strlen() below read TitleStr uninitialized when baseTitleFlag%3==0
  if(baseTitleFlag%3>0) strcpy(TitleStr,baseTitleStr);
  size_t tlen=strlen(TitleStr);
  strncpy(TitleStr+tlen,addTitleStr,4);
  TitleStr[tlen+4] = 0;   // BUG FIX: strncpy does not terminate when the source holds 4 chars
  if(baseTitleFlag%3==1 && fName) { sprintf(pTitle, "%s ", fName); pTitle += strlen(pTitle); }
  if(baseTitleFlag%3==2 && dfName) { sprintf(pTitle, "%s ", dfName); pTitle += strlen(pTitle); }
  if(strpbrk(TitleStr,"23")) { sprintf(pTitle, "(%dx%dx%d)", Nx,Ny,Nz); pTitle += strlen(pTitle); }
  if(strpbrk(TitleStr,"xyzXYZ")) { sprintf(pTitle, "/(%dx%dx%d)", ix0,iy0,iz0); pTitle += strlen(pTitle); }
  if(strpbrk(TitleStr,"aA\001\023=-+_06789")) { sprintf(pTitle, " %g<f<%g", imHost.fmin,imHost.fmax); pTitle += strlen(pTitle); }
  if(strpbrk(TitleStr,"pP[]|?{}")) { sprintf(pTitle, " pal[%d]:(%g)^%g*%g*%g|%g;", imHost.palID, imHost.pscale, imHost.gamma_pal, imHost.brightness_coff, imHost.max_rgb, imHost.base_val); pTitle += strlen(pTitle); }
  if(strpbrk(TitleStr,"dDjJmM")) { sprintf(pTitle, " D/J/M:%g/%g/%g;", density, opacity, tstep); pTitle += strlen(pTitle); }
#ifdef CALC_TIME_DIAG
  extern float calcTime, calcPerf; extern int TimeStep;
  if(strpbrk(TitleStr,"bG")) { sprintf(pTitle, " calc: %.2f sec, %.2fG cells/sec; %d steps;", 1e-3*calcTime, calcPerf, TimeStep); pTitle += strlen(pTitle); }
#endif
  if(strpbrk(TitleStr,"tT\20")) { sprintf(pTitle, " transp: %s,%d", imHost.transparency_discrete_flag?"discr":"mode",imHost.transparency_mode); pTitle += strlen(pTitle); }
  return WinTitle;
}
struct RotMatr {
  // 3x3 rotation matrix around coordinate axis c (0=x, 1=y, 2=z) by phi degrees.
  double v[3][3];
  RotMatr(int c, double phi) {
    phi *= M_PI/180;                 // degrees -> radians
    int cp=(c+1)%3, cm=(c+2)%3;      // the two axes rotated into each other
    // BUG FIX (typo): was `= 0,0;` -- the comma operator made it work by accident.
    for(int i=0; i<3; i++) v[i][c] = v[c][i] = 0.0;
    v[c][c] = 1.0;
    v[cp][cp] = v[cm][cm] = cos(phi);
    v[cm][cp] = sin(phi); v[cp][cm] =-v[cm][cp];
  }
  // this = M * this : accumulates rotation M in front of the current one.
  void operator *= (RotMatr& M) {
    double vo[3][3];
    for(int i=0; i<3; i++) for(int j=0; j<3; j++) vo[i][j] = v[i][j];
    for(int i=0; i<3; i++) for(int j=0; j<3; j++) {
      double vn=0.0;
      for(int k=0; k<3; k++) vn += M.v[i][k]*vo[k][j];
      v[i][j] = vn;
    }
  }
};
std::string im3D_pars::getfName() {
  // Derives a filesystem-safe base name from fName: truncated copy with the
  // extension and anything after a space cut off, and '/' replaced by '_'.
  char fN[]="image.__________";
  // BUG FIX: strncpy leaves fN unterminated for long fName (fix was left commented out).
  if(fName) { strncpy(fN, fName, sizeof(fN)-1); fN[sizeof(fN)-1] = 0; }
  if(strrchr(fN,'.')) strrchr(fN,'.')[0] = 0;
  if(strrchr(fN,' ')) strrchr(fN,' ')[0] = 0;
  if(strrchr(fN,'/')) strrchr(fN,'/')[0] = '_';
  return std::string(fN);
}
std::string im3D_pars::getfDropName(const char* ext, int it) {
  // Builds "<drop_dir>/<base>_NNNN<ext>"; it<0 means "use the current frame number".
  char drop_name[1024];
  // snprintf instead of sprintf: long drop_dir/fName combinations cannot overflow the buffer.
  snprintf(drop_name, sizeof(drop_name), "%s/%s_%04d%s", drop_dir, getfName().c_str(), (it>=0)?it:imHost.nFrame, ext);
  return std::string(drop_name);
}
bool im3D_pars::save_png(int it) {
  // Dump the current frame to <drop_dir>/<base>_NNNN.png and bump the PNG counter.
  const std::string png_path = getfDropName(".png", it);
  im2D.out2png(png_path.c_str());
  ++imHost.nFpng;
  return false;   // callers use the result as "needs immediate recalc" -> never
}
__global__ void save_gp3D();
const int tileSz=16, tilesN=16;
// Saves the current frame as a PNG plus a matching gnuplot script that redraws
// it with a calibrated x2 axis (the colour scale). For the 3D renderer, axis
// labels are produced on-device by save_gp3D and captured into the script by
// temporarily pointing stdout at the file.
bool im3D_pars::save_gp(int it) {
  std::string png_name=getfDropName(".png", it);
  std::string gp_name=getfDropName(".gp", it);
  im2D.out2png(png_name.c_str());
  //sprintf( gp_name, "a.gp", fName, imHost.nFrame);
  FILE* gp=fopen(gp_name.c_str(), "w"),* old_stdout=stdout;
  fprintf(gp, "unset key\n");
  fprintf(gp, "unset border\n");
  fprintf(gp, "unset xtics\n");
  fprintf(gp, "set x2tics border\n");
  fprintf(gp, "set x2range [%g:%g]\n", imHost.fmin, imHost.fmax);
  fprintf(gp, "unset ytics\n");
  //fprintf(gp, "load \"labels.gp\"\n");
  //printf("viewRotation: %g, %g\n", viewRotation[0], viewRotation[1]);
  //printf("viewTranslation: %g, %g, %g\n", viewTranslation[0], viewTranslation[1], viewTranslation[2]);
  if(render_type==3) {
    const int Sgp=(tilesN-1)*tileSz;
    // NOTE(review): assigning to stdout works with glibc but is not portable C/C++.
    stdout = gp;
    if(CHECK_ERROR(hipDeviceSynchronize())) throw(-1);
    hipLaunchKernelGGL(( save_gp3D) , dim3(dim3((im2D.Nx+Sgp-1)/Sgp,(im2D.Ny+Sgp-1)/Sgp)),dim3(dim3(tilesN,tilesN)), 0, 0, );
    if(CHECK_ERROR(hipDeviceSynchronize())) throw(-1);   // device printf flushed before stdout is restored
    stdout = old_stdout;
  }
  fprintf(gp, "plot[0:%g][0:%g] \"%s\" binary filetype=png dx=1 dy=1 with rgbimage\n", float(bNx), float(bNy), png_name.c_str());
  fprintf(gp, "pause -1\n");
  fclose(gp);
  if(type_diag_flag>=0) printf(" %s\n", gp_name.c_str());
  return false;
}
floatT4im get_val_from_arr3D(int ix, int iy, int iz);
void reset(im3D_pars* p=0);
#if DATA_VECTOR_SZ==1
std::string im3D_pars::save_section(int it) {
  // Scalar-data variant: dumps the three axis-aligned 1D sections through
  // (ix0,iy0,iz0) into <base>_NNNN.dat as three blank-line-separated gnuplot
  // blocks ("index value" rows). Returns the file name even on write failure.
  printf("f(%d,%d,%d) = %g\n", ix0, iy0, iz0, get_val_from_arr3D(ix0, iy0, iz0));
  std::string dat_name=getfDropName(".dat",it);
  FILE* dat=fopen(dat_name.c_str(), "w");
  if(dat == NULL) {   // BUG FIX: unchecked fopen led to fprintf(NULL) crashes
    printf("save_section: cannot open %s\n", dat_name.c_str());
    return dat_name;
  }
  for(int i=0; i<Nx; i++) fprintf(dat, "%d %g\n", i, get_val_from_arr3D(i, iy0, iz0));
  fprintf(dat, "\n\n");
  for(int i=0; i<Ny; i++) fprintf(dat, "%d %g\n", i, get_val_from_arr3D(ix0, i, iz0));
  fprintf(dat, "\n\n");
  for(int i=0; i<Nz; i++) fprintf(dat, "%d %g\n", i, get_val_from_arr3D(ix0, iy0, i));
  fclose(dat);
  return dat_name;
}
// Pipes the sections written by save_section() into a persistent gnuplot
// process; sec1Daxis selects one axis (0..2) or 3 = all three overlaid.
void im3D_pars::plot_section() {
  const char* re=gpPipe?"re":"";   // "replot" prefix once the pipe already exists
  if(gpPipe==NULL) gpPipe = popen("gnuplot", "w");
  int sec[]={ix0,iy0,iz0,ix0,iy0};   // cyclic: [axis+1],[axis+2] are the two fixed coordinates
  if(sec1Daxis<3) fprintf(gpPipe, "set style data l;\n%splot '%s' i %d t '%c:(%d,%d)'\n", re, save_section().c_str(), sec1Daxis, "xyz"[sec1Daxis], sec[sec1Daxis+1], sec[sec1Daxis+2]);
  else fprintf(gpPipe, "set style data l;\n%splot '%s' u ($1-%d):2 i 0 t '(ix-%d)', '' u ($1-%d):2 i 1 t '(iy-%d)', '' u ($1-%d):2 i 2 t '(iz-%d)'\n", re, save_section().c_str(), ix0,ix0,iy0,iy0,iz0,iz0);
  fflush(gpPipe);
}
#elif DATA_VECTOR_SZ==3
std::string im3D_pars::save_section(int it) {
  // Vector-data variant: dumps the three axis-aligned 1D sections through
  // (ix0,iy0,iz0) into <base>_NNNN.dat, rows "index vx vy vz vw", as three
  // blank-line-separated gnuplot blocks. Returns the file name even on failure.
  floatT4im v=get_val_from_arr3D(ix0, iy0, iz0);
  printf("f(%d,%d,%d) = (%g,%g,%g,%g)\n", ix0, iy0, iz0, v.x, v.y, v.z, v.w);
  std::string dat_name=getfDropName(".dat",it);
  FILE* dat=fopen(dat_name.c_str(), "w");
  if(dat == NULL) {   // BUG FIX: unchecked fopen led to fprintf(NULL) crashes
    printf("save_section: cannot open %s\n", dat_name.c_str());
    return dat_name;
  }
  for(int i=0; i<Nx; i++) {
    floatT4im v=get_val_from_arr3D(i, iy0, iz0);
    fprintf(dat, "%d %g %g %g %g\n", i, v.x, v.y, v.z, v.w);
  }
  fprintf(dat, "\n\n");
  for(int i=0; i<Ny; i++) {
    floatT4im v=get_val_from_arr3D(ix0, i, iz0);
    fprintf(dat, "%d %g %g %g %g\n", i, v.x, v.y, v.z, v.w);
  }
  fprintf(dat, "\n\n");
  for(int i=0; i<Nz; i++) {
    floatT4im v=get_val_from_arr3D(ix0, iy0, i);
    fprintf(dat, "%d %g %g %g %g\n", i, v.x, v.y, v.z, v.w);
  }
  fclose(dat);
  return dat_name;
}
// Vector-data variant of plot_section(): in 3D render mode the sections are
// drawn as 3D curves (splot of the vector components); in 2D mode the three
// components are plotted against the index, per axis or all axes overlaid.
void im3D_pars::plot_section() {
  const char* re=gpPipe?"re":(render_type==3?"s":"");   // "resplot"/"splot"/"plot" prefix selection
  if(gpPipe==NULL) gpPipe = popen("gnuplot", "w");
  int sec[]={ix0,iy0,iz0,ix0,iy0};   // cyclic: [axis+1],[axis+2] are the two fixed coordinates
  if(render_type==3) {
    if(sec1Daxis<3) fprintf(gpPipe, "set ticslevel 0; set style data lp;\n%splot '%s' u 2:3:4 i %d t '%c:(%d,%d)'\n", re, save_section().c_str(), sec1Daxis, "xyz"[sec1Daxis], sec[sec1Daxis+1], sec[sec1Daxis+2]);
    else fprintf(gpPipe, "set ticslevel 0; set style data lp;\n%splot '%s' u 2:3:4 i 0 t '(ix-%d)', '' u 2:3:4 i 1 t '(iy-%d)', '' u 2:3:4 i 2 t '(iz-%d)'\n", re, save_section().c_str(), ix0,iy0,iz0);
  } else if(render_type==2) {
    if(sec1Daxis<3) fprintf(gpPipe, "set style data l;\n%splot '%s' u 1:2 i %d t '[%d,%d].x', '' u 1:3 i %d t '.y', '' u 1:4 i %d t '.z'\n", re, save_section().c_str(), sec1Daxis, sec[sec1Daxis+1], sec[sec1Daxis+2], sec1Daxis, sec1Daxis);
    else fprintf(gpPipe, "set style data l;\n%splot '%s' u ($1-%d):2 i 0 t '(ix-%d).x', '' u ($1-%d):3 i 0 t '.y', '' u ($1-%d):4 i 0 t '.z', '' u ($1-%d):2 i 1 t '(iy-%d).x', '' u ($1-%d):3 i 1 t '.y', '' u ($1-%d):4 i 1 t '.z', '' u ($1-%d):2 i 2 t '(iz-%d).x', '' u ($1-%d):3 i 2 t '.y', '' u ($1-%d):4 i 2 t '.z'\n", re, save_section().c_str(), ix0,ix0,ix0,ix0,iy0,iy0,iy0,iy0,iz0,iz0,iz0,iz0);
    //else fprintf(gpPipe, "set style data l;\n%splot '%s' u ($1-%d):2 i 0 t '(ix-%d)', '' u ($1-%d):2 i 1 t '(iy-%d)', '' u ($1-%d):2 i 2 t '(iz-%d)'\n", re, save_section().c_str(), ix0,ix0,iy0,iy0,iz0,iz0);
  }
  fflush(gpPipe);
}
#endif
// Releases all image/device resources before process exit: the 2D canvas,
// both 3D texture arrays and the device-side random-phase array.
void im3D_pars::clear4exit() {
  im2D.clear();
  CHECK_ERROR(hipFreeArray(data3D_texArray));
  CHECK_ERROR(hipFreeArray(data3Dsurf_texArray));
  CHECK_ERROR(hipFree(randArr));
}
void save_bmp4backgrownd();
any_idle_func_struct xyz_void,* xyz=&xyz_void;
struct idle_func_struct3D: public any_idle_func_struct {
  // Idle-animation driver for the 3D view: each step() adds a fixed increment
  // to one float parameter (a rotation angle or a translation component).
  float* par, val;   // par: animated parameter; val: per-step increment
  void set(float* _par, float _val) { par = _par; val = _val; }
  void step() { *par += val; }
} xyz3D;
struct idle_func_struct2D: public any_idle_func_struct {
  // Idle-animation driver for the 2D view: each step() moves a section index
  // by di, wrapping around inside [0, N).
  int* i0, N, di;   // i0: animated index; N: wrap limit; di: step (usually +-1)
  void set(int* _i0, int _N, int _di) { i0=_i0; N=_N; di=_di; }
  void step() { *i0 += di; if(*i0<0) *i0=N-1; else if(*i0>=N) *i0=0; }
} xyz2D;
idle_func_calc icalc;
template<class Tflt>
struct idle_func_calcNdrop: public idle_func_calc {
  // Extends the base calculation step with "sensors": grid points whose values
  // are appended to sensors.dat on every step and can be plotted via gnuplot.
  FILE* sensorsStr;   // scratch stream for sensors.dat
  int* sensors;       // flat [Nsensors][3] array of (ix,iy,iz), malloc/realloc-owned
  int Nsensors;
  idle_func_calcNdrop(): sensorsStr(0), sensors(0), Nsensors(0) {}
  // BUG FIX: memory comes from malloc/realloc, so it must be released with
  // free() -- the old `delete sensors;` was undefined behaviour.
  ~idle_func_calcNdrop() { free(sensors); }
  void add_sensor(int ix, int iy, int iz) {
    // Ignore duplicates, then grow the list and rewrite the sensors.dat header.
    int* pi=sensors;
    for(int i=0; i<Nsensors; i++, pi+=3) if(pi[0] == ix && pi[1] == iy && pi[2] == iz)
      { printf(" (%d,%d,%d) . - !\n", ix, iy, iz); return; }
    Nsensors++;
    printf(" (%d,%d,%d), %d, <sensors.dat> .\n", ix, iy, iz, Nsensors);
    // realloc(NULL, n) behaves like malloc, so one call covers both cases;
    // keep the old list if the allocation fails.
    int* grown = (int*)realloc(sensors, Nsensors*3*sizeof(int));
    if(grown == 0) { Nsensors--; return; }
    sensors = grown;
    pi = sensors+3*(Nsensors-1);
    pi[0] = ix; pi[1] = iy; pi[2] = iz;
    sensorsStr = fopen("sensors.dat", "w");
    if(sensorsStr == 0) return;   // guard against unwritable working directory
    pi=sensors;
    fprintf(sensorsStr, "#");
    for(int i=0; i<Nsensors; i++, pi+=3) fprintf(sensorsStr, "\t(%d,%d,%d)", pi[0],pi[1],pi[2]);
    fprintf(sensorsStr, "\n");
    fclose(sensorsStr);
  }
#if DATA_VECTOR_SZ==1
  // Scalar data: append "t v0 v1 ..." to sensors.dat after each calc step.
  void step() {
    idle_func_calc::step();
    if(Nsensors==0) return;
    sensorsStr = fopen("sensors.dat", "a");
    if(sensorsStr == 0) return;   // guard against unwritable working directory
    fprintf(sensorsStr, "%g", t);
    int* pi=sensors;
    for(int i=0; i<Nsensors; i++, pi+=3) fprintf(sensorsStr, "\t%g", get_val_from_arr3D(pi[0], pi[1], pi[2]));
    fprintf(sensorsStr, "\n");
    fclose(sensorsStr);
  }
  // Plot all sensor time series through a persistent gnuplot pipe.
  void plot_sensors() {
    if(Nsensors==0) return;
    if(gpPipe==NULL) gpPipe = popen("gnuplot", "w");
    if(gpPipe==NULL) return;      // popen can fail; avoid fprintf(NULL)
    int* pi=sensors;
    fprintf(gpPipe, "set style data l;\nplot 'sensors.dat' u 1:2 t '%d,%d,%d'", pi[0],pi[1],pi[2]); pi+=3;
    for(int i=1; i<Nsensors; i++, pi+=3) fprintf(gpPipe, ", '' u 1:%d t '%d,%d,%d'", i+2, pi[0],pi[1],pi[2]);
    fprintf(gpPipe, "\n");
    fflush(gpPipe);
  }
#elif DATA_VECTOR_SZ==3
  // Vector data: append "t x y z ..." (three columns per sensor) each step.
  void step() {
    idle_func_calc::step();
    if(Nsensors==0) return;
    sensorsStr = fopen("sensors.dat", "a");
    if(sensorsStr == 0) return;   // guard against unwritable working directory
    fprintf(sensorsStr, "%g", t);
    int* pi=sensors;
    for(int i=0; i<Nsensors; i++, pi+=3) {
      floatT4im v=get_val_from_arr3D(pi[0], pi[1], pi[2]);
      fprintf(sensorsStr, "\t%g\t%g\t%g", v.x, v.y, v.z);
    }
    fprintf(sensorsStr, "\n");
    fclose(sensorsStr);
  }
  // Plot sensor trajectories (3D splot) or per-component curves (2D plot).
  void plot_sensors() {
    if(Nsensors==0) return;
    if(gpPipe==NULL) gpPipe = popen("gnuplot", "w");
    if(gpPipe==NULL) return;      // popen can fail; avoid fprintf(NULL)
    int* pi=sensors;
    if(render_type==3) {
      fprintf(gpPipe, "set style data lp; set ticslevel 0;\nsplot 'sensors.dat' u 2:3:4 t '%d,%d,%d'", pi[0],pi[1],pi[2]); pi+=3;
      for(int i=1; i<Nsensors; i++, pi+=3) fprintf(gpPipe, ", '' u %d:%d:%d t '%d,%d,%d'", 3*i+2,3*i+3,3*i+4, pi[0],pi[1],pi[2]);
    } else if(render_type==2) {
      fprintf(gpPipe, "set style data l;\nplot 'sensors.dat' u 1:2 t '[%d,%d,%d].x', '' u 1:3 t '[%d,%d,%d].y', '' u 1:4 t '[%d,%d,%d].z'", pi[0],pi[1],pi[2], pi[0],pi[1],pi[2], pi[0],pi[1],pi[2]); pi+=3;
      for(int i=1; i<Nsensors; i++, pi+=3) fprintf(gpPipe, ", '' u 1:%d t '[%d,%d,%d].x', '' u 1:%d t '[%d,%d,%d].y', '' u 1:%d t '[%d,%d,%d].z'", 3*i+2, pi[0],pi[1],pi[2],3*i+3, pi[0],pi[1],pi[2],3*i+4, pi[0],pi[1],pi[2]);
    }
    fprintf(gpPipe, "\n");
    fflush(gpPipe);
  }
#endif
};
idle_func_calcNdrop<floatT4im> icalcNdrop;
//void add_sensor(int ix, int iy, int iz) { icalcNdrop.add_sensor(ix, iy, iz); }
#include<hiprand/hiprand.h>
#include<hiprand/hiprand_kernel.h>
// One thread per element: seeds a per-thread RNG state and draws one random phase.
__global__ void init_rand(hiprandState_t *states, float* randArr) {
  unsigned int tid = threadIdx.x + blockDim.x * blockIdx.x;
  hiprand_init(1234, tid, 0, &states[tid]); // fixed seed 1234 -> reproducible per-thread sequences
  randArr[tid] = 2.*M_PI*hiprand_uniform (&states[tid]); // uniform random angle in (0, 2*pi]
}
// Scalar magnitude used by the min/max scan: identity for scalar data,
// Euclidean norm for 2- and 4-component texels.
__device__ float get_float4lim(float v) { return v; }
__device__ float get_float4lim(float2 v) { return length(v); }
__device__ float get_float4lim(float4 v) { return length(v); }
// Finds the {min,max} of the 3D texture over the box [IB,IE): each thread block
// scans its blkSz-sized sub-box with Nthr-strided loops, reduces the partials
// through shared memory and writes one float2 per block into fLims.
__global__ void calc_limits3D(uint3 IB, uint3 IE, uint3 blkSz, uint3 Nthr, float2* fLims) {
  float2 fLim;
  // Map the flat thread index onto a 3D starting offset inside this block's sub-box.
  IB+=blkSz*blockIdx*make_uint3(blockDim)+make_uint3(threadIdx.x/(Nthr.y*Nthr.z), (threadIdx.x/Nthr.z)%Nthr.y, threadIdx.x%Nthr.z);
  IE=min(IE,IB+blkSz);
  //if(threadIdx.x==0) printf("Blk %d from (%d,%d,%d) to (%d,%d,%d)\n",blockIdx.x+gridDim.x*(blockIdx.y+gridDim.y*blockIdx.z),IB.x,IB.y,IB.z, IE.x,IE.y,IE.z);
  fLim.x = fLim.y = get_float4lim(tex3D(data3D_tex, IB.x,IB.y,IB.z));
  // Per-thread scan: stride Nthr in each dimension over the sub-box.
  for(int ix=IB.x; ix<IE.x; ix+=Nthr.x) for(int iy=IB.y; iy<IE.y; iy+=Nthr.y) for(int iz=IB.z; iz<IE.z; iz+=Nthr.z) {
    float v=get_float4lim(tex3D(data3D_tex, ix,iy,iz));
    if(v<fLim.x) fLim.x = v;
    if(v>fLim.y) fLim.y = v;
  }
  // Stage 1: publish all per-thread partials (block size is at most 512).
  __shared__ float2 fLim_sh[512];
  fLim_sh[threadIdx.x] = fLim;
  __syncthreads();
  // Stage 2: the first warp folds all partials, warpSize apart.
  if(threadIdx.x >= warpSize) return;
  for(int i=threadIdx.x; i<blockDim.x; i+=warpSize) {
    float2 v=fLim_sh[i];
    if(v.x<fLim.x) fLim.x = v.x;
    if(v.y>fLim.y) fLim.y = v.y;
  }
  fLim_sh[threadIdx.x] = fLim;
  // NOTE(review): the write above and the read below rely on implicit intra-warp
  // synchrony; on architectures with independent thread scheduling a __syncwarp()
  // would be required here -- confirm the target hardware.
  if(threadIdx.x>0) return;
  // Stage 3: thread 0 folds the warp's results and writes the block's answer.
  for(int i=0; i<warpSize; i++) {
    float2 v=fLim_sh[i];
    if(v.x<fLim.x) fLim.x = v.x;
    if(v.y>fLim.y) fLim.y = v.y;
  }
  fLims[blockIdx.x+gridDim.x*(blockIdx.y+gridDim.y*blockIdx.z)] = fLim;
  //printf("Lim (%d,%d,%d) %d => %g %g\n",blockIdx.x,blockIdx.y,blockIdx.z, blockIdx.x+gridDim.x*(blockIdx.y+gridDim.y*blockIdx.z), fLim.x,fLim.y);
}
float2 set_lim_from_tex(uint3 IB, uint3 N) {
  // Computes the {min,max} of the 3D texture over the box starting at IB with
  // size N: launches calc_limits3D (one result per 512^3-cell block) and
  // reduces the per-block results on the host. Throws -1 on HIP errors.
  //if(N.x*N.y*N.z<512) { printf("Too small picture\n"); return make_float2(0.,1.); }
  int ind=0; uint3 Ns=N, Nthr;
  // Sort the extents into Ns.x >= Ns.y >= Ns.z, remembering the permutation in
  // `ind`, so the thread-block shape favours the longest axes.
  if(Ns.x<Ns.y) { ind += 3; int t=Ns.x; Ns.x=Ns.y; Ns.y=t; }
  if(Ns.y<Ns.z) { ind ++; int t=Ns.y; Ns.y=Ns.z; Ns.z=t; }
  if(Ns.x<Ns.y) { ind ++; int t=Ns.x; Ns.x=Ns.y; Ns.y=t; }
  // Pick up to 512 threads: <=8 along the shortest sorted axis, <=64 over the
  // two shortest, the rest along the longest.
  for(Nthr.z=1; Nthr.z<8&&Nthr.z<Ns.z; Nthr.z*=2);
  for(Nthr.y=1; Nthr.y*Nthr.z<64&&Nthr.y<Ns.y; Nthr.y*=2);
  for(Nthr.x=1; Nthr.x*Nthr.y*Nthr.z<512&&Nthr.x<Ns.x; Nthr.x*=2);
  //printf("set Lim from tex: from (%d,%d,%d) size (%d,%d,%d); ind %d; Nthr: (%d,%d,%d)\n",IB.x,IB.y,IB.z, N.x,N.y,N.z, ind, Nthr.x,Nthr.y,Nthr.z);
  // Undo the permutation so Nthr matches the original xyz order.
  if(ind%3==2) { int t=Nthr.x; Nthr.x=Nthr.y; Nthr.y=t; }
  if(ind%3>=1) { int t=Nthr.y; Nthr.y=Nthr.z; Nthr.z=t; }
  if(ind >=3) { int t=Nthr.x; Nthr.x=Nthr.y; Nthr.y=t; }
  uint3 Sblk=make_uint3(512), Nblk=(N+(Sblk-1))/Sblk;
  int NNblk=Nblk.x*Nblk.y*Nblk.z;
  float2 fLim,* fLims=0,* fLimsD=0;
  if(CHECK_ERROR(hipMalloc((void**) &fLimsD, NNblk*sizeof(float2)))) throw(-1);
  //printf("Lim: %d*%d*%d => %d Blks, %d %d %d Thrs\n",Nblk.x,Nblk.y,Nblk.z,NNblk, Nthr.x,Nthr.y,Nthr.z);
  hipLaunchKernelGGL(( calc_limits3D), dim3(Nblk),dim3(Nthr.x*Nthr.y*Nthr.z), 0, 0, IB, IB+N, Sblk, Nthr, fLimsD);
  fLims=new float2[NNblk];
  if(CHECK_ERROR(hipMemcpy(fLims, fLimsD, NNblk*sizeof(float2), hipMemcpyDeviceToHost))) {
    // BUG FIX: both buffers leaked on the error throw.
    CHECK_ERROR(hipFree(fLimsD));
    delete [] fLims;
    throw(-1);
  }
  CHECK_ERROR(hipFree(fLimsD));
  fLim = *fLims;
  for(int i=1; i<NNblk; i++) {
    if(fLims[i].x<fLim.x) fLim.x = fLims[i].x;
    if(fLims[i].y>fLim.y) fLim.y = fLims[i].y;
  }
  delete [] fLims;   // BUG FIX: was `delete` on a new[] array (undefined behaviour)
  return fLim;
}
int print_help();
// Prints the interactive help: the global key listing first (::print_help),
// then the 3D-viewer bindings (the embedded descriptions were originally
// Russian; their encoding has been lost), then the palette help. The trailing
// printf arguments substitute the current parameter values into the listing.
void im3D_pars::print_help() {
  ::print_help();
  printf("\
======= :\n\
ESC \t \n\
32 \t 3D2D (%dD)\n\
4 \t 3D / / \n\
<EnterBackSpace>\t \n\
wW \t %s , , \n\
rR \t \n\
Ctr-r\t \n\
fF \t \n\
vV \t (%d)\n\
Ctr-v\t , , \n\
Ctr-w\t \n\
sS \t png| gnuplot\n\
~ \t (x0,y0,z0), xyz 2D\n\
#$%% \t 3D: \n\
@ \t : / /2D / 3D\n\
! \t \n\
Ctr-z\t , , , 3D\n\
kK \t \n\
Ctr-k\t ( 2D)\n\
mM \t (%g), : \n\
eE \t (%g),\n\
dD \t (%g)\n\
jJ \t (%g)\n\
Ctr-f\t ( 3D/point 2D)\n\
Ctr-d\t 3D \n\
Ctr-L\t <add.opt>, : 1 , \n\
aA \t fMin..fMax\n\
Ctr-a\t fMin..fMax \n\
Ctr-s\t , \n\
1 \t (, xyz) , gnuplot (%c)\n\
oCtr-o\t gnuplot \n\
OQ \t (x0,y0,z0): , \n\
qCtr-q\t sensors.dat gnuplot \n\
======= :\n\
gG \t GLUT (%d)\n\
xyzXYZ\t 2D, 3D : (%d,%d,%d)\n\
xyzXYZ\t 3D: x,y,z (%g,%g,%g)\n\
uU \t 3D: (%g)\n\
======= (LRM --- ):\n\
L \t 2D , \n\
LRM \t 3D: \n\
Ctr-L\t \n\
( 20 ):\n\
LRM \t , x- \n\
LR \t Ctl-t ( ) \n\
", render_type, optfName, type_diag_flag, tstep, randR, density, opacity, "xyz"[sec1Daxis], recalc_always, ix0, iy0, iz0, viewRotation[0], viewRotation[1], viewRotation[2], viewTranslation[2]);
  imHost.print_help();
}
// normal shift Ctrl
//DEL
//` 67 %^&* `1234567890
// i I e yu []
// ;' H :" d ghj ;'\
// n ,. BN <> zx bnm,./
// Main keyboard dispatcher. Returns true when the key was consumed; sets
// recalc_at_once when the scene must be re-rendered. Non-printable case labels
// are Ctrl-combinations: 1=^A, 4=^D, 6=^F, 11=^K, 12=^L, 15=^O, 17=^Q, 18=^R,
// 19=^S, 22=^V, 23=^W, 26=^Z, 27=ESC. Several cases fall through on purpose
// ('w'->'W' save, 'R'->'r' load, '1'->^O->'o' replot).
bool im3D_pars::key_func(unsigned char key, int x, int y) {
  recalc_at_once=true;
  size_t rs=0;
  // Remember the last pressed keys: reset_title() uses them to pick title parts.
  if(key != addTitleStr[TitleStrInd]) addTitleStr[(TitleStrInd++)%4] = key;
  switch(key) {
  case 'A': imHost.set_lim(fMin, fMax); return true;
  case 'a': { float2 fLim=set_lim_from_tex(make_uint3(0,0,0), make_uint3(Nx,Ny,Nz)); imHost.set_lim(fLim.x, fLim.y); } return true;
  // case 'a': { float2 fLim=make_float2(-0.15,+0.15); imHost.set_lim(fLim.x, fLim.y); } return true;
  case 1: { fMin = imHost.fmin; fMax = imHost.fmax; } return true;  // ^A: store limits
  case 19: { float2 fLim=make_float2(-1,1);  // ^S: limits from the current section only
  switch(sec1Daxis) {
  case 0: fLim=set_lim_from_tex(make_uint3(ix0,0,0), make_uint3(1,Ny,Nz)); break;
  case 1: fLim=set_lim_from_tex(make_uint3(0,iy0,0), make_uint3(Nx,1,Nz)); break;
  case 2: fLim=set_lim_from_tex(make_uint3(0,0,iz0), make_uint3(Nx,Ny,1)); break;
  }; imHost.set_lim(fLim.x, fLim.y);
  } return true;
  case 18: ::reset(this); return true;  // ^R: reset viewer state
  case 'w': {
  // 'w': rewind to the previous record, then save (falls through to 'W').
  printf("Drop %ldB fpal & %ldB im3D\n", sizeof(fpal_pars), sizeof(im3D_pars4save));
  if(optfid>=0 && im3DIopt_shift) rs=lseek(optfid,-im3DIopt_shift, SEEK_CUR);
  }
  // fall through
  case 'W': if(optfid>=0) {
  // Append one record: sizeof header + fpal, then sizeof header + im3D state.
  int sz=sizeof(fpal_pars); im3DIopt_shift = 0;
  rs=write(optfid, &sz, sizeof(sz)); im3DIopt_shift += rs;
  rs=write(optfid, &imHost, sz); im3DIopt_shift += rs;
  sz = sizeof(im3D_pars4save);
  rs=write(optfid, &sz, sizeof(sz)); im3DIopt_shift += rs;
  rs=write(optfid, this, sz); im3DIopt_shift += rs;
  } recalc_at_once=false; return true;
  case 'R': if(optfid>=0 && im3DIopt_shift) rs=lseek(optfid,-im3DIopt_shift, SEEK_CUR);
  // fall through ('R' re-reads the record just loaded)
  case 'r': if(optfid>=0) {
  // Load one record; ld_sz.x/.y may override the sizes stored in the file.
  int sz=ld_sz.x;
  im3DIopt_shift = 0;
  if(sz==0) {
  rs = read(optfid, &sz, sizeof(sz));
  if(sz<=0 || sz>sizeof(fpal_pars)) { printf("Illegal Drop format\n"); return true; }
  im3DIopt_shift += rs;
  }
  rs=read(optfid, &imHost, sz); printf("Load %ldB fpal of %ldB", rs, sizeof(fpal_pars)); im3DIopt_shift += rs;
  sz=ld_sz.y;
  if(sz==0) {
  rs = read(optfid, &sz, sizeof(sz));
  if(sz<=0 || sz>sizeof(im3D_pars4save)) { printf("Illegal Drop format\n"); return true; }
  im3DIopt_shift += rs;
  }
  rs=read(optfid, this, sz); printf(" & %ldB im3D of %ldB\n", rs, sizeof(im3D_pars4save)); im3DIopt_shift += rs;
  initTex();
  } return true;
  case 'f': if(optfid>=0) lseek(optfid,0, SEEK_SET); recalc_at_once=false; return true;  // rewind options file
  case 'F': if(optfid>=0) lseek(optfid,0, SEEK_END); recalc_at_once=false; return true;  // seek to its end
  case 23: baseTitleFlag ++; return true; //recalc_at_once=false;
  case 22: recalc_at_once=false;  // ^V: print frame statistics
  printf("%s\nFrame %d (%.2f/%.2f fps), last run Times: %7.2f msec\n", WinTitle, imHost.nFrame, SmoothFPS, 1000./runTime, runTime);
  return true;
  case 'v': recalc_at_once=false; type_diag_flag++; return true;
  case 'V': recalc_at_once=false; type_diag_flag--; return true;
  case 'S': recalc_at_once=save_gp(); return true;
  case 's': recalc_at_once=save_png(imHost.nFpng); return true;
  // Parameter tuning: all step by 2^(1/4) per key press.
  case 'e': randR *= sqrt(sqrt(2)); return true;
  case 'E': randR /= sqrt(sqrt(2)); return true;
  case 'm': tstep /= sqrt(sqrt(2)); density /= sqrt(sqrt(2)); return true;
  case 'M': tstep *= sqrt(sqrt(2)); density *= sqrt(sqrt(2)); return true;
  case 'd': density *= sqrt(sqrt(2)); return true;
  case 'D': density /= sqrt(sqrt(2)); return true;
  case 'j': opacity = 1.0 - (1.0-opacity)/sqrt(sqrt(2)); return true;
  case 'J': opacity = 1.0 - (1.0-opacity)*sqrt(sqrt(2)); return true;
  // Display-mode toggles.
  case '@': draw_bmp4backgrownd = (draw_bmp4backgrownd+1)%4; return true;
  case '#': draw_mesh_flag ^= true; return true;
  case '$': draw_box_flag ^= true; return true;
  case '%': draw_fg_flag ^= true; return true;
  case 4 : dissect_box_flag ^= true; return true;
  case '~': draw_sec_xyz_flag ^= true; return true;
  case 6 : filterMode_flag ^= true; initTex(); return true;
  case '!': save_bmp4backgrownd(); return true;
  case '2': render_type=2; initTex(); return true;
  case '3': render_type=3; initTex(); return true;
  case '4': mode3D=(mode3D+1)%3; return true;//grad_mode ^= true; imHost.palDim = 1 + 2*grad_mode; return true;
  case '5': imHost.pal3Daxis = (imHost.pal3Daxis+1)%3; return true;
  case 'g': recalc_always=false; return true;
  case 'G': recalc_always=true; return true;
  case 'Q': recalc_at_once=false; icalcNdrop.add_sensor(ix0, iy0, iz0); return true;
  case 'q': icalcNdrop.step(); return true;
  case 11 : contour_flag ^= true; return true;
  case 'k': contour_width *= 1.2; return true;
  case 'K': contour_width /= 1.2; return true;
  case 12 : {  // ^L: replay option lines from add.opt as command-line arguments
  FILE* cmd=fopen("add.opt", "r"); if(cmd) {
  char str[1024],* argv[2]; argv[0] = str;
  while(fgets(str, 1024, cmd)) {
  char* c=strchr(str, ' ');
  if(c) {
  if(*c==' ') *c = 0;
  argv[1] = c+1;
  } else argv[1] = str;
  init_from_command_line(argv);
  }
  fclose(cmd);
  }}
  //recalc_at_once=false;
  return true;
  case 17: recalc_at_once=false; icalcNdrop.plot_sensors(); return true;
  case 'O': recalc_at_once=false; save_section(); return true;
  case '1': sec1Daxis = (sec1Daxis+1)%4;
  printf("1D section for gnuplot set to %c\n","xyzA"[sec1Daxis]);
  // fall through: changing the axis restarts the gnuplot pipe and replots
  case 15: recalc_at_once=false;
  if(gpPipe) { pclose(gpPipe); gpPipe = 0; }
  // fall through
  case 'o': recalc_at_once=false; plot_section(); return true;
  case 'b': xyz = &icalcNdrop; return true;  // use the calc step as the idle function
  case 26:  // ^Z: move the rotation pivot to the current section point
  RotPoint[0] = float(ix0)/Nx;
  RotPoint[1] = float(iy0)/Ny;
  RotPoint[2] = float(iz0)/Nz;
  return true;
  case 'x': case 'X': case 'y': case 'Y': case 'z': case 'Z': case 'u': case 'U':
  // Axis keys animate either the 2D section index or the 3D rotation/zoom.
  if(render_type==2 || draw_sec_xyz_flag) { switch(key) {
  case 'x': xyz2D.set(&ix0, Nx, 1); break;
  case 'X': xyz2D.set(&ix0, Nx,-1); break;
  case 'y': xyz2D.set(&iy0, Ny, 1); break;
  case 'Y': xyz2D.set(&iy0, Ny,-1); break;
  case 'z': xyz2D.set(&iz0, Nz, 1); break;
  case 'Z': xyz2D.set(&iz0, Nz,-1); break;
  default: return true;
  } xyz = &xyz2D; xyz->step();
  } else if(render_type==3) { switch(key) {
  case 'x': xyz3D.set(&viewRotation[0], 0.5f); break;
  case 'X': xyz3D.set(&viewRotation[0],-0.5f); break;
  case 'y': xyz3D.set(&viewRotation[1], 0.5f); break;
  case 'Y': xyz3D.set(&viewRotation[1],-0.5f); break;
  case 'z': xyz3D.set(&viewRotation[2], 0.5f); break;
  case 'Z': xyz3D.set(&viewRotation[2],-0.5f); break;
  case 'u': xyz3D.set(&viewTranslation[2], 0.01f); break;
  case 'U': xyz3D.set(&viewTranslation[2],-0.01f); break;
  } xyz = &xyz3D; xyz->step();
  }
  return true;
  case 27: clear4exit(); exit(0);  // ESC: clean up and quit
  default:
  if(imHost.key_func(key, x, y)) return true;  // give the palette a chance
  }
  recalc_at_once=false;
  if(rs==0) return false;
  return false;
}
struct MKstates {
  // Mouse/keyboard state remembered between GLUT callbacks.
  int ox, oy;        // last mouse position in corrected (image) coordinates
  int buttonState;   // bit mask of currently pressed buttons (1<<button)
  int modState;      // GLUT modifier mask captured at press time
  MKstates(): ox(0),oy(0), buttonState(0),modState(0) {}
  // Converts GLUT window coordinates into image coordinates, compensating for
  // the pan offset (im2D.xPos/yPos) of the image inside the window.
  void correct_screen_coor(int& x, int& y) {
    x -= im2D.xPos;
    y += im2D.yPos-(glutGet(GLUT_WINDOW_HEIGHT)-im2D.Ny);
  }
  // Records a press/release: sets the button bit on press, clears all bits on
  // release, and stores position and modifiers for later drag handling.
  void grabState(int button, int state, int x, int y) {
    modState = glutGetModifiers();
    if(state == GLUT_DOWN) buttonState |= 1<<button;
    else if(state == GLUT_UP) buttonState = 0;
    ox = x;
    oy = y;
  }
} mk_state;
bool im3D_pars::special_func(unsigned char key, int x, int y) {
  // Handles GLUT special keys (arrows, Home/End, PageUp/PageDown): pans the
  // rendered image inside the window, clamped to the window bounds. Returns
  // true when the key was consumed (and the scene should be redrawn).
  mk_state.correct_screen_coor(x,y);
  if(type_diag_flag>=2) printf("special_func, keyN=%d, coors=(%d,%d)\n", key, x, y);
  recalc_at_once=true;
  if(key != addTitleStr[TitleStrInd]) addTitleStr[(TitleStrInd++)%4] = key;
  // Pan step: 10 px, scaled by the active modifier (Ctrl x100, Shift x10, Alt /10).
  int modState = glutGetModifiers(), zoom=10;
  if(modState == GLUT_ACTIVE_CTRL) zoom *= 100;
  if(modState == GLUT_ACTIVE_SHIFT) zoom *= 10;
  if(modState == GLUT_ACTIVE_ALT) zoom /= 10;
  switch(key) {
  case GLUT_KEY_PAGE_UP: im2D.yPos = glutGet(GLUT_WINDOW_HEIGHT)-im2D.Ny; return true;
  case GLUT_KEY_PAGE_DOWN: im2D.yPos = 0; return true;
  case GLUT_KEY_DOWN: im2D.yPos += zoom; if(im2D.yPos>0) im2D.yPos=0; return true;
  case GLUT_KEY_UP: im2D.yPos -= zoom; {
    int yPosMax=glutGet(GLUT_WINDOW_HEIGHT)-im2D.Ny;
    if(im2D.yPos<yPosMax) im2D.yPos = yPosMax;
  } return true;
  case GLUT_KEY_HOME: im2D.xPos = 0; return true;
  case GLUT_KEY_END: im2D.xPos = glutGet(GLUT_WINDOW_WIDTH)-im2D.Nx; return true;
  case GLUT_KEY_LEFT: im2D.xPos += zoom; if(im2D.xPos>0) im2D.xPos=0; return true;
  case GLUT_KEY_RIGHT: im2D.xPos -= zoom; {
    int xPosMax=glutGet(GLUT_WINDOW_WIDTH)-im2D.Nx;
    if(im2D.xPos<xPosMax) im2D.xPos = xPosMax;
  } return true;
  }
  // Unrecognized key: no redraw. (The dead `rs` bookkeeping and the duplicate
  // `return false` of the original were removed -- both paths returned false.)
  recalc_at_once=false;
  return false;
}
// Mouse button handler. Clicks inside the top 20-pixel palette strip adjust
// the colour limits / transparency bits; elsewhere a press is recorded in
// mk_state. In 3D mode a button release folds the temporary drag rotation
// into the stored Euler angles; in 2D mode a plain click re-centres at the
// clicked pixel.
void im3D_pars::mouse_func(int button, int state, int x, int y) {
  mk_state.correct_screen_coor(x,y);
  if(y<20 && state == GLUT_DOWN) {
    if(imHost.transparency_discrete_flag) {
      // Palette click in discrete mode: toggle the transparency bit of the band.
      int ic=floor(0.5+(imHost.pscale)*float(x)/float(bNx));
      switch(button) {
      case 0: imHost.transparency_mode |= (1<<ic); break;
      case 1: imHost.transparency_mode ^= (1<<ic); break;
      case 2: imHost.transparency_mode &= ~(1<<ic); break;
      };
    } else {
      // Palette click in continuous mode: left sets fmin, right sets fmax,
      // middle centres the range on the clicked value.
      float f=imHost.fmin + x/float(bNx)*(imHost.fmax-imHost.fmin);
      switch(button) {
      case 0: imHost.set_lim(f,imHost.fmax); break;
      case 2: imHost.set_lim(imHost.fmin,f); break;
      case 1:
      float df=(f-imHost.fmin)>(imHost.fmax-f)?(f-imHost.fmin):(imHost.fmax-f);
      imHost.set_lim(f-df,f+df); break;
      };
      if(type_diag_flag>=3) printf("mouse pal: %d,%d, button %d, state %d\n", x,y, button, state);
      recalc_at_once=true;
    }
    return;
  }
  mk_state.grabState(button, state, x,y);
  if(render_type==3) {
    if (state == GLUT_UP) {
      // Compose the stored rotation with the drag rotation and decompose the
      // product back into Euler angles (atan2 extraction from the matrix).
      RotMatr R=RotMatr(0,viewRotation[0]), Ry=RotMatr(1,viewRotation[1]), Rz=RotMatr(2,viewRotation[2]), RxT=RotMatr(0,viewRotationTmp[0]), RyT=RotMatr(1,viewRotationTmp[1]);
      R *= Ry; R *= Rz; R *= RxT; R *= RyT;
      /*for(int i=0; i<3; i++) {
      printf("(");
      float s2=0.0;
      for(int j=0; j<3; j++) { printf("\t%g", R.v[i][j]); s2 += R.v[i][j]*R.v[i][j]; }
      printf("); %g\n", s2);
      }*/
      //printf("Mouse: (%g,%g,%g)+(%g,%g) -> ", viewRotation[0], viewRotation[1], viewRotation[2], viewRotationTmp[0], viewRotationTmp[1]);
      double Sy=-R.v[2][0], Cy=sqrt(1.-Sy*Sy), phi[3];
      phi[1] = atan2(Sy,Cy);
      if(Cy>0) {
        double Sx=R.v[2][1]/Cy, Cx=R.v[2][2]/Cy; phi[0]=atan2(Sx,Cx);
        double Sz=R.v[1][0]/Cy, Cz=R.v[0][0]/Cy; phi[2]=atan2(Sz,Cz);
      } else {
        // Gimbal-lock branch: Cy==0, only the combined x+z angle is defined.
        double Cxz=R.v[1][1], Sxz=R.v[0][1]*Sy;
        phi[0]=atan2(Sxz, Cxz); phi[2]=0;
      }
      for(int i=0; i<3; i++) viewRotationTmp[i] = 0;
      for(int i=0; i<3; i++) viewRotation[i] = phi[i]*180.0/M_PI;
      //printf(" (%g,%g,%g)\n", viewRotation[0], viewRotation[1], viewRotation[2]);
    }
  } else {
    // 2D mode: a plain (non-Ctrl) press inside the image recentres the view.
    if (state == GLUT_DOWN && mk_state.modState != GLUT_ACTIVE_CTRL) { if(0<=x && x<bNx && 0<=y && y<bNy) reset0(x,bNy-1-y); }
  }
  recalc_at_once=true;
  glutPostRedisplay();
}
void im3D_pars::motion_func(int x, int y) {
  // Mouse-drag handler: pans the 2D view (Ctrl-drag) or, in 3D mode, moves the
  // eye point (Ctrl) / zooms (right button) / translates (middle) / rotates (left).
  mk_state.correct_screen_coor(x,y);
  if(type_diag_flag>=3) printf("motion func: %d,%d -> %d,%d\n",mk_state.ox,mk_state.oy, x,y);
  if(y<20) {
    return;   // drags inside the palette strip are ignored
  }
  const float dx = (float)(x - mk_state.ox);
  const float dy = (float)(y - mk_state.oy);
  const bool ctrl = (mk_state.modState == GLUT_ACTIVE_CTRL);
  if(render_type==2) {
    if(ctrl) shift0(mk_state.ox,bNy-1-mk_state.oy, x,bNy-1-y);
  } else if(ctrl) {
    eyePoint.x = x;
    eyePoint.y = bNy-y;
  } else switch(mk_state.buttonState) {
    case 4:                       // right button: zoom
      viewTranslation[2] += dy / 100.0f;
      break;
    case 2:                       // middle button: translate
      viewTranslation[0] += dx / 100.0f;
      viewTranslation[1] -= dy / 100.0f;
      break;
    case 1:                       // left button: rotate (folded in on release)
      viewRotationTmp[0] += dy / 5.0f;
      viewRotationTmp[1] += dx / 5.0f;
      break;
  }
  mk_state.ox = x;
  mk_state.oy = y;
  recalc_at_once=true;
  glutPostRedisplay();
}
//int cfX=0, cfY=0;
__global__ void im3Dclear(uchar4 bgk_col) {
  // Fill the whole bNx x bNy frame buffer with the background colour,
  // one pixel per thread on a 2D launch grid.
  const int px = blockDim.x*blockIdx.x + threadIdx.x;
  const int py = blockDim.y*blockIdx.y + threadIdx.y;
  if(px >= im3D.bNx || py >= im3D.bNy) return;   // clip partial edge blocks
  im.bmp[py*im3D.bNx + px] = bgk_col;
}
// Draws one axis-aligned 2D section of the volume into the frame buffer at
// pixel offset sh. Template parameters cx,cy,cz assign a role to each data
// axis: 0 = fixed at i0 (the section plane), 1 = mapped to screen x1,
// 2 = mapped to screen x2. The *_zoom factors downsample data to pixels.
template<int cx, int cy, int cz>
__global__ void im3Ddraw_any(int sh, int i0) {
  int x1=blockIdx.x*blockDim.x+threadIdx.x, x2=blockIdx.y*blockDim.y+threadIdx.y;
  int p1=sh%im3D.bNx+x1, p2=sh/im3D.bNx+x2;   // absolute pixel coordinates
  if(0>p1 || p1>=im3D.bNx || 0>p2 || p2>=im3D.bNy) return;   // clip to the frame buffer
  int ix = cx==0?i0:((cx==1?x1:x2)*im3D.x_zoom);
  int iy = cy==0?i0:((cy==1?x1:x2)*im3D.y_zoom);
  int iz = cz==0?i0:((cz==1?x1:x2)*im3D.z_zoom);
  if(ix<im3D.Nx && iy<im3D.Ny && iz<im3D.Nz) {
    uchar4 c=im.get_color(tex3D(data3D_tex, ix,iy,iz));
    // Cross-hair: invert pixels on the other sections' planes near (ix0,iy0,iz0).
    if(im3D.draw_sec_xyz_flag && (abs(ix-im3D.ix0)<20 && abs(iy-im3D.iy0)<20 && abs(iz-im3D.iz0)<20) && (cx>0 && ix==im3D.ix0 || cy>0 && iy==im3D.iy0 || cz>0 && iz==im3D.iz0)) c = make_uchar4(255-c.x,255-c.y,255-c.z,c.w);
#if DATA_VECTOR_SZ==1
    if(im3D.contour_flag) {
      // Contour overlay: invert the pixel where a level line of cntr_levels
      // crosses between the +-contour_width neighbours along any axis.
      // (relational ops bind tighter than ^, so `vp>0 ^ vm<0` is (vp>0)^(vm<0))
      for(int i=0; i<im3D.cntr_num; i++) {
        float vp=tex3D(data3D_tex, ix+im3D.contour_width,iy,iz);
        float vm=tex3D(data3D_tex, ix-im3D.contour_width,iy,iz);
        float lv=im3D.cntr_levels[i];
        if(vp != 0 && vm != 0 && (vp>0 ^ vm<0) && (vp>lv ^ vm>lv)) { c = make_uchar4(255-c.x,255-c.y,255-c.z,c.w); continue; }
        vp=tex3D(data3D_tex, ix,iy+im3D.contour_width,iz);
        vm=tex3D(data3D_tex, ix,iy-im3D.contour_width,iz);
        if(vp != 0 && vm != 0 && (vp>0 ^ vm<0) && (vp>lv ^ vm>lv)) { c = make_uchar4(255-c.x,255-c.y,255-c.z,c.w); continue; }
        vp=tex3D(data3D_tex, ix,iy,iz+im3D.contour_width);
        vm=tex3D(data3D_tex, ix,iy,iz-im3D.contour_width);
        if(vp != 0 && vm != 0 && (vp>0 ^ vm<0) && (vp>lv ^ vm>lv)) { c = make_uchar4(255-c.x,255-c.y,255-c.z,c.w); continue; }
      }
      //if((1<v && v<1623/1536.) || (-1>v && v>-1623/1536.)) c = make_uchar4(255-c.x,255-c.y,255-c.z,c.w);
    }
#endif
    im.bmp[sh+x1+x2*im3D.bNx] = c;
  }
  //if(x1==128 && x2==128) printf("res(%d,%d,%d)=%g\n", ix,iy,iz, tex3D(data3D_tex, ix,iy,iz));
}
__global__ void draw_pal() {
  // Paint a 20-pixel-high palette strip starting at buffer offset pal_sh:
  // column x shows the colour of the value at fraction x/bNx of [fmin,fmax].
  const int x = blockIdx.x*blockDim.x + threadIdx.x;
  const uchar4 col = im.get_color(im.fmin + (float(x)/im3D.bNx)*(im.fmax-im.fmin));
  uchar4* row = im.bmp + im3D.pal_sh;
  for(int y=0; y<20; ++y, row += im3D.bNx) row[x] = col;
}
__global__ void negate() {
  // Invert every pixel of the frame-buffer column handled by this thread.
  const int col = blockIdx.x*blockDim.x + threadIdx.x;
  const uchar4 white = make_uchar4(255,255,255,255);
  uchar4* p = im.bmp + col;
  for(int row=0; row<im3D.bNy; ++row, p += im3D.bNx) *p = white - *p;
}
float invViewMatrix[12];
typedef struct {
float4 m[3];
} float3x4;
// 3D cuda5.5: 2_Graphics/volumeRender/volumeRender_kernel.cu
__constant__ float3x4 c_invViewMatrix; // inverse view matrix
struct Ray {
float3 o; // origin
float3 d; // direction
};
// Ray/AABB intersection (slab method, after the CUDA volumeRender sample).
// Returns nonzero if the ray hits the box; entry/exit distances go to
// *tnear/*tfar. With dissect_box_flag set, the sub-box reaching up to
// (ix0+1,iy0+1,iz0+1) is cut away by pushing tnear past it, exposing the
// interior of the volume.
__device__ int intersectBox(Ray r, float3 boxmin, float3 boxmax, float *tnear, float *tfar) {
  // compute intersection of ray with all six bbox planes
  float3 invR = make_float3(1.0f) / (r.d+1e-5);   // +1e-5 avoids division by zero on axis-parallel rays
  float3 tbot = invR * (boxmin - r.o);
  float3 ttop = invR * (boxmax - r.o);
  // re-order intersections to find smallest and largest on each axis
  float3 tmin = fminf(ttop, tbot);
  float3 tmax = fmaxf(ttop, tbot);
  // find the largest tmin and the smallest tmax
  float largest_tmin = fmaxf(fmaxf(tmin.x, tmin.y), fmaxf(tmin.x, tmin.z));
  float smallest_tmax = fminf(fminf(tmax.x, tmax.y), fminf(tmax.x, tmax.z));
  *tnear = largest_tmin;
  *tfar = smallest_tmax;
  if(im3D.dissect_box_flag) {
    // Cut-away: intersect with the sub-box [boxmin, boxmid] and, when the ray
    // enters the full box through it, advance tnear to the sub-box exit.
    float3 boxmid=boxmin+make_float3(im3D.BoxFactor[0]*(im3D.ix0+1), im3D.BoxFactor[1]*(im3D.iy0+1), im3D.BoxFactor[2]*(im3D.iz0+1));
    float3 ttopC= invR * (boxmid - r.o);
    float3 tminC = fminf(ttopC, tbot);
    float3 tmaxC = fmaxf(ttopC, tbot);
    float largest_tminC = fmaxf(fmaxf(tminC.x, tminC.y), fmaxf(tminC.x, tminC.z));
    float smallest_tmaxC = fminf(fminf(tmaxC.x, tmaxC.y), fminf(tmaxC.x, tmaxC.z));
    if(smallest_tmaxC > largest_tminC && largest_tmin == largest_tminC) *tnear = smallest_tmaxC;
  }
  return smallest_tmax > largest_tmin;
}
// Transform a direction vector by the rotation part of M (translation ignored).
__device__
float3 mul(const float3x4 &M, const float3 &v)
{
  return make_float3(dot(v, make_float3(M.m[0])),
                     dot(v, make_float3(M.m[1])),
                     dot(v, make_float3(M.m[2])));
}
// Transform a point by M (row dot-products include the translation column);
// the result's w component is forced to 1.
__device__
float4 mul(const float3x4 &M, const float4 &v)
{
  return make_float4(dot(v, M.m[0]),
                     dot(v, M.m[1]),
                     dot(v, M.m[2]),
                     1.0f);
}
// Blends the float colour rgba (weighted by its alpha) over the 8-bit
// background pixel bk, clamps each channel to [0,1] and packs to 8-bit RGBA.
__device__ uchar4 rgbaFloatToInt(float4 rgba, uchar4 bk) {
  // float literals (were `1.`/`255.`): keeps the blend in single precision on-device
  float a=rgba.w, da=(1.0f-a)/255.0f;
  rgba.x = __saturatef(bk.x*da+a*rgba.x); // clamp to [0.0, 1.0]
  rgba.y = __saturatef(bk.y*da+a*rgba.y);
  rgba.z = __saturatef(bk.z*da+a*rgba.z);
  rgba.w = __saturatef(rgba.w);
  return make_uchar4((rgba.x*255.f), (rgba.y*255.f), (rgba.z*255.f), (rgba.w*255.f));
}
// Clamp each channel of rgba to [0,1] and pack it into an 8-bit RGBA pixel.
__device__ uchar4 rgbaFloatToInt(float4 rgba) {
  const float r = __saturatef(rgba.x);
  const float g = __saturatef(rgba.y);
  const float b = __saturatef(rgba.z);
  const float a = __saturatef(rgba.w);
  return make_uchar4(r*255.f, g*255.f, b*255.f, a*255.f);
}
// Draws three circular colour-sphere legends for the 3D vector palette at
// buffer offset pal_sh: slices through the (x,y), (z,y) and (x,z) planes,
// placed side by side blockDim.x pixels apart. Launched with one block column
// per legend column and one thread per legend row.
__global__ void draw_pal3D() {
  float x=2.0f*(0.5f+blockIdx.x)/gridDim.x-1.0f, y=2.0f*(0.5f+threadIdx.x)/blockDim.x-1.0f;
  float r2=x*x+y*y;
  if(r2>1.0f) return;   // outside the unit disc
  // (the original's unused `r = sqrt(r2)` / `r1 = sqrt(1-r2)` locals are removed)
  uchar4* bmp = im.bmp+(im3D.pal_sh+im3D.bNx*(int(threadIdx.x)-int(blockDim.x/2))+blockIdx.x);
  bmp[0] = rgbaFloatToInt(im.get_color_for3D(make_float4(x,y,0,1)));
  bmp[blockDim.x] = rgbaFloatToInt(im.get_color_for3D(make_float4(0,y,x,1)));
  bmp[2*blockDim.x] = rgbaFloatToInt(im.get_color_for3D(make_float4(x,0,y,1)));
}
// Quadratic falloff weight: 1-x^2 clamped to [0,1]; 1 at x=0, 0 for |x|>=1.
__device__ float smooth(float x) {
  float v = 1.0f - x*x;
  return __saturatef(v);
}
//------------------------------
// Compute the axis-aligned data box in world units: the box spans
// BoxFactor*N per axis and is shifted so that the rotation point
// (im3D.RotPoint, given as a fraction of the box size per axis)
// lands at the coordinate origin.
inline __device__ void set_boxMinMax(float3& boxMin, float3& boxMax) {
  float3 boxSize=make_float3(im3D.BoxFactor[0]*im3D.Nx, im3D.BoxFactor[1]*im3D.Ny, im3D.BoxFactor[2]*im3D.Nz);
  //boxMax = 0.5f*boxSize;
  //boxMin =-0.5f*boxSize;
  //boxMax = boxSize;
  // cntr is the rotation point in world units; the box is centered on it
  float3 cntr=(float3&)im3D.RotPoint*boxSize;
  boxMax = boxSize-cntr;
  boxMin =-cntr;
}
// Build the eye ray for bitmap pixel (x,y). The ray origin is derived from
// the inverse view matrix's translation column scaled by 0.32*(Nx+Ny+Nz);
// the direction goes through the pixel relative to im3D.eyePoint and is
// rotated into world space by the inverse view matrix.
inline __device__ void set_eyeRay(Ray& eyeRay, float x, float y) {
  // pixel -> normalized screen scale
  const float dbNxy=2.0f/(im3D.bNx+im3D.bNy);
  const int Nsum=im3D.Nx+im3D.Ny+im3D.Nz;
  eyeRay.o = make_float3(mul(c_invViewMatrix, make_float4(0.0f, 0.0f, 0.0f, 0.32f*Nsum)));
  eyeRay.d = normalize(make_float3((x-im3D.eyePoint.x)*dbNxy, (y-im3D.eyePoint.y)*dbNxy, -2.0f));
  eyeRay.d = mul(c_invViewMatrix, eyeRay.d);
}
// Compose the background pixel for eye ray r at bitmap offset bmp_sh.
// The pixel starts from either a previously saved background bitmap or the
// solid background color; if enabled, the box edges and/or coordinate mesh
// lines on the FAR side of the (shrunken) box are blended in.
// Returns a reference to the pixel so callers can keep modifying it.
__device__ uchar4& get_backgrownd(Ray r, float3 boxmin, float3 boxmax, int bmp_sh) {
  float3 bkgr_col=(float3&)im3D.bkgr_col, box_shrink=(float3&)im3D.box_shrink;
  float3 boxMin=box_shrink*boxmin, boxMax=box_shrink*boxmax;
  float3 fcol=make_float3(0);
  uchar4& vbmp=im.bmp[bmp_sh];
  if(im3D.draw_bmp4backgrownd && im.bmp4backgrownd != 0) vbmp = im.bmp4backgrownd[bmp_sh];
  else { fcol = bkgr_col; vbmp = make_uchar4(0,0,0,0); }
  if(im3D.draw_mesh_flag || im3D.draw_box_flag) {
    // ray parameters of the six box planes (+1e-5 guards against div-by-zero)
    float3 invR = make_float3(1.0f) / (r.d+1e-5);
    float3 tB = invR * (boxMin - r.o);
    float3 tT = invR * (boxMax - r.o);
    // hit point on the far z-, y- and x-plane of the box, respectively
    float tz=r.d.z<0?tB.z:tT.z, xZ=r.o.x+r.d.x*tz, yZ=r.o.y+r.d.y*tz;
    float ty=r.d.y<0?tB.y:tT.y, zY=r.o.z+r.d.z*ty, xY=r.o.x+r.d.x*ty;
    float tx=r.d.x<0?tB.x:tT.x, yX=r.o.y+r.d.y*tx, zX=r.o.z+r.d.z*tx;
    float mval=im3D.Dmesh;              // distance to the nearest mesh line (Dmesh = "no line")
    float3 mb=(float3&)im3D.MeshBox;    // mesh line spacing per axis
    float3 ms=(float3&)im3D.MeshShift;  // mesh line phase shift per axis
    if(im3D.draw_box_flag) {
      // signed distances from each face hit point to the face's four borders
      float xZn=xZ-boxmin.x, yZn=yZ-boxmin.y, xZx=boxmax.x-xZ, yZx=boxmax.y-yZ;
      float zYn=zY-boxmin.z, xYn=xY-boxmin.x, zYx=boxmax.z-zY, xYx=boxmax.x-xY;
      float yXn=yX-boxmin.y, zXn=zX-boxmin.z, yXx=boxmax.y-yX, zXx=boxmax.z-zX;
      float zval=im3D.Dmesh, dm=im3D.Dmesh;
      if(xZn>=-dm && yZn>=-dm && xZx>=-dm && yZx>=-dm) {
        if(im3D.draw_mesh_flag) { mval=fminf(mval,fminf(fabsf(remainderf(xZ-ms.x, mb.x)), fabsf(remainderf(yZ-ms.y, mb.y)))); }
        zval=fminf(zval,fminf(fminf(fabs(xZn), fabs(yZn)), fminf(fabs(xZx), fabs(yZx))));
      }
      if(zYn>=-dm && xYn>=-dm && zYx>=-dm && xYx>=-dm) {
        if(im3D.draw_mesh_flag) { mval=fminf(mval,fminf(fabsf(remainderf(zY-ms.z, mb.z)), fabsf(remainderf(xY-ms.x, mb.x)))); }
        zval=fminf(zval,fminf(fminf(fabs(xYn), fabs(zYn)), fminf(fabs(xYx), fabs(zYx))));
      }
      if(yXn>=-dm && zXn>=-dm && yXx>=-dm && zXx>=-dm) {
        if(im3D.draw_mesh_flag) { mval=fminf(mval,fminf(fabsf(remainderf(yX-ms.y, mb.y)), fabsf(remainderf(zX-ms.z, mb.z)))); }
        zval=fminf(zval,fminf(fminf(fabs(zXn), fabs(yXn)), fminf(fabs(zXx), fabs(yXx))));
      }
      // blend the box-edge color; weight falls off quadratically within Dmesh
      float zdel=smooth(zval/im3D.Dmesh);
      fcol = fcol*(1.0f-zdel)+((float3&)(im3D.box_col))*zdel;
    } else {
      // box outline disabled: only measure the distance to the nearest mesh
      // line on whichever face the ray actually exits through
      if(xZ>=boxmin.x && yZ>=boxmin.y && xZ<=boxmax.x && yZ<=boxmax.y) mval=fminf(fabsf(remainderf(xZ-ms.x, mb.x)), fabsf(remainderf(yZ-ms.y, mb.y)));
      else if(zY>=boxmin.z && xY>=boxmin.x && zY<=boxmax.z && xY<=boxmax.x) mval=fminf(fabsf(remainderf(zY-ms.z, mb.z)), fabsf(remainderf(xY-ms.x, mb.x)));
      else if(yX>=boxmin.y && zX>=boxmin.z && yX<=boxmax.y && zX<=boxmax.z) mval=fminf(fabsf(remainderf(yX-ms.y, mb.y)), fabsf(remainderf(zX-ms.z, mb.z)));
    }
    if(im3D.draw_mesh_flag) {
      // blend the mesh-line color the same way
      float mdel=smooth(mval/im3D.Dmesh);
      fcol = fcol*(1.0f-mdel)+((float3&)(im3D.mesh_col))*mdel;
    }
  }
  vbmp = vbmp+make_uchar4(__saturatef(fcol.x)*255, __saturatef(fcol.y)*255, __saturatef(fcol.z)*255, 255);
  return vbmp;
}
// Foreground counterpart of get_backgrownd: blends box edges / mesh lines on
// the NEAR side of the (shrunken) box over the already rendered pixel at
// bmp_sh (note the flipped r.d comparisons selecting the near planes).
// Unlike get_backgrownd it does not fill in any background color.
// Returns a reference to the pixel.
__device__ uchar4& get_foregrownd(Ray r, float3 boxmin, float3 boxmax, int bmp_sh) {
  float3 box_shrink=(float3&)im3D.box_shrink;
  float3 boxMin=box_shrink*boxmin, boxMax=box_shrink*boxmax;
  float3 fcol=make_float3(0);
  uchar4& vbmp=im.bmp[bmp_sh];
  if(im3D.draw_mesh_flag || im3D.draw_box_flag) {
    // ray parameters of the six box planes (+1e-5 guards against div-by-zero)
    float3 invR = make_float3(1.0f) / (r.d+1e-5);
    float3 tB = invR * (boxMin - r.o);
    float3 tT = invR * (boxMax - r.o);
    // hit point on the near z-, y- and x-plane of the box, respectively
    float tz=r.d.z>0?tB.z:tT.z, xZ=r.o.x+r.d.x*tz, yZ=r.o.y+r.d.y*tz;
    float ty=r.d.y>0?tB.y:tT.y, zY=r.o.z+r.d.z*ty, xY=r.o.x+r.d.x*ty;
    float tx=r.d.x>0?tB.x:tT.x, yX=r.o.y+r.d.y*tx, zX=r.o.z+r.d.z*tx;
    float mval=im3D.Dmesh;              // distance to the nearest mesh line (Dmesh = "no line")
    float3 mb=(float3&)im3D.MeshBox;    // mesh line spacing per axis
    float3 ms=(float3&)im3D.MeshShift;  // mesh line phase shift per axis
    if(im3D.draw_box_flag) {
      // signed distances from each face hit point to the face's four borders
      float xZn=xZ-boxmin.x, yZn=yZ-boxmin.y, xZx=boxmax.x-xZ, yZx=boxmax.y-yZ;
      float zYn=zY-boxmin.z, xYn=xY-boxmin.x, zYx=boxmax.z-zY, xYx=boxmax.x-xY;
      float yXn=yX-boxmin.y, zXn=zX-boxmin.z, yXx=boxmax.y-yX, zXx=boxmax.z-zX;
      float zval=im3D.Dmesh, dm=im3D.Dmesh;
      if(xZn>=-dm && yZn>=-dm && xZx>=-dm && yZx>=-dm) {
        if(im3D.draw_mesh_flag) { mval=fminf(mval,fminf(fabsf(remainderf(xZ-ms.x, mb.x)), fabsf(remainderf(yZ-ms.y, mb.y)))); }
        zval=fminf(zval,fminf(fminf(fabs(xZn), fabs(yZn)), fminf(fabs(xZx), fabs(yZx))));
      }
      if(zYn>=-dm && xYn>=-dm && zYx>=-dm && xYx>=-dm) {
        if(im3D.draw_mesh_flag) { mval=fminf(mval,fminf(fabsf(remainderf(zY-ms.z, mb.z)), fabsf(remainderf(xY-ms.x, mb.x)))); }
        zval=fminf(zval,fminf(fminf(fabs(xYn), fabs(zYn)), fminf(fabs(xYx), fabs(zYx))));
      }
      if(yXn>=-dm && zXn>=-dm && yXx>=-dm && zXx>=-dm) {
        if(im3D.draw_mesh_flag) { mval=fminf(mval,fminf(fabsf(remainderf(yX-ms.y, mb.y)), fabsf(remainderf(zX-ms.z, mb.z)))); }
        zval=fminf(zval,fminf(fminf(fabs(zXn), fabs(yXn)), fminf(fabs(zXx), fabs(yXx))));
      }
      // blend the box-edge color; weight falls off quadratically within Dmesh
      float zdel=smooth(zval/im3D.Dmesh);
      fcol = fcol*(1.0f-zdel)+((float3&)(im3D.box_col))*zdel;
    } else {
      // box outline disabled: only measure the distance to the nearest mesh
      // line on whichever face the ray actually enters through
      if(xZ>=boxmin.x && yZ>=boxmin.y && xZ<=boxmax.x && yZ<=boxmax.y) mval=fminf(fabsf(remainderf(xZ-ms.x, mb.x)), fabsf(remainderf(yZ-ms.y, mb.y)));
      else if(zY>=boxmin.z && xY>=boxmin.x && zY<=boxmax.z && xY<=boxmax.x) mval=fminf(fabsf(remainderf(zY-ms.z, mb.z)), fabsf(remainderf(xY-ms.x, mb.x)));
      else if(yX>=boxmin.y && zX>=boxmin.z && yX<=boxmax.y && zX<=boxmax.z) mval=fminf(fabsf(remainderf(yX-ms.y, mb.y)), fabsf(remainderf(zX-ms.z, mb.z)));
    }
    if(im3D.draw_mesh_flag) {
      // blend the mesh-line color the same way
      float mdel=smooth(mval/im3D.Dmesh);
      fcol = fcol*(1.0f-mdel)+((float3&)(im3D.mesh_col))*mdel;
    }
  }
  vbmp = vbmp+make_uchar4(__saturatef(fcol.x)*255, __saturatef(fcol.y)*255, __saturatef(fcol.z)*255, 255);
  return vbmp;
}
// Paint a small 3x3 marker centered at (x,y) into the frame buffer; does
// nothing when the marker would cross the bitmap border.
__device__ void mk_pts(int x, int y, uchar4 col) {
  const int ps=2;
  bool fits = x+1>=ps && x+ps<im3D.bNx && y+1>=ps && y+ps<im3D.bNy;
  if(!fits) return;
  for(int dy=1-ps; dy<ps; dy++)
    for(int dx=1-ps; dx<ps; dx++)
      im.bmp[(y+dy)*im3D.bNx + (x+dx)] = col;
}
// Draw the outline of a tileSz x tileSz square with top-left corner (x,y);
// skipped entirely if any edge would fall outside the bitmap.
__device__ void mk_box(int x, int y, uchar4 col) {
  if(x>=0 && y>=0 && x+tileSz<im3D.bNx && y+tileSz<im3D.bNy) {
    for(int i=0; i<tileSz; i++) {
      // top and bottom edges
      im.bmp[y*im3D.bNx + x+i] = col;
      im.bmp[(y+tileSz)*im3D.bNx + x+i] = col;
      // left and right edges
      im.bmp[(y+i)*im3D.bNx + x] = col;
      im.bmp[(y+i)*im3D.bNx + x+tileSz] = col;
    }
  }
}
// Point-in-triangle test for triangle (p0, px, py): pt is inside (or on an
// edge) iff the three edge cross products do not differ in sign.
inline bool __device__ is_inside(float2 pt, float2 p0, float2 px, float2 py) {
  float c0 = (p0.x - pt.x)*(px.y - p0.y) - (px.x - p0.x)*(p0.y - pt.y);
  float c1 = (px.x - pt.x)*(py.y - px.y) - (py.x - px.x)*(px.y - pt.y);
  float c2 = (py.x - pt.x)*(p0.y - py.y) - (p0.x - py.x)*(py.y - pt.y);
  bool same01 = c0*c1>=0.0, same02 = c0*c2>=0.0, same12 = c1*c2>=0.0;
  return same01 && same02 && same12;
}
// Coordinates of pt in the affine frame (p0; px-p0, py-p0): res.x is the
// coefficient along px-p0, res.y the coefficient along py-p0 (barycentric
// coordinates relative to the triangle p0, px, py).
inline float2 __device__ pt_inside(float2 pt, float2 p0, float2 px, float2 py) {
  float dx_x = px.x-p0.x, dx_y = px.y-p0.y;   // edge p0->px
  float dy_x = py.x-p0.x, dy_y = py.y-p0.y;   // edge p0->py
  float pt_x = pt.x-p0.x, pt_y = pt.y-p0.y;   // pt relative to p0
  float2 res;
  res.x = (pt_x*dy_y - pt_y*dy_x)/(dx_x*dy_y - dx_y*dy_x);
  res.y = (pt_x*dx_y - pt_y*dx_x)/(dy_x*dx_y - dy_y*dx_x);
  return res;
}
// Emit gnuplot annotation commands (axis arrows, axis letters, tick labels)
// for the rendered box silhouette via device printf, and mark the classified
// screen tiles in the bitmap for visual debugging. The screen is covered by
// tiles of tileSz pixels; each thread of the (tilesN x tilesN) block traces
// a ray through one tile corner, shared memory gathers the per-corner
// box-face hit points, and each tile is then classified from its 4 corners.
__global__ void save_gp3D() {
  __shared__ float2 fm[3][tilesN][tilesN];// per-corner face hit points, indexed [axis][corner x][corner y]
  __shared__ int hit[tilesN][tilesN];// face bit hit by the corner ray: 1 - z-face, 2 - y-face, 4 - x-face, 0 - miss
  const int Sgp=(tilesN-1)*tileSz;  // screen span covered by one block
  int x=blockIdx.x*Sgp+threadIdx.x*tileSz, y=blockIdx.y*Sgp+threadIdx.y*tileSz;
  float3 boxMin, boxMax; set_boxMinMax(boxMin, boxMax);
  boxMax=((float3&)im3D.box_shrink)*boxMax;
  boxMin=((float3&)im3D.box_shrink)*boxMin;
  Ray r; set_eyeRay(r, x,y);
  // intersection parameters with the box planes (+1e-5 avoids div-by-zero)
  float3 invR = make_float3(1.0f) / (r.d+1e-5);
  float3 tB = invR * (boxMin - r.o);
  float3 tT = invR * (boxMax - r.o);
  // far-side hit points on the z-, y- and x-planes of the box
  float tz=r.d.z<0?tB.z:tT.z, xZ=r.o.x+r.d.x*tz, yZ=r.o.y+r.d.y*tz;
  float ty=r.d.y<0?tB.y:tT.y, zY=r.o.z+r.d.z*ty, xY=r.o.x+r.d.x*ty;
  float tx=r.d.x<0?tB.x:tT.x, yX=r.o.y+r.d.y*tx, zX=r.o.z+r.d.z*tx;
  fm[2][threadIdx.x][threadIdx.y] = make_float2(xZ, yZ);
  fm[1][threadIdx.x][threadIdx.y] = make_float2(zY, xY);
  fm[0][threadIdx.x][threadIdx.y] = make_float2(yX, zX);
  if(xZ>=boxMin.x && yZ>=boxMin.y && xZ<=boxMax.x && yZ<=boxMax.y) hit[threadIdx.x][threadIdx.y] = 1; //mk_pts(x,y, red);}
  else if(zY>=boxMin.z && xY>=boxMin.x && zY<=boxMax.z && xY<=boxMax.x) hit[threadIdx.x][threadIdx.y] = 2; //mk_pts(x,y, green);}
  else if(yX>=boxMin.y && zX>=boxMin.z && yX<=boxMax.y && zX<=boxMax.z) hit[threadIdx.x][threadIdx.y] = 4; //mk_pts(x,y, blue);}
  else hit[threadIdx.x][threadIdx.y] = 0;
  __syncthreads();
  // classify the tile spanned by corners (x..x+1, y..y+1):
  // hitA = number of corners hitting the box, hitM = OR of their face bits
  int hitA=0, hitM=0;
  if(threadIdx.x<tilesN-1 && threadIdx.y<tilesN-1) {
    for(int i=0;i<2;i++) for(int j=0;j<2;j++) {
      int h=hit[threadIdx.x+i][threadIdx.y+j];
      if(h>0) { hitA++; hitM |= h; }
    }
  }
  // maps the face-bit combination to a working axis index (3,5,6 -> 0,1,2;
  // 1,2,4 -> 2,1,0; hitM==7 gives cs==3 and is rejected below)
  int cs=abs(2*hitM-7)/2;
  if(hitA==0 || hitA==4 || cs>=3) return;  // fully outside, fully inside one face, or all three faces
  bool is4tick=false, is4bnd=false, is4axis=false;
  is4bnd = hitM==1 || hitM==2 || hitM==4;  // tile crosses the border of a single face
  is4axis= hitM==3 || hitM==5 || hitM==6;  // tile crosses an edge between two faces -> axis annotation
  int cp=(cs+1)%3, cm=(cs+2)%3;            // the two axes complementary to cs
  float2 tick_sh={0.0,0.0}, tick2sh={0.0,0.0}; float tick_val;
  const float axis_gap=60., tick_gap=20.;  // label offsets (screen-space units)
  float2 pt, spt={0.,0.}; float bMax[]={boxMax.x,boxMax.y,boxMax.z}, bMin[]={boxMin.x,boxMin.y,boxMin.z};
  // unique gnuplot object id derived from the global tile index
  int labN=(blockIdx.x*(tilesN-1)+threadIdx.x)+gridDim.x*(tilesN-1)*(blockIdx.y*(tilesN-1)+threadIdx.y);
  if(is4axis) {
    // place the axis arrow + axis letter off the nearest box corner,
    // mapped back to screen space through the tile's corner triangle
    float2 p0=fm[cm][threadIdx.x][threadIdx.y], px=fm[cm][threadIdx.x+1][threadIdx.y], py=fm[cm][threadIdx.x][threadIdx.y+1];
    if(fabs(p0.x-bMax[cs])<fabs(p0.x-bMin[cs])) { pt.x = bMax[cs]; spt.x = axis_gap; }
    else { pt.x = bMin[cs]; spt.x = -axis_gap; }
    pt.y = fabs(p0.y-bMax[cp])<fabs(p0.y-bMin[cp])?bMax[cp]:bMin[cp];
    tick_sh = pt_inside(pt, p0,px,py);
    tick2sh = pt_inside(pt+spt, p0,px,py);
    printf("set arrow %d from %g,%g to %g,%g front nohead\n", labN, x+tick_sh.x*tileSz,y+tick_sh.y*tileSz, x+tick2sh.x*tileSz,y+tick2sh.y*tileSz);
    printf("set label %d \"%c\" at %g,%g front center\n", labN, "xyz?"[cs], x+tick2sh.x*tileSz,y+tick2sh.y*tileSz+tick_gap*((tick2sh.y<tick_sh.y)?-1.:1.));
  } else if(is4bnd) {
    // bounding box of the four corner hit points on face cs
    float2 fmin,fmax; fmin = fmax = fm[cs][threadIdx.x][threadIdx.y];
    for(int i=0;i<2;i++) for(int j=0;j<2;j++) {
      float2 f = fm[cs][threadIdx.x+i][threadIdx.y+j];
      if(f.x<fmin.x) fmin.x = f.x;
      if(f.y<fmin.y) fmin.y = f.y;
      if(f.x>fmax.x) fmax.x = f.x;
      if(f.y>fmax.y) fmax.y = f.y;
    }
    if(fmin.x<bMin[cp] || fmax.x>bMax[cp]) {// cM = cm;
      // tile straddles the cp border: emit a tick if a MeshBox[cm]-spaced
      // grid line passes through the tile
      int mmin=floorf(fmin.y/im3D.MeshBox[cm]), mmax=floorf(fmax.y/im3D.MeshBox[cm]);
      if(mmin != mmax) is4tick = true;
      pt.x = fmin.x<bMin[cp]?bMin[cp]:bMax[cp]; spt.x = fmin.x<bMin[cp]?-tick_gap:tick_gap;
      pt.y = mmax*im3D.MeshBox[cm];
      tick_val = im3D.base[cm] + pt.y*im3D.step[cm];  // physical coordinate printed on the tick
    } else if(fmin.y<bMin[cm] || fmax.y>bMax[cm]) {// cM = cp;
      int mmin=floorf(fmin.x/im3D.MeshBox[cp]), mmax=floorf(fmax.x/im3D.MeshBox[cp]);
      if(mmin != mmax) is4tick = true;
      pt.x = mmax*im3D.MeshBox[cp];
      pt.y = fmin.y<bMin[cm]?bMin[cm]:bMax[cm]; spt.y = fmin.y<bMin[cm]?-tick_gap:tick_gap;
      tick_val = im3D.base[cp] + pt.x*im3D.step[cp];
    }
    if(is4tick) {
      // map the tick point back to screen space via whichever of the two
      // triangles of the tile quad contains it
      float2 p0=fm[cs][threadIdx.x][threadIdx.y], px=fm[cs][threadIdx.x+1][threadIdx.y], py=fm[cs][threadIdx.x][threadIdx.y+1], p1=fm[cs][threadIdx.x+1][threadIdx.y+1];
      if(is_inside(pt, p0,px,py)) {
        tick_sh = pt_inside(pt, p0,px,py);
        tick2sh = pt_inside(pt+spt, p0,px,py);
      } else if(is_inside(pt, p1,py,px)) {
        tick_sh = 1.0-pt_inside(pt, p1,py,px);
        tick2sh = 1.0-pt_inside(pt+spt, p1,py,px);
      } else is4tick = false;
      if(is4tick) printf("set label %d \"%g\" at %g,%g front %s\n", labN, tick_val, x+tick2sh.x*tileSz,y+tick2sh.y*tileSz, (tick2sh.x<tick_sh.x)?"right":"left");
    }
  }
  // debug visualisation of the tile classification (ltred/ltgreen unused)
  uchar4 red=make_uchar4(255,0,0,0), green=make_uchar4(0,255,0,0), blue=make_uchar4(0,0,255,0);
  uchar4 ltred=make_uchar4(128,0,0,0), ltgreen=make_uchar4(0,128,0,0), ltblue=make_uchar4(0,0,128,0);
  if(is4axis) {
    mk_box(x,y, red);
    mk_pts(x+tick2sh.x*tileSz,y+tick2sh.y*tileSz, red);
  } else if(is4tick) {
    mk_box(x,y, blue);
    mk_pts(x+tick2sh.x*tileSz,y+tick2sh.y*tileSz, blue);
  } else if(is4bnd) mk_box(x,y, green);
  else mk_box(x,y, ltblue);
}
// Volume rendering with an on-the-fly gradient: one thread per pixel; the
// ray is marched front-to-back through the data box, and at each step the
// central-difference gradient of the scalar field is fed together with the
// value into the directional palette lookup. Compiled only for scalar data
// (DATA_VECTOR_SZ==1).
__global__ void __launch_bounds__(1024,1) grad_render3D() {
#if DATA_VECTOR_SZ==1
  const float opacityThreshold = im3D.opacity;
  const float density=im3D.density, brightness=im.max_rgb;
  float3 boxMin, boxMax; set_boxMinMax(boxMin, boxMax);
  int x = blockIdx.x*blockDim.x + threadIdx.x;
  int y = blockIdx.y*blockDim.y + threadIdx.y;
  Ray eyeRay; set_eyeRay(eyeRay, x,y);
  uchar4& vbmp=get_backgrownd(eyeRay, boxMin, boxMax, y*im3D.bNx + x);
  // per-thread jitter of the ray origin (radius randR, precomputed angle)
  float phi=im3D.randArr[threadIdx.x+threadIdx.y*blockDim.x];
  set_eyeRay(eyeRay, x+im3D.randR*cos(phi),y+im3D.randR*sin(phi));
  float tnear, tfar;
  int hit = intersectBox(eyeRay, boxMin, boxMax, &tnear, &tfar);
  if (!hit) return;
  if(tnear < 0.0f) tnear = 0.0f; // clamp to near plane
  float4 sum = make_float4(0.0f);
  // world -> texel coordinate scale of the 3D texture
  const float3 SzfdBox=make_float3(im3D.Nx,im3D.Ny,im3D.Nz)/(boxMax-boxMin);
  float3 pos_sc = (eyeRay.o + eyeRay.d*tnear-boxMin)*SzfdBox-0.5f;
  const float3 step_sc = (eyeRay.d*im3D.tstep)*SzfdBox;
  for(float t=tnear; t<tfar; t+=im3D.tstep, pos_sc += step_sc) {
    // cross stencil:
    float d=im3D.tstep, dd=im.max_rgb_step*0.5/d;
    float dfdx=dd*(tex3D(data3D_tex, pos_sc.x+d, pos_sc.y, pos_sc.z)-tex3D(data3D_tex, pos_sc.x-d, pos_sc.y, pos_sc.z));
    float dfdy=dd*(tex3D(data3D_tex, pos_sc.x, pos_sc.y+d, pos_sc.z)-tex3D(data3D_tex, pos_sc.x, pos_sc.y-d, pos_sc.z));
    float dfdz=dd*(tex3D(data3D_tex, pos_sc.x, pos_sc.y, pos_sc.z+d)-tex3D(data3D_tex, pos_sc.x, pos_sc.y, pos_sc.z-d));
    float4 col = im.get_color_for3D(make_float4(dfdx,dfdy,dfdz,tex3D(data3D_tex, pos_sc.x, pos_sc.y, pos_sc.z)));
    // front-to-back "over" compositing; w is this sample's contribution
    float w=col.w*density*(1.0f - sum.w); col.w = 1;
    sum += col * w;
    if(sum.w >= opacityThreshold) {
      // trim the last step's overshoot so sum.w lands exactly on the threshold
      sum -= col*(sum.w - opacityThreshold);
      break;
    }/*
    col.w *= density;
    col.x *= col.w;
    col.y *= col.w;
    col.z *= col.w;
    sum = sum + col*(1.0f - sum.w);
    if (sum.w > opacityThreshold) break;*/
  }
  sum.x *= brightness; sum.y *= brightness; sum.z *= brightness;
  vbmp = rgbaFloatToInt(sum, vbmp);
  if(im3D.draw_fg_flag) vbmp=get_foregrownd(eyeRay, boxMin, boxMax, y*im3D.bNx + x);
  if(im3D.draw_sec_xyz_flag) {
    // invert pixels where the ray stopped on one of the section planes
    if(fabs(pos_sc.x-im3D.ix0)<=0.5|| fabs(pos_sc.y-im3D.iy0)<=0.5|| fabs(pos_sc.z-im3D.iz0)<=0.5) vbmp = make_uchar4(255-vbmp.x,255-vbmp.y,255-vbmp.z,vbmp.w);
  }
#endif
}
// Volume rendering using a precomputed normal field: one thread per pixel;
// at each ray step a unit normal is unpacked from data3Dsurf_tex and fed
// together with the scalar value into the directional palette lookup.
// Compiled only for scalar data (DATA_VECTOR_SZ==1).
__global__ void __launch_bounds__(1024,1) surf_render3D() {
#if DATA_VECTOR_SZ==1
  const float opacityThreshold = im3D.opacity;
  const float density=im3D.density, brightness=im.max_rgb;
  float3 boxMin, boxMax; set_boxMinMax(boxMin, boxMax);
  int x = blockIdx.x*blockDim.x + threadIdx.x;
  int y = blockIdx.y*blockDim.y + threadIdx.y;
  Ray eyeRay; set_eyeRay(eyeRay, x,y);
  uchar4& vbmp=get_backgrownd(eyeRay, boxMin, boxMax, y*im3D.bNx + x);
  // per-thread jitter of the ray origin (radius randR, precomputed angle)
  float phi=im3D.randArr[threadIdx.x+threadIdx.y*blockDim.x];
  set_eyeRay(eyeRay, x+im3D.randR*cos(phi),y+im3D.randR*sin(phi));
  float tnear, tfar;
  int hit = intersectBox(eyeRay, boxMin, boxMax, &tnear, &tfar);
  if (!hit) return;
  if(tnear < 0.0f) tnear = 0.0f; // clamp to near plane
  float4 sum = make_float4(0.0f);
  // world -> texel coordinate scale of the 3D texture
  const float3 SzfdBox=make_float3(im3D.Nx,im3D.Ny,im3D.Nz)/(boxMax-boxMin);
  float3 pos_sc = (eyeRay.o + eyeRay.d*tnear-boxMin)*SzfdBox-0.5f;
  const float3 step_sc = (eyeRay.d*im3D.tstep)*SzfdBox;
  for(float t=tnear; t<tfar; t+=im3D.tstep, pos_sc += step_sc) {
    // cross stencil:
    short2 s2=tex3D(data3Dsurf_tex, pos_sc.x, pos_sc.y, pos_sc.z);
    const short MAX_SHORT=(1<<15)-1; const float dMS=1.0f/MAX_SHORT;
    float3 f={0,0,0};
    // unpack a unit normal stored as (z component, azimuth angle) scaled
    // into shorts; the value -32768 in either slot marks "no normal"
    if(s2.x!=-MAX_SHORT-1 && s2.y!=-MAX_SHORT-1) {
      f.z = s2.x*dMS; float fxy=sqrt(1-f.z*f.z), phi=s2.y*dMS*M_PI;
      f.y = fxy*sin(phi);
      f.x = fxy*cos(phi);
    }
    float4 col = im.get_color_for3D(make_float4(f.x,f.y,f.z,tex3D(data3D_tex, pos_sc.x, pos_sc.y, pos_sc.z)));
    // front-to-back "over" compositing; w is this sample's contribution
    float w=col.w*density*(1.0f - sum.w); col.w = 1;
    sum += col * w;
    if(sum.w >= opacityThreshold) {
      // trim the last step's overshoot so sum.w lands exactly on the threshold
      sum -= col*(sum.w - opacityThreshold);
      break;
    }
  }
  sum.x *= brightness; sum.y *= brightness; sum.z *= brightness;
  vbmp = rgbaFloatToInt(sum, vbmp);
  if(im3D.draw_fg_flag) vbmp=get_foregrownd(eyeRay, boxMin, boxMax, y*im3D.bNx + x);
  if(im3D.draw_sec_xyz_flag) {
    // invert pixels on the section planes
    // NOTE(review): alpha is negated (-vbmp.w) here but kept as vbmp.w in
    // grad_render3D — confirm which variant is intended
    if(fabs(pos_sc.x-im3D.ix0)<=0.5|| fabs(pos_sc.y-im3D.iy0)<=0.5|| fabs(pos_sc.z-im3D.iz0)<=0.5) vbmp = make_uchar4(255-vbmp.x,255-vbmp.y,255-vbmp.z,-vbmp.w);
  }
  //if(x==im3D.bNx/2 && y==im3D.bNy/2) printf("Surf: %f,%f,%f,%f*%f/%f*%f => %d,%d,%d\n", sum.x,sum.y,sum.z,sum.w, last_mul,opacityThreshold, brightness, vbmp.x,vbmp.y,vbmp.z);
#endif
}
// Basic volume rendering kernel: one thread per pixel. Composes the
// background, jitters the eye ray, marches front-to-back through the data
// box sampling data3D_tex through the palette, then writes the blended and
// brightness-scaled color over the background pixel.
__global__ void __launch_bounds__(1024,1) render3D() {
  const float opacityThreshold = im3D.opacity;//0.95f;
  const float density=im3D.density, brightness=im.max_rgb;
  float3 boxMin, boxMax; set_boxMinMax(boxMin, boxMax);
  int x = blockIdx.x*blockDim.x + threadIdx.x;
  int y = blockIdx.y*blockDim.y + threadIdx.y;
  //bool isCnt=blockIdx.x==gridDim.x/2 && blockIdx.y==gridDim.y/2 && threadIdx.x == blockDim.x/2 && threadIdx.y == blockDim.y/2;
  //if ((x >= im3D.bNx) || (y >= im3D.bNy)) return;
  //if(x==0 && y==0) printf("block: %gx%gx%g\n", boxMax.x, boxMax.y, boxMax.z);
  // calculate eye ray in world space
  Ray eyeRay; set_eyeRay(eyeRay, x,y);
  //const int Nsum=im3D.Nx+im3D.Ny+im3D.Nz;
  //const float dbNxy=2.0f/(im3D.bNx+im3D.bNy);
  //eyeRay.o = make_float3(mul(c_invViewMatrix, make_float4(0.0f, 0.0f, 0.0f, 0.32f*Nsum)));
  //eyeRay.d = normalize(make_float3((x-im3D.bNx/2)*dbNxy, (y-im3D.bNy/2)*dbNxy, -2.0f));
  //eyeRay.d = mul(c_invViewMatrix, eyeRay.d);
  uchar4& vbmp=get_backgrownd(eyeRay, boxMin, boxMax, y*im3D.bNx + x);
  // per-thread jitter of the ray origin (radius randR, precomputed angle)
  float phi=im3D.randArr[threadIdx.x+threadIdx.y*blockDim.x];
  set_eyeRay(eyeRay, x+im3D.randR*cos(phi),y+im3D.randR*sin(phi));
  float tnear, tfar;
  int hit = intersectBox(eyeRay, boxMin, boxMax, &tnear, &tfar);
  if (!hit) return;
  if(tnear < 0.0f) tnear = 0.0f; // clamp to near plane
  //if(tnear+im3D.tstep*Nsum<tfar) tfar = tnear+im3D.tstep*Nsum;
  // march along ray from front to back, accumulating color
  float4 sum = make_float4(0.0f);
  //float3 pos = eyeRay.o + eyeRay.d*tnear;
  //float3 step = eyeRay.d*im3D.tstep;
  // world -> texel coordinate scale of the 3D texture
  const float3 SzfdBox=make_float3(im3D.Nx,im3D.Ny,im3D.Nz)/(boxMax-boxMin);
  float3 pos_sc = (eyeRay.o + eyeRay.d*tnear-boxMin)*SzfdBox-0.5f;
  const float3 step_sc = (eyeRay.d*im3D.tstep)*SzfdBox;
  //const float pscale=im.pscale*0.01f, fscale=100.0f*im.fscale, fmin=0.5f-im.fmin*fscale;
  //if(isCnt) printf("I am ray: %f(%f)%f step: %f,%f,%f; pos: %f,%f,%f of %d,%d,%d\n", tnear,im3D.tstep,tfar, step_sc.x,step_sc.y,step_sc.z, pos_sc.x, pos_sc.y, pos_sc.z, im3D.Nx,im3D.Ny,im3D.Nz);
  for(float t=tnear; t<tfar; t+=im3D.tstep, pos_sc += step_sc) {
    // read from 3D texture
    float4 col = im.get_color_for3D(tex3D(data3D_tex, pos_sc.x, pos_sc.y, pos_sc.z));
    // front-to-back "over" compositing; w is this sample's contribution
    float w=col.w*density*(1.0f - sum.w); col.w = 1;
    sum += col * w;
    if(sum.w >= opacityThreshold) {
      // trim the last step's overshoot so sum.w lands exactly on the threshold
      sum -= col*(sum.w - opacityThreshold);
      break;
    }/*
    //float f = tex3D(data3D_tex, pos_sc.x, pos_sc.y, pos_sc.z);
    //float4 col = tex1D(fpal_col_tex, 0.5f + pscale*tex1D(fpal_scale_tex, fmin+f*fscale));
    col.w *= density;
    // "under" operator for back-to-front blending
    //sum = lerp(sum, col, col.w);
    // pre-multiply alpha
    col.x *= col.w;
    col.y *= col.w;
    col.z *= col.w;
    // "over" operator for front-to-back blending
    sum = sum + col*(1.0f - sum.w);
    // exit early if opaque
    if (sum.w > opacityThreshold) break;
    // pos_sc += step_sc;
    */
  }
  //if(isCnt) printf("I am ray: %f\n",sum.w);
  sum.x *= brightness; sum.y *= brightness; sum.z *= brightness;
  //sum *= brightness;
  // write output color
  vbmp = rgbaFloatToInt(sum, vbmp);
  if(im3D.draw_fg_flag) vbmp=get_foregrownd(eyeRay, boxMin, boxMax, y*im3D.bNx + x);
  //if(threadIdx.x==0 && threadIdx.y==0) vbmp = make_uchar4(255,255,255,255);
  if(im3D.draw_sec_xyz_flag) {
    // invert pixels where the ray stopped on one of the section planes
    if(fabs(pos_sc.x-im3D.ix0)<=0.5|| fabs(pos_sc.y-im3D.iy0)<=0.5|| fabs(pos_sc.z-im3D.iz0)<=0.5) vbmp = make_uchar4(255-vbmp.x,255-vbmp.y,255-vbmp.z,-vbmp.w);
  }
}
// Snapshot the currently displayed bitmap into imHost.bmp4backgrownd
// (device memory) so it can later serve as the background of the 3D
// rendering. Failures are reported and swallowed (best effort).
void im3D_pars::save_bmp4backgrownd() {
  try {
    uchar4* devPtr; size_t size;
    if(CHECK_ERROR(hipGraphicsMapResources(1, &im2D.resource, NULL))) throw(-1);
    // presumably removes the on-screen negation before saving —
    // confirm negate kernel semantics
    if(imHost.negate_flag)hipLaunchKernelGGL(( negate) , dim3(bNx/NW),dim3(NW), 0, 0, );
    if(CHECK_ERROR(hipGraphicsResourceGetMappedPointer((void**) &devPtr, &size, im2D.resource))) throw(-1);
    // (re)allocate the snapshot buffer to the size of the mapped resource
    if(imHost.bmp4backgrownd != 0) CHECK_ERROR(hipFree(imHost.bmp4backgrownd));
    if(CHECK_ERROR(hipMalloc((void**) &imHost.bmp4backgrownd, size))) throw(-1);
    if(CHECK_ERROR(hipMemcpy(imHost.bmp4backgrownd, devPtr, size, hipMemcpyDeviceToDevice))) throw(-1);
    im2D.unmapAfterDraw();
  } catch(...) {
    printf("save_bmp4backgrownd: - .\n");
  }
}
// Redraw the "all sections" view: the bitmap is tiled with every x-, y- and
// z-section of the volume (one per MeshBox step along each axis), laid out
// left-to-right with wrapping to new rows, followed by the palette bar and
// optional negation. Failures are reported and swallowed.
void im3D_pars::recalc_sec_im3D() {
  try {
    imHost.bmp = im2D.map4draw();
    imHost.bind2draw();
    // push current image/view parameters to the device-side constants
    if(CHECK_ERROR(hipMemcpyToSymbol(im, &imHost, sizeof(imHost)))) throw(-1);
    if(CHECK_ERROR(hipMemcpyToSymbol(im3D, this, sizeof(im3D_pars)))) throw(-1);
    // zoomed section sizes and corresponding kernel block counts
    int NxZ=Nx/x_zoom, NyZ=Ny/y_zoom, NzZ=Nz/z_zoom;
    int NxB=(NxZ+NW-1)/NW, NyB=(NyZ+NW-1)/NW, NzB=(NzZ+NW-1)/NW;
    // background color converted to clamped bytes
    unsigned char ub[3];
    for(int i=0; i<3; i++) { ub[i] = bkgr_col[i]<0?0:(bkgr_col[i]>1?255:255.*bkgr_col[i]); }
    hipLaunchKernelGGL(( im3Dclear) , dim3(dim3(bNx/NW,bNy/NW)),dim3(dim3(NW,NW)), 0, 0, make_uchar4(ub[0], ub[1], ub[2], 255));
    // (shX,shY) walk the layout; each loop draws one family of sections
    int shX=0,shY=0;
    for(int ix=int(Nx*RotPoint[0])%int(MeshBox[0]); ix<Nx; ix+=MeshBox[0]) {
      if(shX+NyZ>bNx) { shX=0; shY += NzZ+2; } if(shY+NzZ>bNy) break;
      hipLaunchKernelGGL(( im3Ddraw_any<0,1,2>) , dim3(dim3(NyB,NzB)),dim3(dim3(NW,NW)), 0, 0, shX+shY*bNx,ix);
      shX += NyZ+2;
    }// if(shX>0) { shX=0; shY += NzZ+2; }
    for(int iy=int(Ny*RotPoint[1])%int(MeshBox[1]); iy<Ny; iy+=MeshBox[1]) {
      if(shX+NxZ>bNx) { shX=0; shY += NzZ+2; } if(shY+NzZ>bNy) break;
      hipLaunchKernelGGL(( im3Ddraw_any<1,0,2>) , dim3(dim3(NxB,NzB)),dim3(dim3(NW,NW)), 0, 0, shX+shY*bNx,iy);
      shX += NxZ+2;
    } if(shX>0) { shX=0; shY += NzZ+2; }
    for(int iz=int(Nz*RotPoint[2])%int(MeshBox[2]); iz<Nz; iz+=MeshBox[2]) {
      //printf("draw xy at iz=%d; (%d,%d) -> (%d,%d)..\n", iz, shX,shY, 0,shY +(NyZ+2), );
      if(shX+NxZ>bNx) { shX=0; shY += NyZ+2; } if(shY+NyZ>bNy) break;
      hipLaunchKernelGGL(( im3Ddraw_any<1,2,0>) , dim3(dim3(NxB,NyB)),dim3(dim3(NW,NW)), 0, 0, shX+shY*bNx,iz);
      shX += NxZ+2;
    }// if(shX>0) { shX=0; shY += NyZ+2; }
    if(imHost.draw_flag)hipLaunchKernelGGL(( draw_pal) , dim3(bNx/NW),dim3(NW), 0, 0, );
    if(imHost.negate_flag)hipLaunchKernelGGL(( negate) , dim3(bNx/NW),dim3(NW), 0, 0, );
    imHost.nFrame++;
    imHost.unbindAfterDraw();
    im2D.unmapAfterDraw();
  } catch(...) {
    printf("recalc_im3D: - .\n");
  }
}
// Drag one of the on-screen section views by (x1-x, y1-y) pixels. (x,y) is
// the drag start; whichever section rectangle contains it gets its bitmap
// offset (sec?sh, encoded as column + row*bNx) shifted, refused when either
// component of the offset would become negative.
void im3D_pars::shift0(int x, int y, int x1, int y1) {
  int ix,iy, dx=x1-x, dy=y1-y, sh=dx+dy*bNx;
  // X-section: screen axes are (z,y) or (y,z) depending on secType
  if(secType!=1) {
    ix=(x-secXsh%bNx)*z_zoom; iy=(y-secXsh/bNx)*y_zoom;
    if(0<=ix && ix<Nz && 0<=iy && iy<Ny) { if(secXsh%bNx+dx>=0 && secXsh/bNx+dy>=0) secXsh += sh; return; }
  } else {
    ix=(x-secXsh%bNx)*y_zoom; iy=(y-secXsh/bNx)*z_zoom;
    if(0<=ix && ix<Ny && 0<=iy && iy<Nz) { if(secXsh%bNx+dx>=0 && secXsh/bNx+dy>=0) secXsh += sh; return; }
  }
  // Y-section: screen axes are (x,z) or (z,x) depending on secType
  if(secType<2) {
    ix=(x-secYsh%bNx)*x_zoom; iy=(y-secYsh/bNx)*z_zoom;
    if(0<=ix && ix<Nx && 0<=iy && iy<Nz) { if(secYsh%bNx+dx>=0 && secYsh/bNx+dy>=0) secYsh += sh; return; }
  } else {
    ix=(x-secYsh%bNx)*z_zoom; iy=(y-secYsh/bNx)*x_zoom;
    if(0<=ix && ix<Nz && 0<=iy && iy<Nx) { if(secYsh%bNx+dx>=0 && secYsh/bNx+dy>=0) secYsh += sh; return; }
  }
  // Z-section: screen axes are always (x,y)
  ix=(x-secZsh%bNx)*x_zoom; iy=(y-secZsh/bNx)*y_zoom;
  if(0<=ix && ix<Nx && 0<=iy && iy<Ny) { if(secZsh%bNx+dx>=0 && secZsh/bNx+dy>=0) secZsh += sh; return; }
}
// Click at bitmap position (x,y): find which section view contains the
// point and set the corresponding pair of section indices (ix0/iy0/iz0) to
// the clicked voxel. Sections are checked in order z, y, x; the axis
// mapping of the x/y sections depends on secType (see shift0).
void im3D_pars::reset0(int x, int y) {
  int ix,iy;
  // Z-section: screen axes (x,y)
  ix=(x-secZsh%bNx)*x_zoom; iy=(y-secZsh/bNx)*y_zoom;
  if(0<=ix && ix<Nx && 0<=iy && iy<Ny) { ix0 = ix; iy0 = iy; return; }
  // Y-section: screen axes (x,z) or (z,x)
  if(secType<2) {
    ix=(x-secYsh%bNx)*x_zoom; iy=(y-secYsh/bNx)*z_zoom;
    if(0<=ix && ix<Nx && 0<=iy && iy<Nz) { ix0 = ix; iz0 = iy; return; }
  } else {
    ix=(x-secYsh%bNx)*z_zoom; iy=(y-secYsh/bNx)*x_zoom;
    if(0<=ix && ix<Nz && 0<=iy && iy<Nx) { iz0 = ix; ix0 = iy; return; }
  }
  // X-section: screen axes (z,y) or (y,z)
  if(secType!=1) {
    ix=(x-secXsh%bNx)*z_zoom; iy=(y-secXsh/bNx)*y_zoom;
    if(0<=ix && ix<Nz && 0<=iy && iy<Ny) { iz0 = ix; iy0 = iy; return; }
  } else {
    ix=(x-secXsh%bNx)*y_zoom; iy=(y-secXsh/bNx)*z_zoom;
    if(0<=ix && ix<Ny && 0<=iy && iy<Nz) { iy0 = ix; iz0 = iy; return; }
  }
}
// Redraw the three-orthogonal-sections view: clear to the background color,
// draw the z-, y- and x-sections (at iz0, iy0, ix0) at their bitmap offsets
// — the template axis mapping for the y/x sections depends on secType —
// then the palette and optional negation. Failures reported and swallowed.
void im3D_pars::recalc_im3D() {
  try {
    imHost.bmp = im2D.map4draw();
    imHost.bind2draw();
    // push current image/view parameters to the device-side constants
    if(CHECK_ERROR(hipMemcpyToSymbol(im, &imHost, sizeof(imHost)))) throw(-1);
    if(CHECK_ERROR(hipMemcpyToSymbol(im3D, this, sizeof(im3D_pars)))) throw(-1);
    //if(CHECK_ERROR(hipMemcpyToSymbol(c_invViewMatrix, invViewMatrix, sizeof(float4)*3))) throw(-1);
    //if(CHECK_ERROR(hipDeviceSetCacheConfig(hipFuncCachePreferShared))) throw(-1);
    //Pal via Tex
    // kernel block counts for the zoomed section sizes
    int NxB=(Nx/x_zoom+NW-1)/NW, NyB=(Ny/y_zoom+NW-1)/NW, NzB=(Nz/z_zoom+NW-1)/NW;
    // background color converted to clamped bytes
    unsigned char ub[3];
    for(int i=0; i<3; i++) { ub[i] = bkgr_col[i]<0?0:(bkgr_col[i]>1?255:255.*bkgr_col[i]); }
    hipLaunchKernelGGL(( im3Dclear) , dim3(dim3(bNx/NW,bNy/NW)),dim3(dim3(NW,NW)), 0, 0, make_uchar4(ub[0], ub[1], ub[2], 255));
    hipLaunchKernelGGL(( im3Ddraw_any<1,2,0>) , dim3(dim3(NxB,NyB)),dim3(dim3(NW,NW)), 0, 0, secZsh,iz0);
    if(secType<2)hipLaunchKernelGGL(( im3Ddraw_any<1,0,2>) , dim3(dim3(NxB,NzB)),dim3(dim3(NW,NW)), 0, 0, secYsh,iy0);
    else hipLaunchKernelGGL(( im3Ddraw_any<2,0,1>) , dim3(dim3(NzB,NxB)),dim3(dim3(NW,NW)), 0, 0, secYsh,iy0);
    if(secType!=1)hipLaunchKernelGGL(( im3Ddraw_any<0,2,1>) , dim3(dim3(NzB,NyB)),dim3(dim3(NW,NW)), 0, 0, secXsh,ix0);
    else hipLaunchKernelGGL(( im3Ddraw_any<0,1,2>) , dim3(dim3(NyB,NzB)),dim3(dim3(NW,NW)), 0, 0, secXsh,ix0);
    if(imHost.draw_flag)hipLaunchKernelGGL(( draw_pal) , dim3(bNx/NW),dim3(NW), 0, 0, );
    if(imHost.negate_flag)hipLaunchKernelGGL(( negate) , dim3(bNx/NW),dim3(NW), 0, 0, );
    imHost.nFrame++;
    imHost.unbindAfterDraw();
    im2D.unmapAfterDraw();
  } catch(...) {
    printf("recalc_im3D: - .\n");
  }
}
// Render the full 3D view: build the inverse view matrix from the current
// rotations/translation via the OpenGL modelview stack, upload it and the
// view parameters to device constants, run the ray-casting kernel selected
// by mode3D (0 - plain, 1 - surface-normal, 2 - gradient), then palettes
// and optional negation. Failures are reported and swallowed.
void im3D_pars::recalc3D_im3D() {
  try {
    // use OpenGL to build view matrix
    GLfloat modelView[16];
    glMatrixMode(GL_MODELVIEW);
    glPushMatrix();
    glLoadIdentity();
    // rotation order depends on the active mouse modifier
    switch(mk_state.modState) {
    case GLUT_ACTIVE_SHIFT:
      glRotatef(-viewRotation[1], 0.0, 1.0, 0.0);
      glRotatef(-viewRotation[0], 1.0, 0.0, 0.0);
      break;
    case GLUT_ACTIVE_CTRL:
    default:
      glRotatef(-viewRotation[0], 1.0, 0.0, 0.0);
      glRotatef(-viewRotation[1], 0.0, 1.0, 0.0);
      glRotatef(-viewRotation[2], 0.0, 0.0, 1.0);
      glRotatef(-viewRotationTmp[0], 1.0, 0.0, 0.0);
      glRotatef(-viewRotationTmp[1], 0.0, 1.0, 0.0);
    }
    glTranslatef(-viewTranslation[0], -viewTranslation[1], -viewTranslation[2]);
    glGetFloatv(GL_MODELVIEW_MATRIX, modelView);
    glPopMatrix();
    // transpose the top 3 rows of the column-major GL matrix into the
    // row-major 3x4 invViewMatrix (i&3 selects the column, i/4 the row)
    for(int i=0; i<12; i++) invViewMatrix[i] = modelView[4*(i&3)+i/4];
    if(CHECK_ERROR(hipMemcpyToSymbol(c_invViewMatrix, invViewMatrix, sizeof(float4)*3))) throw(-1);
    //copyInvViewMatrix(invViewMatrix, sizeof(float4)*3);
    imHost.bmp = im2D.map4draw();
    imHost.bind2draw();
    if(CHECK_ERROR(hipMemcpyToSymbol(im, &imHost, sizeof(imHost)))) throw(-1);
    if(CHECK_ERROR(hipMemcpyToSymbol(im3D, this, sizeof(im3D_pars)))) throw(-1);
    //if(CHECK_ERROR(hipDeviceSetCacheConfig(hipFuncCachePreferShared))) throw(-1);
    switch(mode3D) {
    case 0:hipLaunchKernelGGL(( render3D) , dim3(dim3(bNx/NW,bNy/NW)),dim3(dim3(NW,NW)), 0, 0, ); break;
    case 1:
#ifdef SURF
   hipLaunchKernelGGL(( surf_render3D) , dim3(dim3(bNx/NW,bNy/NW)),dim3(dim3(NW,NW)), 0, 0, );
#else//SURF
      printf(" im3D.cu -DSURF im3Dsurf\n");
#endif//SURF
      break;
    case 2:hipLaunchKernelGGL(( grad_render3D) , dim3(dim3(bNx/NW,bNy/NW)),dim3(dim3(NW,NW)), 0, 0, ); break;
    }
    if(imHost.draw_flag) {
      //if(mode3D<=1)
     hipLaunchKernelGGL(( draw_pal) , dim3(bNx/NW),dim3(NW), 0, 0, );
      if(mode3D>0)hipLaunchKernelGGL(( draw_pal3D) , dim3(NW),dim3(NW), 0, 0, );
      //if(imHost.palDim <= 2)hipLaunchKernelGGL(( draw_pal) , dim3(bNx/NW),dim3(NW), 0, 0, );
      //else if(imHost.palDim > 1)hipLaunchKernelGGL(( draw_pal3D) , dim3(NW),dim3(NW), 0, 0, );
    }
    if(imHost.negate_flag)hipLaunchKernelGGL(( negate) , dim3(bNx/NW),dim3(NW), 0, 0, );
    imHost.nFrame++;
    imHost.unbindAfterDraw();
    im2D.unmapAfterDraw();
  } catch(...) {
    printf("recalc3D_im3D: - .\n");
  }
}
#include <hipfft.h>
//inline __device__ float my_fabsC(float2& v) { return v.x;}//hypotf(v.x, v.y); }
// Magnitude of a complex value stored as float2 (re, im).
inline __device__ float my_fabsC(float2& v) {
  float re = v.x, im = v.y;
  return hypotf(re, im);
}
// Integer absolute value (branch form; equivalent to abs()).
inline __device__ int my_abs(int v) {
  if(v < 0) return -v;
  return v;
}
//inline __device__ int my_abs(int v) { return v==0?1:v>=0?v:-v; }
// Convert the half-spectrum produced by an R2C FFT (blockDim.x/2+1 complex
// values per row in dataC) into blockDim.x real magnitudes per row in dataR.
// The index mapping my_abs(blockDim.x/2 - threadIdx.x) mirrors the spectrum
// around its middle (fftshift-like layout, DC lands at the row center).
// Launch: one block per transform row, blockDim.x threads = output samples.
__global__ void cmplx2abs(hipfftComplex *dataC, hipfftReal *dataR) {
  //float* pC=(float*)(dataC+blockIdx.x*(blockDim.x/2+1));
  //dataR[blockIdx.x*blockDim.x+threadIdx.x] = pC[threadIdx.x];
  dataR[blockIdx.x*blockDim.x+threadIdx.x] = my_fabsC(dataC[blockIdx.x*(blockDim.x/2+1)+my_abs(blockDim.x/2-threadIdx.x)]);
}
#define CHECK_ERROR_FFT(err) CheckErrorFFT( err, __FILE__,__LINE__)
// Report a non-success cuFFT/hipFFT status with a descriptive message plus
// source file and line. Returns true if rs was an error, false on success.
bool CheckErrorFFT(hipfftResult rs, const char *file, int line) {
  if(rs == HIPFFT_SUCCESS) return false;
  const char* err=" cuFFT";  // fallback text for unknown status codes
  // fix: every case needs its own break — the original switch fell through,
  // so any matched status reported the text of the LAST case instead of its own
  switch(rs) {
    case HIPFFT_SUCCESS: err = "0, // The cuFFT operation was successful"; break;  // unreachable (early return above)
    case HIPFFT_INVALID_PLAN: err = "1, // cuFFT was passed an invalid plan handle"; break;
    case HIPFFT_ALLOC_FAILED: err = "2, // cuFFT failed to allocate GPU or CPU memory"; break;
    case HIPFFT_INVALID_TYPE: err = "3, // No longer used"; break;
    case HIPFFT_INVALID_VALUE: err = "4, // User specified an invalid pointer or parameter"; break;
    case HIPFFT_INTERNAL_ERROR: err = "5, // Driver or internal cuFFT library error"; break;
    case HIPFFT_EXEC_FAILED: err = "6, // Failed to execute an FFT on the GPU"; break;
    case HIPFFT_SETUP_FAILED: err = "7, // The cuFFT library failed to initialize"; break;
    case HIPFFT_INVALID_SIZE: err = "8, // User specified an invalid transform size"; break;
    case HIPFFT_UNALIGNED_DATA: err = "9, // No longer used"; break;
    case HIPFFT_INCOMPLETE_PARAMETER_LIST: err = "10, // Missing parameters in call"; break;
    case HIPFFT_INVALID_DEVICE: err = "11, // Execution of a plan was on different GPU than plan creation"; break;
    case HIPFFT_PARSE_ERROR: err = "12, // Internal plan database error"; break;
    case HIPFFT_NO_WORKSPACE: err = "13 // No workspace has been provided prior to plan execution"; break;
    default: break;  // keep the generic fallback text
  };
  fprintf(stderr, "%s in %s at line %d\n", err, file, line);
  return true;
}
// Replace buf in place by the magnitude spectrum of a 1D real-to-complex FFT
// along z: buf holds Nx*Ny rows of Nz floats; each row is transformed on the
// GPU and converted back to Nz magnitudes by cmplx2abs. Errors are reported
// and swallowed (best effort; GPU buffers may leak on a mid-sequence throw,
// as in the original).
void makeFFTz(float* buf, int Nx, int Ny, int Nz) {
  try {
    hipfftHandle plan;
    hipfftComplex *dataC; hipfftReal *dataR;
    // fix: compute element counts in size_t and use sizeof(hipfftReal)
    // instead of the magic '4' — the original int products 4*Nz*Nx*Ny could
    // overflow for large volumes
    size_t Nxyz = (size_t)Nx*Ny*Nz;                    // total real samples
    size_t Nspec = (size_t)Nx*Ny*(Nz/2+1);             // total complex samples (R2C half-spectrum)
    if(CHECK_ERROR(hipMalloc((void**)&dataC, sizeof(hipfftComplex)*Nspec))) throw(-1);
    if(CHECK_ERROR(hipMalloc((void**)&dataR, sizeof(hipfftReal)*Nxyz))) throw(-1);
    if(CHECK_ERROR(hipMemcpy(dataR, buf, sizeof(hipfftReal)*Nxyz, hipMemcpyHostToDevice))) throw(-1);
    if(CHECK_ERROR_FFT(hipfftPlan1d(&plan, Nz, HIPFFT_R2C, Nx*Ny))) throw(-1);
    if(CHECK_ERROR_FFT(hipfftExecR2C(plan, dataR, dataC))) throw(-1);
    if(CHECK_ERROR(hipDeviceSynchronize())) throw(-1);
    // one block per (ix,iy) row, Nz threads per block
    // NOTE(review): assumes Nz does not exceed the max threads per block — confirm callers
    hipLaunchKernelGGL(( cmplx2abs) , dim3(Nx*Ny),dim3(Nz), 0, 0, dataC, dataR);
    if(CHECK_ERROR(hipDeviceSynchronize())) throw(-1);
    if(CHECK_ERROR(hipMemcpy(buf, dataR, sizeof(hipfftReal)*Nxyz, hipMemcpyDeviceToHost))) throw(-1);
    if(CHECK_ERROR_FFT(hipfftDestroy(plan))) throw(-1);
    if(CHECK_ERROR(hipFree(dataC))) throw(-1);
    if(CHECK_ERROR(hipFree(dataR))) throw(-1);
  } catch(...) {
    printf(" makeFFTz.\n");
  }
}
// Upload the volume data arr (Nx x Ny x Nz) into the 3D texture array used
// by the render kernels (device-to-device when arr already lives in GPU
// memory), then (re)bind the texture with the current filter settings.
void im3D_pars::initCuda(Arr3D_pars& arr) {
  //printf("==============\n");
  //for(int ix=0; ix<Nx; ix++) for(int iy=0; iy<Ny; iy++) for(int iz=0; iz<Nz; iz++) arr.Arr3Dbuf[iz*Ny*Nx+iy*Nx+ix]=exp(-0.01*ix);
  // create transfer function texture
  //hipChannelFormatDesc channelDesc = hipCreateChannelDesc<float>();
  //if(CHECK_ERROR(hipMalloc3DArray(&data3D_texArray, &channelDesc, make_hipExtent(Nx,Ny,Nz)))) throw(-1);
  hipMemcpy3DParms myparms={0};
  myparms.srcPos = make_hipPos(0,0,0);
  myparms.dstPos = make_hipPos(0,0,0);
  // NOTE(review): the row pitch uses sizeof(floatT4im) as the element size
  // of Arr3Dbuf — confirm this matches the array's actual element type
  myparms.srcPtr = make_hipPitchedPtr(arr.Arr3Dbuf, Nx*sizeof(floatT4im), Nx, Ny);
  myparms.dstArray = data3D_texArray;
  myparms.extent = make_hipExtent(Nx,Ny,Nz);
  myparms.kind = arr.inGPUmem?hipMemcpyDeviceToDevice:hipMemcpyHostToDevice;
  if(CHECK_ERROR(hipMemcpy3D(&myparms))) throw(-1);
  //if(draw_edges_flag) draw_edges(imHost.fmax);
  initTex();
}
// Configure and bind the main data texture. Linear filtering is only enabled
// when it matches the renderer: (render_type==3)==filterMode_flag.
void im3D_pars::initTex() {
  data3D_tex.normalized = false;
  data3D_tex.filterMode = ((render_type==3)==filterMode_flag) ? hipFilterModeLinear : hipFilterModePoint;
  // Clamp out-of-range coordinates on all three axes.
  for(int axis=0; axis<3; axis++) data3D_tex.addressMode[axis] = hipAddressModeClamp;
  if(CHECK_ERROR(hipBindTextureToArray(data3D_tex, data3D_texArray))) throw(-1);
}
// Upload the auxiliary short2 "surface" data (starting at element offset sh
// inside arr.Arr3Dbuf) into its device array and rebind the surface texture.
// Compiled only when SURF support is enabled.
void im3D_pars::initCuda_surf(Arr3D_pars& arr, size_t sh) {
#ifdef SURF
  hipMemcpy3DParms myparms={0};
  myparms.srcPos = make_hipPos(0,0,0);
  myparms.dstPos = make_hipPos(0,0,0);
  // (the original computed size_t N=Nx*Ny*Nz here but never used it — removed)
  myparms.srcPtr = make_hipPitchedPtr(arr.Arr3Dbuf+sh, Nx*sizeof(short2), Nx, Ny);
  myparms.dstArray = data3Dsurf_texArray;
  myparms.extent = make_hipExtent(Nx,Ny,Nz);
  // The source may already live on the GPU; pick the matching copy direction.
  myparms.kind = arr.inGPUmem?hipMemcpyDeviceToDevice:hipMemcpyHostToDevice;
  if(CHECK_ERROR(hipMemcpy3D(&myparms))) throw(-1);
  initTex_surf();
#endif//SURF
}
// Configure and bind the surface-data texture: always point-sampled,
// unnormalized coordinates, clamped on every axis.
void im3D_pars::initTex_surf() {
  data3Dsurf_tex.normalized = false;
  data3Dsurf_tex.filterMode = hipFilterModePoint;
  for(int axis=0; axis<3; axis++) data3Dsurf_tex.addressMode[axis] = hipAddressModeClamp;
  if(CHECK_ERROR(hipBindTextureToArray(data3Dsurf_tex, data3Dsurf_texArray))) throw(-1);
}
// Restore palette/image defaults; when a view object is supplied, reset the
// 3D view state as well.
void reset(im3D_pars* p) {
  imHost.reset();
  imHost.set_lim(-1.f, 1.f);
  imHost.cyclic_pal = false;
  imHost.draw_flag = true;
  imHost.negate_flag = true;
  imHost.centric_pal = true;
  if(p != 0) p->reset();
}
// One-time GPU setup: open the visualization-options file, allocate the 3D
// texture array, and fill randArr with NW*NW random phases via a temporary
// CURAND state buffer. Throws -1 on any device-API failure.
void im3D_pars::init3D(Arr3D_pars& arr) {
  //::reset();
  optfid = open(optfName, O_RDWR|O_CREAT, 0644);
  if(optfid<0) printf(" %s, / \n", optfName);
  hipChannelFormatDesc channelDesc = hipCreateChannelDesc<floatT4im>();
  printf("im3D_pars::init3D: Nx,Ny,Nz=%d,%d,%d\n", Nx,Ny,Nz);
  if(CHECK_ERROR(hipMalloc3DArray(&data3D_texArray, &channelDesc, make_hipExtent(Nx,Ny,Nz)))) throw(-1);
  if(CHECK_ERROR(hipMalloc(&randArr, NW*NW*sizeof(float)))) throw(-1);
  // Temporary RNG states: seed NW*NW generators, fill randArr, then free them.
  // These calls were previously unchecked — now routed through CHECK_ERROR
  // like every other device call in this function.
  hiprandState_t *devStates;
  if(CHECK_ERROR(hipMalloc( (void **)&devStates, NW*NW*sizeof(hiprandState_t) ))) throw(-1);
  hipLaunchKernelGGL(( init_rand), dim3(NW),dim3(NW), 0, 0, devStates,randArr);
  if(CHECK_ERROR(hipDeviceSynchronize())) throw(-1);
  if(CHECK_ERROR(hipFree(devStates))) throw(-1);
  //initCuda(arr); ---- !!!!!!!!!!!!!!!!!!!
#ifdef SURF
  if(CHECK_ERROR(hipDeviceSynchronize())) throw(-1);
  channelDesc = hipCreateChannelDesc<short2>();
  if(CHECK_ERROR(hipMalloc3DArray(&data3Dsurf_texArray, &channelDesc, make_hipExtent(Nx,Ny,Nz)))) throw(-1);
  if(CHECK_ERROR(hipDeviceSynchronize())) throw(-1);
  //initCuda_surf(arr); ----- !!!!!!!!!!!!!!!!!!1
#endif//SURF
}
// Re-render only when running continuously (recalc_always) or when a one-shot
// redraw was requested (recalc_at_once). Updates runTime and a smoothed FPS
// estimate after each render.
void im3D_pars::recalc_func() {
  if(!(recalc_always || recalc_at_once)) return;
  if(recalc_at_once) recalc_at_once = false;   // one-shot request: consume it
  else xyz->step();                            // continuous mode: advance animation state
  cudaTimer tm; tm.start();
  // Optionally pre-render a background image for the 3D renderer.
  if(draw_bmp4backgrownd>=2 && render_type==3) {
    if(draw_bmp4backgrownd==2) recalc_im3D();
    else if(draw_bmp4backgrownd==3) recalc_sec_im3D();
    save_bmp4backgrownd();
  }
  if(render_type==2) recalc_im3D();
  else if(render_type==3) recalc3D_im3D();
  runTime = tm.stop();
  SmoothFPS = 0.9*SmoothFPS + 100./runTime;   // exponential moving average
  if(type_diag_flag>=2) printf("Frame %d (%.2f/%.2f fps), last run Times: %7.2f msec\n", imHost.nFrame, SmoothFPS, 1000./runTime, runTime);
}
// Command-line hook: "--sensor x y z" registers a probe point and consumes
// two argv entries (returns 2); anything else is delegated to the base class.
int im3D_pars::init_from_command_line(char** argv) {
  if(strcmp(argv[0], "--sensor") == 0) {
    float v[3];
    read_float3(v, argv[1]);
    icalcNdrop.add_sensor(v[0], v[1], v[2]);
    return 2;
  }
  return im3D_pars4save::init_from_command_line(argv);
}
// Fetch one voxel regardless of where the array currently lives (host memory,
// device memory, or — defensively — neither).
floatT4im Arr3D_pars::get_val_from_arr3D(int ix, int iy, int iz) {
  if(inCPUmem) return ((floatT4im*)Arr3Dbuf)[get_ind(ix,iy,iz)];
  // Zero-initialize: the original returned an uninitialized value when the
  // array was in neither CPU nor GPU memory.
  floatT4im res = floatT4im();
  if(inGPUmem) CHECK_ERROR(hipMemcpy(&res, get_ptr((sizeof(floatT4im)/sizeof(float))*ix,iy,iz), sizeof(floatT4im), hipMemcpyDeviceToHost));
  return res;
}
/*
__global__ void calc_limits(float* buf, float* fLims, int Nxv, int Nxa, int Nxs) {
float2 fLim;
float* pf=buf+blockIdx.x*Nxv+threadIdx.x;
fLim.x = fLim.y = *pf;
for(int i=0; i<Nxs; i++,pf+=Nxa*Nxv) {
float v=*pf;
if(v<fLim.x) fLim.x = v;
if(v>fLim.y) fLim.y = v;
}
__shared__ float2 fLim_sh[Nxv];
fLim_sh[threadIdx.x] = fLim;
__syncthreads();
if(threadIdx.x>warpSize) return;
for(int i=threadIdx.x; i<Nxv; i+=warpSize) {
float2 v=fLim_sh[i];
if(v.x<fLim.x) fLim.x = v.x;
if(v.y>fLim.y) fLim.y = v.y;
}
fLim_sh[threadIdx.x] = fLim;
if(threadIdx.x>0) return;
for(int i=0; i<warpSize; i++) {
float2 v=fLim_sh[i];
if(v.x<fLim.x) fLim.x = v.x;
if(v.y>fLim.y) fLim.y = v.y;
}
fLims[2*blockIdx.x ] = fLim.x;
fLims[2*blockIdx.x+1] = fLim.y;
}
void Arr3D_pars::set_lim_from_arr3D() {
if(inCPUmem) reset_min_max();
if(inGPUmem) {
float* fLims=0,* fLimsD=0;
CHECK_ERROR(hipMalloc((void**) &fLimsD, 2*Ny*sizeof(float)));
calc_limits<<<Ny,Nx>>>(Arr3Dbuf, fLimsD, Nx, Ny, Nz);
fLims=new float[2*Ny];
CHECK_ERROR(hipMemcpy(fLims, fLimsD, 2*Ny*sizeof(float), hipMemcpyDeviceToHost));
CHECK_ERROR(hipFree(fLimsD));
fMin = fLims[0]; fMax = fLims[1];
for(int i=0; i<Ny; i++) {
if(fLims[2*i ]<fMin) fMin = fLims[2*i ];
if(fLims[2*i+1]>fMax) fMax = fLims[2*i+1];
}
delete fLims;
}
}*/
| 23a5f587d2c36a71e0b23752711703e999a4ee64.cu | //im3D считывает и визуализирует трёхмерные поля,
//получение исходного кода: bzr checkout bzr+ssh://photon/Save/BZR-for-all/lev/im3D
//автор: Вадим Левченко VadimLevchenko@mail.ru
// запуск: ./im3D <имя-файла-массива> [<имя-файла-массива> ...]
//целевой размер массива от 100 до 1500 элементов по каждой координате
//предполагается, что файлы массивов записаны в формате массивов aivlib-а или drp
#include "cuda_math.h"
#include "fpal.h"
#include "im2D.h"
#include "im3D.hpp"
image2D im2D;
image_pars imHost; __constant__ image_pars im;
__constant__ im3D_pars im3D;
float runTime=0.0, SmoothFPS=0.0;
bool recalc_at_once=true, recalc_always=false, save_anim_flag=false, draw_edges_flag=false;
int anim_acc=0, render_type=3;
texture<floatT4im, cudaTextureType3D> data3D_tex;
cudaArray* data3D_texArray=0;
texture<short2, cudaTextureType3D> data3Dsurf_tex;
cudaArray* data3Dsurf_texArray=0;
const char* optfName="im3DI.opt";//Имя файла для сохранения опций визуализации
FILE* gpPipe=0;
int sec1Daxis=0;
//#include <string.h>
#include <fcntl.h>
#include <unistd.h>
#include <sys/types.h>
//#include <time.h>
#include <malloc.h>
char WinTitle[1024], addTitleStr[5]; int TitleStrInd=0;
const char* baseTitleStr="2"; int baseTitleFlag=1;
int optfid=-1; int im3DIopt_shift=0;
// Load one saved pair of option records (fpal_pars, then im3D_pars4save) from
// file fn. Each record on disk is prefixed by its size in bytes, so older
// (shorter) layouts remain readable.
void im3D_pars4save::load_from_file(const char* fn) {
  int optfid=open(fn, O_RDWR|O_CREAT, 0644);
  if(optfid<0) { printf("Не могу открыть файл %s, загрузка наборов опций визуализации невозможна\n", fn); return; }
  int sz,rs;
  rs = read(optfid, &sz, sizeof(sz));
  // close() on the early-exit paths: the original leaked the descriptor here.
  if(sz<=0 || sz>sizeof(fpal_pars)) { printf("Illegal Drop format\n"); close(optfid); return; }
  rs=read(optfid, &imHost, sz); printf("Load %dB fpal of %ldB", rs, sizeof(fpal_pars));
  rs = read(optfid, &sz, sizeof(sz));
  if(sz<=0 || sz>sizeof(im3D_pars4save)) { printf("Illegal Drop format\n"); close(optfid); return; }
  rs=read(optfid, this, sz); printf(" & %dB im3D of %ldB\n", rs, sizeof(im3D_pars4save));
  close(optfid);
}
// Compose the window title into the global WinTitle according to
// baseTitleFlag and the last-keystrokes buffer addTitleStr, and return it.
// TitleStr holds the "selector" characters; strpbrk() below decides which
// pieces of state to print.
char* im3D_pars::reset_title() {
  // Initialize TitleStr: the original left it uninitialized when
  // baseTitleFlag%3==0, so the strlen()/strncpy below read garbage.
  char* pTitle=WinTitle, TitleStr[20]="";
  if(baseTitleFlag%3>0) strcpy(TitleStr,baseTitleStr);
  // strncat (unlike the original strncpy) always null-terminates.
  strncat(TitleStr,addTitleStr,4);
  if(baseTitleFlag%3==1 && fName) { sprintf(pTitle, "%s ", fName); pTitle += strlen(pTitle); }
  if(baseTitleFlag%3==2 && dfName) { sprintf(pTitle, "%s ", dfName); pTitle += strlen(pTitle); }
  if(strpbrk(TitleStr,"23")) { sprintf(pTitle, "(%dx%dx%d)", Nx,Ny,Nz); pTitle += strlen(pTitle); }
  if(strpbrk(TitleStr,"xyzXYZ")) { sprintf(pTitle, "/(%dx%dx%d)", ix0,iy0,iz0); pTitle += strlen(pTitle); }
  if(strpbrk(TitleStr,"aA\001\023=-+_06789")) { sprintf(pTitle, " %g<f<%g", imHost.fmin,imHost.fmax); pTitle += strlen(pTitle); }
  if(strpbrk(TitleStr,"pP[]|?{}")) { sprintf(pTitle, " pal[%d]:(%g)^%g*%g*%g|%g;", imHost.palID, imHost.pscale, imHost.gamma_pal, imHost.brightness_coff, imHost.max_rgb, imHost.base_val); pTitle += strlen(pTitle); }
  if(strpbrk(TitleStr,"dDjJmM")) { sprintf(pTitle, " D/J/M:%g/%g/%g;", density, opacity, tstep); pTitle += strlen(pTitle); }
#ifdef CALC_TIME_DIAG
  extern float calcTime, calcPerf; extern int TimeStep;
  if(strpbrk(TitleStr,"bG")) { sprintf(pTitle, " calc: %.2f sec, %.2fG cells/sec; %d steps;", 1e-3*calcTime, calcPerf, TimeStep); pTitle += strlen(pTitle); }
#endif
  if(strpbrk(TitleStr,"tT\20")) { sprintf(pTitle, " transp: %s,%d", imHost.transparency_discrete_flag?"discr":"mode",imHost.transparency_mode); pTitle += strlen(pTitle); }
  return WinTitle;
}
// 3x3 rotation matrix about coordinate axis c (0=x, 1=y, 2=z) by phi degrees.
struct RotMatr {
  double v[3][3];
  RotMatr(int c, double phi) {
    phi *= M_PI/180;                 // degrees -> radians
    int cp=(c+1)%3, cm=(c+2)%3;      // the two axes orthogonal to c
    // Zero the row and column of the rotation axis. The original wrote
    // "= 0,0;" — a comma-operator typo for the literal 0.0 (same effect,
    // but clearly unintended).
    for(int i=0; i<3; i++) v[i][c] = v[c][i] = 0.0;
    v[c][c] = 1.0;
    v[cp][cp] = v[cm][cm] = cos(phi);
    v[cm][cp] = sin(phi); v[cp][cm] =-v[cm][cp];
  }
  // Left-multiply accumulate: this = M * this.
  void operator *= (RotMatr& M) {
    double vo[3][3];
    for(int i=0; i<3; i++) for(int j=0; j<3; j++) vo[i][j] = v[i][j];
    for(int i=0; i<3; i++) for(int j=0; j<3; j++) {
      double vn=0.0;
      for(int k=0; k<3; k++) vn += M.v[i][k]*vo[k][j];
      v[i][j] = vn;
    }
  }
};
// Derive a short file stem from fName: cut at the last '.' and the last ' ',
// replace the last '/' with '_'. Falls back to "image" when fName is unset.
std::string im3D_pars::getfName() {
  char stem[]="image.__________";
  // The initializer's trailing NUL at stem[sizeof-1] survives this strncpy.
  if(fName) strncpy(stem, fName, sizeof(stem)-1);
  char* p;
  if((p=strrchr(stem,'.')) != 0) *p = 0;
  if((p=strrchr(stem,' ')) != 0) *p = 0;
  if((p=strrchr(stem,'/')) != 0) *p = '_';
  return std::string(stem);
}
// Build "<drop_dir>/<stem>_NNNN<ext>". A negative it means "use the current
// frame number" (imHost.nFrame).
std::string im3D_pars::getfDropName(const char* ext, int it) {
  char drop_name[1024];
  // snprintf instead of sprintf: drop_dir/fName are externally supplied, so
  // bound the write to the buffer.
  snprintf(drop_name, sizeof(drop_name), "%s/%s_%04d%s", drop_dir, getfName().c_str(), (it>=0)?it:imHost.nFrame, ext);
  return std::string(drop_name);
}
// Write the current frame to a PNG file and bump the saved-image counter.
// Always returns false (callers treat the result as "needs redraw").
bool im3D_pars::save_png(int it) {
  std::string path = getfDropName(".png", it);
  im2D.out2png(path.c_str());
  ++imHost.nFpng;
  return false;
}
// Forward declaration: kernel that emits gnuplot annotation commands for the
// 3D view; launched from im3D_pars::save_gp() below.
__global__ void save_gp3D();
// Tile geometry used to size the save_gp3D launch grid in save_gp().
const int tileSz=16, tilesN=16;
// Save the frame as <name>.png plus a gnuplot script <name>.gp that displays
// it with a calibrated colorbar (the x2 axis spans the palette limits).
// For the 3D renderer the save_gp3D kernel additionally prints annotation
// commands while stdout is redirected into the script file.
bool im3D_pars::save_gp(int it) {
std::string png_name=getfDropName(".png", it);
std::string gp_name=getfDropName(".gp", it);
im2D.out2png(png_name.c_str());
//sprintf( gp_name, "a.gp", fName, imHost.nFrame);
FILE* gp=fopen(gp_name.c_str(), "w"),* old_stdout=stdout;
fprintf(gp, "unset key\n");
fprintf(gp, "unset border\n");
fprintf(gp, "unset xtics\n");
fprintf(gp, "set x2tics border\n");
fprintf(gp, "set x2range [%g:%g]\n", imHost.fmin, imHost.fmax);
fprintf(gp, "unset ytics\n");
//fprintf(gp, "load \"labels.gp\"\n");
//printf("viewRotation: %g, %g\n", viewRotation[0], viewRotation[1]);
//printf("viewTranslation: %g, %g, %g\n", viewTranslation[0], viewTranslation[1], viewTranslation[2]);
if(render_type==3) {
const int Sgp=(tilesN-1)*tileSz;
// NOTE(review): stdout is reassigned so the kernel's printf output lands in
// the .gp file; this relies on stdout being an assignable FILE* lvalue
// (glibc-specific) — confirm before porting to another libc.
stdout = gp;
if(CHECK_ERROR(cudaDeviceSynchronize())) throw(-1);
save_gp3D <<<dim3((im2D.Nx+Sgp-1)/Sgp,(im2D.Ny+Sgp-1)/Sgp),dim3(tilesN,tilesN)>>>();
if(CHECK_ERROR(cudaDeviceSynchronize())) throw(-1);
stdout = old_stdout;
}
fprintf(gp, "plot[0:%g][0:%g] \"%s\" binary filetype=png dx=1 dy=1 with rgbimage\n", float(bNx), float(bNy), png_name.c_str());
fprintf(gp, "pause -1\n");
fclose(gp);
if(type_diag_flag>=0) printf("Зарамочное оформление сохранено в %s\n", gp_name.c_str());
return false;
}
floatT4im get_val_from_arr3D(int ix, int iy, int iz);
void reset(im3D_pars* p=0);
#if DATA_VECTOR_SZ==1
std::string im3D_pars::save_section(int it) {
printf("f(%d,%d,%d) = %g\n", ix0, iy0, iz0, get_val_from_arr3D(ix0, iy0, iz0));
std::string dat_name=getfDropName(".dat",it);
FILE* dat=fopen(dat_name.c_str(), "w");
for(int i=0; i<Nx; i++) fprintf(dat, "%d %g\n", i, get_val_from_arr3D(i, iy0, iz0));
fprintf(dat, "\n\n");
for(int i=0; i<Ny; i++) fprintf(dat, "%d %g\n", i, get_val_from_arr3D(ix0, i, iz0));
fprintf(dat, "\n\n");
for(int i=0; i<Nz; i++) fprintf(dat, "%d %g\n", i, get_val_from_arr3D(ix0, iy0, i));
fclose(dat);
return dat_name;
}
// Pipe a plot of the profiles written by save_section() into a persistent
// gnuplot process; once the pipe exists, "replot" is issued instead of "plot".
void im3D_pars::plot_section() {
const char* re=gpPipe?"re":"";
if(gpPipe==NULL) gpPipe = popen("gnuplot", "w");
// Wrap-around table: sec[axis+1], sec[axis+2] are the two fixed coordinates.
int sec[]={ix0,iy0,iz0,ix0,iy0};
// sec1Daxis<3 plots the single selected axis; sec1Daxis==3 overlays all three
// profiles shifted to the marked point.
if(sec1Daxis<3) fprintf(gpPipe, "set style data l;\n%splot '%s' i %d t '%c:(%d,%d)'\n", re, save_section().c_str(), sec1Daxis, "xyz"[sec1Daxis], sec[sec1Daxis+1], sec[sec1Daxis+2]);
else fprintf(gpPipe, "set style data l;\n%splot '%s' u ($1-%d):2 i 0 t '(ix-%d)', '' u ($1-%d):2 i 1 t '(iy-%d)', '' u ($1-%d):2 i 2 t '(iz-%d)'\n", re, save_section().c_str(), ix0,ix0,iy0,iy0,iz0,iz0);
fflush(gpPipe);
}
#elif DATA_VECTOR_SZ==3
std::string im3D_pars::save_section(int it) {
floatT4im v=get_val_from_arr3D(ix0, iy0, iz0);
printf("f(%d,%d,%d) = (%g,%g,%g,%g)\n", ix0, iy0, iz0, v.x, v.y, v.z, v.w);
std::string dat_name=getfDropName(".dat",it);
FILE* dat=fopen(dat_name.c_str(), "w");
for(int i=0; i<Nx; i++) {
floatT4im v=get_val_from_arr3D(i, iy0, iz0);
fprintf(dat, "%d %g %g %g %g\n", i, v.x, v.y, v.z, v.w);
}
fprintf(dat, "\n\n");
for(int i=0; i<Ny; i++) {
floatT4im v=get_val_from_arr3D(ix0, i, iz0);
fprintf(dat, "%d %g %g %g %g\n", i, v.x, v.y, v.z, v.w);
}
fprintf(dat, "\n\n");
for(int i=0; i<Nz; i++) {
floatT4im v=get_val_from_arr3D(ix0, iy0, i);
fprintf(dat, "%d %g %g %g %g\n", i, v.x, v.y, v.z, v.w);
}
fclose(dat);
return dat_name;
}
// Vector-field variant: pipe save_section() output to a persistent gnuplot
// process. In 3D mode (render_type==3) the components are shown as a space
// curve via "splot"; in 2D mode each component is plotted against the index.
void im3D_pars::plot_section() {
// "re" -> replot when the pipe already exists; "s" -> splot for the 3D view.
const char* re=gpPipe?"re":(render_type==3?"s":"");
if(gpPipe==NULL) gpPipe = popen("gnuplot", "w");
// Wrap-around table: sec[axis+1], sec[axis+2] are the two fixed coordinates.
int sec[]={ix0,iy0,iz0,ix0,iy0};
if(render_type==3) {
if(sec1Daxis<3) fprintf(gpPipe, "set ticslevel 0; set style data lp;\n%splot '%s' u 2:3:4 i %d t '%c:(%d,%d)'\n", re, save_section().c_str(), sec1Daxis, "xyz"[sec1Daxis], sec[sec1Daxis+1], sec[sec1Daxis+2]);
else fprintf(gpPipe, "set ticslevel 0; set style data lp;\n%splot '%s' u 2:3:4 i 0 t '(ix-%d)', '' u 2:3:4 i 1 t '(iy-%d)', '' u 2:3:4 i 2 t '(iz-%d)'\n", re, save_section().c_str(), ix0,iy0,iz0);
} else if(render_type==2) {
if(sec1Daxis<3) fprintf(gpPipe, "set style data l;\n%splot '%s' u 1:2 i %d t '[%d,%d].x', '' u 1:3 i %d t '.y', '' u 1:4 i %d t '.z'\n", re, save_section().c_str(), sec1Daxis, sec[sec1Daxis+1], sec[sec1Daxis+2], sec1Daxis, sec1Daxis);
else fprintf(gpPipe, "set style data l;\n%splot '%s' u ($1-%d):2 i 0 t '(ix-%d).x', '' u ($1-%d):3 i 0 t '.y', '' u ($1-%d):4 i 0 t '.z', '' u ($1-%d):2 i 1 t '(iy-%d).x', '' u ($1-%d):3 i 1 t '.y', '' u ($1-%d):4 i 1 t '.z', '' u ($1-%d):2 i 2 t '(iz-%d).x', '' u ($1-%d):3 i 2 t '.y', '' u ($1-%d):4 i 2 t '.z'\n", re, save_section().c_str(), ix0,ix0,ix0,ix0,iy0,iy0,iy0,iy0,iz0,iz0,iz0,iz0);
//else fprintf(gpPipe, "set style data l;\n%splot '%s' u ($1-%d):2 i 0 t '(ix-%d)', '' u ($1-%d):2 i 1 t '(iy-%d)', '' u ($1-%d):2 i 2 t '(iz-%d)'\n", re, save_section().c_str(), ix0,ix0,iy0,iy0,iz0,iz0);
}
fflush(gpPipe);
}
#endif
// Release image and device resources before process exit (called on ESC).
void im3D_pars::clear4exit() {
im2D.clear();
CHECK_ERROR(cudaFreeArray(data3D_texArray));
CHECK_ERROR(cudaFreeArray(data3Dsurf_texArray));
CHECK_ERROR(cudaFree(randArr));
}
void save_bmp4backgrownd();
any_idle_func_struct xyz_void,* xyz=&xyz_void;
// Idle-loop animator for the 3D view: each step() adds a fixed increment to
// one float parameter (a rotation angle or the zoom translation).
struct idle_func_struct3D: public any_idle_func_struct {
float* par, val;   // par: animated parameter; val: per-step increment
void set(float* _par, float _val) { par = _par; val = _val; }
void step() { *par += val; }
} xyz3D;
// Idle-loop animator for the 2D view: each step() moves one slice index by
// di, wrapping around inside [0, N).
struct idle_func_struct2D: public any_idle_func_struct {
int* i0, N, di;   // i0: animated slice index; N: wrap bound; di: per-step shift
void set(int* _i0, int _N, int _di) { i0=_i0; N=_N; di=_di; }
void step() { *i0 += di; if(*i0<0) *i0=N-1; else if(*i0>=N) *i0=0; }
} xyz2D;
idle_func_calc icalc;
// idle_func_calc extended with "sensors": probe points whose field values are
// appended to sensors.dat after every calculation step and can be plotted
// through a gnuplot pipe.
template<class Tflt>
struct idle_func_calcNdrop: public idle_func_calc {
  FILE* sensorsStr;   // scratch stream for sensors.dat (opened per use)
  int* sensors;       // malloc/realloc'ed array of Nsensors (ix,iy,iz) triplets
  int Nsensors;
  // Initializer list reordered to match declaration order.
  idle_func_calcNdrop(): sensorsStr(0), sensors(0), Nsensors(0) {}
  // The buffer comes from malloc/realloc, so it must be released with free();
  // the original used `delete sensors`, which is undefined behavior.
  ~idle_func_calcNdrop() { free(sensors); }
  // Register a probe at grid point (ix,iy,iz); duplicates are rejected.
  // Re-creates sensors.dat with a header listing all registered probes.
  void add_sensor(int ix, int iy, int iz) {
    int* pi=sensors;
    for(int i=0; i<Nsensors; i++, pi+=3) if(pi[0] == ix && pi[1] == iy && pi[2] == iz)
      { printf("Сенсор (%d,%d,%d) уже задан. Вы делаете что-то не то!\n", ix, iy, iz); return; }
    Nsensors++;
    printf("Создаю новый сенсор в точке (%d,%d,%d), теперь их %d, файл <sensors.dat> будет очищен.\n", ix, iy, iz, Nsensors);
    if(sensors == 0) sensors = (int*)malloc(Nsensors*3*sizeof(int));
    else sensors = (int*)realloc(sensors, Nsensors*3*sizeof(int));
    pi = sensors+3*(Nsensors-1);
    pi[0] = ix; pi[1] = iy; pi[2] = iz;
    sensorsStr = fopen("sensors.dat", "w");
    pi=sensors;
    fprintf(sensorsStr, "#");
    for(int i=0; i<Nsensors; i++, pi+=3) fprintf(sensorsStr, "\t(%d,%d,%d)", pi[0],pi[1],pi[2]);
    fprintf(sensorsStr, "\n");
    fclose(sensorsStr);
  }
#if DATA_VECTOR_SZ==1
  // Advance the calculation, then append one "t value..." line per sensor.
  void step() {
    idle_func_calc::step();
    if(Nsensors==0) return;
    sensorsStr = fopen("sensors.dat", "a");
    fprintf(sensorsStr, "%g", t);
    int* pi=sensors;
    for(int i=0; i<Nsensors; i++, pi+=3) fprintf(sensorsStr, "\t%g", get_val_from_arr3D(pi[0], pi[1], pi[2]));
    fprintf(sensorsStr, "\n");
    fclose(sensorsStr);
  }
  // Plot all sensor records against time in a persistent gnuplot pipe.
  void plot_sensors() {
    if(Nsensors==0) return;
    if(gpPipe==NULL) gpPipe = popen("gnuplot", "w");
    int* pi=sensors;
    fprintf(gpPipe, "set style data l;\nplot 'sensors.dat' u 1:2 t '%d,%d,%d'", pi[0],pi[1],pi[2]); pi+=3;
    for(int i=1; i<Nsensors; i++, pi+=3) fprintf(gpPipe, ", '' u 1:%d t '%d,%d,%d'", i+2, pi[0],pi[1],pi[2]);
    fprintf(gpPipe, "\n");
    fflush(gpPipe);
  }
#elif DATA_VECTOR_SZ==3
  // Advance the calculation, then append one "t x y z ..." line per sensor
  // (three components per probe).
  void step() {
    idle_func_calc::step();
    if(Nsensors==0) return;
    sensorsStr = fopen("sensors.dat", "a");
    fprintf(sensorsStr, "%g", t);
    int* pi=sensors;
    for(int i=0; i<Nsensors; i++, pi+=3) {
      floatT4im v=get_val_from_arr3D(pi[0], pi[1], pi[2]);
      fprintf(sensorsStr, "\t%g\t%g\t%g", v.x, v.y, v.z);
    }
    fprintf(sensorsStr, "\n");
    fclose(sensorsStr);
  }
  // Plot sensor records: as 3D space curves in the 3D view, or as per-component
  // time series in the 2D view.
  void plot_sensors() {
    if(Nsensors==0) return;
    if(gpPipe==NULL) gpPipe = popen("gnuplot", "w");
    int* pi=sensors;
    if(render_type==3) {
      fprintf(gpPipe, "set style data lp; set ticslevel 0;\nsplot 'sensors.dat' u 2:3:4 t '%d,%d,%d'", pi[0],pi[1],pi[2]); pi+=3;
      for(int i=1; i<Nsensors; i++, pi+=3) fprintf(gpPipe, ", '' u %d:%d:%d t '%d,%d,%d'", 3*i+2,3*i+3,3*i+4, pi[0],pi[1],pi[2]);
    } else if(render_type==2) {
      fprintf(gpPipe, "set style data l;\nplot 'sensors.dat' u 1:2 t '[%d,%d,%d].x', '' u 1:3 t '[%d,%d,%d].y', '' u 1:4 t '[%d,%d,%d].z'", pi[0],pi[1],pi[2], pi[0],pi[1],pi[2], pi[0],pi[1],pi[2]); pi+=3;
      for(int i=1; i<Nsensors; i++, pi+=3) fprintf(gpPipe, ", '' u 1:%d t '[%d,%d,%d].x', '' u 1:%d t '[%d,%d,%d].y', '' u 1:%d t '[%d,%d,%d].z'", 3*i+2, pi[0],pi[1],pi[2],3*i+3, pi[0],pi[1],pi[2],3*i+4, pi[0],pi[1],pi[2]);
    }
    fprintf(gpPipe, "\n");
    fflush(gpPipe);
  }
#endif
};
idle_func_calcNdrop<floatT4im> icalcNdrop;
//void add_sensor(int ix, int iy, int iz) { icalcNdrop.add_sensor(ix, iy, iz); }
#include<curand.h>
#include<curand_kernel.h>
// One thread per table entry: seed a CURAND generator (fixed seed 1234,
// sequence = tid for reproducibility) and store one random angle.
__global__ void init_rand(curandState *states, float* randArr) {
unsigned int tid = threadIdx.x + blockDim.x * blockIdx.x;
curand_init(1234, tid, 0, &states[tid]); // Initialize CURAND
randArr[tid] = 2.*M_PI*curand_uniform (&states[tid]); // angle in (0, 2*pi]: curand_uniform returns (0,1]
}
// Scalar "magnitude" used by the min/max scan: identity for float, Euclidean
// length for vector texel types (length() comes from cuda_math.h).
__device__ float get_float4lim(float v) { return v; }
__device__ float get_float4lim(float2 v) { return length(v); }
__device__ float get_float4lim(float4 v) { return length(v); }
// Per-block min/max scan of the bound data3D texture over the sub-box
// [IB, IE); each block writes its partial limits (min in .x, max in .y) to
// fLims. The host guarantees blockDim.x == Nthr.x*Nthr.y*Nthr.z <= 512.
__global__ void calc_limits3D(uint3 IB, uint3 IE, uint3 blkSz, uint3 Nthr, float2* fLims) {
  float2 fLim;
  // Map the flat thread index onto this thread's 3D starting offset.
  IB+=blkSz*blockIdx*make_uint3(blockDim)+make_uint3(threadIdx.x/(Nthr.y*Nthr.z), (threadIdx.x/Nthr.z)%Nthr.y, threadIdx.x%Nthr.z);
  IE=min(IE,IB+blkSz);
  //if(threadIdx.x==0) printf("Blk %d from (%d,%d,%d) to (%d,%d,%d)\n",blockIdx.x+gridDim.x*(blockIdx.y+gridDim.y*blockIdx.z),IB.x,IB.y,IB.z, IE.x,IE.y,IE.z);
  fLim.x = fLim.y = get_float4lim(tex3D(data3D_tex, IB.x,IB.y,IB.z));
  for(int ix=IB.x; ix<IE.x; ix+=Nthr.x) for(int iy=IB.y; iy<IE.y; iy+=Nthr.y) for(int iz=IB.z; iz<IE.z; iz+=Nthr.z) {
    float v=get_float4lim(tex3D(data3D_tex, ix,iy,iz));
    if(v<fLim.x) fLim.x = v;
    if(v>fLim.y) fLim.y = v;
  }
  __shared__ float2 fLim_sh[512];   // one slot per thread (blockDim.x <= 512)
  fLim_sh[threadIdx.x] = fLim;
  __syncthreads();
  if(threadIdx.x >= warpSize) return;   // reduction continues on the first warp only
  for(int i=threadIdx.x; i<blockDim.x; i+=warpSize) {
    float2 v=fLim_sh[i];
    if(v.x<fLim.x) fLim.x = v.x;
    if(v.y>fLim.y) fLim.y = v.y;
  }
  fLim_sh[threadIdx.x] = fLim;
  // Make the per-lane partials visible to lane 0 before it reads them: with
  // independent thread scheduling (Volta+) the warp is not in lockstep, so an
  // explicit intra-warp barrier is required here. The mask covers only the
  // lanes that exist when blockDim.x < 32.
  __syncwarp((blockDim.x>=32) ? 0xffffffffu : ((1u<<blockDim.x)-1u));
  if(threadIdx.x>0) return;
  // Bound the final loop by blockDim.x too: for tiny blocks the original read
  // uninitialized shared-memory slots beyond the thread count.
  for(int i=0; i<warpSize && i<blockDim.x; i++) {
    float2 v=fLim_sh[i];
    if(v.x<fLim.x) fLim.x = v.x;
    if(v.y>fLim.y) fLim.y = v.y;
  }
  fLims[blockIdx.x+gridDim.x*(blockIdx.y+gridDim.y*blockIdx.z)] = fLim;
}
// Host-side min/max of the bound data3D texture over the sub-box starting at
// IB with extent N; returns the limits as float2 (min in .x, max in .y).
float2 set_lim_from_tex(uint3 IB, uint3 N) {
  // Choose the per-block thread-box shape Nthr: sort the extents descending
  // (recording the permutation in ind), grow the thread box to <=512 threads,
  // then undo the permutation so Nthr matches the original axis order.
  int ind=0; uint3 Ns=N, Nthr;
  if(Ns.x<Ns.y) { ind += 3; int t=Ns.x; Ns.x=Ns.y; Ns.y=t; }
  if(Ns.y<Ns.z) { ind ++; int t=Ns.y; Ns.y=Ns.z; Ns.z=t; }
  if(Ns.x<Ns.y) { ind ++; int t=Ns.x; Ns.x=Ns.y; Ns.y=t; }
  for(Nthr.z=1; Nthr.z<8&&Nthr.z<Ns.z; Nthr.z*=2);
  for(Nthr.y=1; Nthr.y*Nthr.z<64&&Nthr.y<Ns.y; Nthr.y*=2);
  for(Nthr.x=1; Nthr.x*Nthr.y*Nthr.z<512&&Nthr.x<Ns.x; Nthr.x*=2);
  if(ind%3==2) { int t=Nthr.x; Nthr.x=Nthr.y; Nthr.y=t; }
  if(ind%3>=1) { int t=Nthr.y; Nthr.y=Nthr.z; Nthr.z=t; }
  if(ind >=3) { int t=Nthr.x; Nthr.x=Nthr.y; Nthr.y=t; }
  // One block per 512^3 tile; each block writes one partial limit pair.
  uint3 Sblk=make_uint3(512), Nblk=(N+(Sblk-1))/Sblk;
  int NNblk=Nblk.x*Nblk.y*Nblk.z;
  float2 fLim,* fLims=0,* fLimsD=0;
  if(CHECK_ERROR(cudaMalloc((void**) &fLimsD, NNblk*sizeof(float2)))) throw(-1);
  calc_limits3D<<<Nblk,Nthr.x*Nthr.y*Nthr.z>>>(IB, IB+N, Sblk, Nthr, fLimsD);
  fLims=new float2[NNblk];
  if(CHECK_ERROR(cudaMemcpy(fLims, fLimsD, NNblk*sizeof(float2), cudaMemcpyDeviceToHost))) throw(-1);
  CHECK_ERROR(cudaFree(fLimsD));
  // Reduce the per-block partial limits on the host.
  fLim = *fLims;
  for(int i=1; i<NNblk; i++) {
    if(fLims[i].x<fLim.x) fLim.x = fLims[i].x;
    if(fLims[i].y>fLim.y) fLim.y = fLims[i].y;
  }
  delete[] fLims;   // was `delete fLims`: array form is required for new[]
  return fLim;
}
int print_help();
// Print the full hot-key reference to stdout (the text itself is in Russian
// and is user-facing output, so it is left untranslated), substituting the
// current parameter values, then append the palette help from imHost.
void im3D_pars::print_help() {
::print_help();
printf("\
======= Общее управление программой:\n\
«ESC» \tВыход из программы\n\
3¦2 \tпереключает рендеринг 3D¦2D в сечениях (%dD)\n\
4 \tв режиме 3D переключает режим визуализации потенциал/градиентный режим/на поверхности\n\
<Enter¦BackSpace>\tПереход к следующему¦предыдущему массиву\n\
w¦W \tСохранение текущего набора опций визуализации в файл «%s»¦то же, но предыдущий набор не переписывается, можно сохранить произвольное число наборов последовательно\n\
r¦R \tЗагрузка ранее сохранённых наборов опций последовательно¦загрузка без перехода к следующему набору\n\
«Ctr-r»\tСброс параметров в значения по умолчанию\n\
f¦F \tПереход к началу¦концу файла сохранённых наборов опций\n\
v¦V \tУвеличение¦уменьшение уровня вывода диагностики (%d)\n\
«Ctr-v»\tПечатает диагностику, особено актуально, если заголовок окна не виден\n\
«Ctr-w»\tПереключает режим вывода в заголовок окна диагностики по умолчанию\n\
s¦S \tСохранение картинки в формате png|вместе с зарамочным оформлением в gnuplot\n\
~ \tВключает показ положения выделенной точки (x0,y0,z0), xyz при этом работают в режиме 2D\n\
#¦$¦%% \tПереключение режима зарамочного оформления режима 3D: сетка¦рёбра бокса¦передний план\n\
@ \tПереключает режим фона: однотонный/сохранённая картинка/2D сечения через выделенную точку/через сетку 3D\n\
! \tСохраняет картинку для фона\n\
«Ctr-z»\tУстанавливает координаты точки, относительно которой происходит вращение, в значение выделенной, при этом сдвигается сетка 3D\n\
k¦K \tУменьшение¦увеличение ширины линий контура\n\
«Ctr-k»\tВключает режим прорисовки линий контура (в 2D)\n\
m¦M \tУменьшение¦увеличение шага вдоль луча для соответствующего изменения точности (%g), ВНИМАНИЕ: при мелком шаге может очень медленно прорисовывать\n\
e¦E \tРазмазывание луча по горизонтали для соответствующего изменения муара (%g),\n\
d¦D \tУвеличение¦уменьшение плотности цвета при суммировании вдоль луча (%g)\n\
j¦J \tУменьшение¦увеличение порога цветовой плотности (%g)\n\
«Ctr-f»\tПереключает режим интерполяции в текстуре с режима по умолчанию (линейный в 3D/point в 2D)\n\
«Ctr-d»\tв 3D режиме отсекает часть массива\n\
«Ctr-L»\tЧитает параметры командной строки из текстового файла <add.opt>, формат: 1 параметр на строку, список значений без кавычек\n\
a¦A \tУстановка пределов палитры из пределов текущего массива ¦ из значений fMin..fMax\n\
«Ctr-a»\tУстановка значений fMin..fMax из текущих пределов палитры\n\
«Ctr-s»\tУстановка пределов палитры, используя пределы массива в сечении поперёк выбранной оси\n\
1 \tпереключает (циклически, по xyz) ось, вдоль которой строится одномерный график в gnuplot (%c)\n\
o¦«Ctr-o»\tВыводит в окно gnuplot сечение вдоль выбранной оси¦то же с перерисовкой\n\
O¦Q \tДля точки (x0,y0,z0): Печатает в терминале значение текущего поля и выводит в файл сечения вдоль лучей, проходящих через неё¦Добавляет сенсор\n\
q¦«Ctr-q»\tсохраняет значения сенсоров в файле sensors.dat¦выводит в окно gnuplot запись сенсоров\n\
======= Управление динамикой:\n\
g¦G \tОтключение¦включение постоянной перерисовки в цикле GLUT (%d)\n\
xyz¦XYZ\tВ режиме 2D, а также в 3D в режиме визуализации положения сечений: Увеличение¦уменьшение координат выделенной точки параллелепипеда данных (%d,%d,%d)\n\
xyz¦XYZ\tВ режиме 3D: Вращение вокруг осей x,y,z вперёд¦назад (%g,%g,%g)\n\
u¦U \tВ режиме 3D: Приближение¦удаление объекта (%g)\n\
======= Управление мышью (L¦R¦M --- левая¦правая¦средняя кнопки):\n\
L \tВ режиме 2D переустанавливает срезы, исходя из координат выбранной точки\n\
L¦R¦M \tВ режиме 3D: вращение¦изменение масштаба¦сдвиг рисунка\n\
«Ctr-L»\tСдвиг картинки под курсором\n\
В районе палитры (верхние 20 точек):\n\
L¦R¦M \tустанавливает нижний¦верхний пределы¦центр палитры, исходя из x-координаты выбранной точки\n\
L¦R \tВ режиме «Ctl-t» (бинарной прозрачности) делает цвет прозрачным¦видимым\n\
", render_type, optfName, type_diag_flag, tstep, randR, density, opacity, "xyz"[sec1Daxis], recalc_always, ix0, iy0, iz0, viewRotation[0], viewRotation[1], viewRotation[2], viewTranslation[2]);
imHost.print_help();
}
// normal shift Ctrl
//«DEL»
//` 67 %^&* `1234567890
// i I e yu []
// ;' H :" d ghj ;'\
// n ,. BN <> zx bnm,./
// Keyboard dispatcher for all visualization hot-keys (see print_help()).
// Returns true when the key was handled; unhandled keys are offered to
// imHost.key_func(). recalc_at_once is set optimistically and cleared for
// keys that need no redraw. Several case labels fall through intentionally.
bool im3D_pars::key_func(unsigned char key, int x, int y) {
  recalc_at_once=true;
  size_t rs=0;
  // Rolling 4-char buffer of recent keystrokes shown in the window title.
  // The index is reduced first: the original read addTitleStr[TitleStrInd]
  // with an unbounded index — out of bounds once TitleStrInd grew past 4.
  TitleStrInd %= 4;
  if(key != addTitleStr[TitleStrInd]) addTitleStr[TitleStrInd++] = key;
  switch(key) {
  case 'A': imHost.set_lim(fMin, fMax); return true;
  case 'a': { float2 fLim=set_lim_from_tex(make_uint3(0,0,0), make_uint3(Nx,Ny,Nz)); imHost.set_lim(fLim.x, fLim.y); } return true;
//  case 'a': { float2 fLim=make_float2(-0.15,+0.15); imHost.set_lim(fLim.x, fLim.y); } return true;
  case 1: { fMin = imHost.fmin; fMax = imHost.fmax; } return true;   // Ctrl-a
  case 19: { float2 fLim=make_float2(-1,1);                          // Ctrl-s: limits from the selected cross-section
    switch(sec1Daxis) {
    case 0: fLim=set_lim_from_tex(make_uint3(ix0,0,0), make_uint3(1,Ny,Nz)); break;
    case 1: fLim=set_lim_from_tex(make_uint3(0,iy0,0), make_uint3(Nx,1,Nz)); break;
    case 2: fLim=set_lim_from_tex(make_uint3(0,0,iz0), make_uint3(Nx,Ny,1)); break;
    }; imHost.set_lim(fLim.x, fLim.y);
  } return true;
  case 18: ::reset(this); return true;                               // Ctrl-r
  case 'w': {
    printf("Drop %ldB fpal & %ldB im3D\n", sizeof(fpal_pars), sizeof(im3D_pars4save));
    if(optfid>=0 && im3DIopt_shift) rs=lseek(optfid,-im3DIopt_shift, SEEK_CUR);
  }  // fall through: 'w' rewinds over the previous record, then writes like 'W'
  case 'W': if(optfid>=0) {
    int sz=sizeof(fpal_pars); im3DIopt_shift = 0;
    rs=write(optfid, &sz, sizeof(sz)); im3DIopt_shift += rs;
    rs=write(optfid, &imHost, sz); im3DIopt_shift += rs;
    sz = sizeof(im3D_pars4save);
    rs=write(optfid, &sz, sizeof(sz)); im3DIopt_shift += rs;
    rs=write(optfid, this, sz); im3DIopt_shift += rs;
  } recalc_at_once=false; return true;
  case 'R': if(optfid>=0 && im3DIopt_shift) rs=lseek(optfid,-im3DIopt_shift, SEEK_CUR);
  // fall through: 'R' rewinds first, then loads like 'r'
  case 'r': if(optfid>=0) {
    int sz=ld_sz.x;   // a non-zero ld_sz overrides the on-disk record size
    im3DIopt_shift = 0;
    if(sz==0) {
      rs = read(optfid, &sz, sizeof(sz));
      if(sz<=0 || sz>sizeof(fpal_pars)) { printf("Illegal Drop format\n"); return true; }
      im3DIopt_shift += rs;
    }
    rs=read(optfid, &imHost, sz); printf("Load %ldB fpal of %ldB", rs, sizeof(fpal_pars)); im3DIopt_shift += rs;
    sz=ld_sz.y;
    if(sz==0) {
      rs = read(optfid, &sz, sizeof(sz));
      if(sz<=0 || sz>sizeof(im3D_pars4save)) { printf("Illegal Drop format\n"); return true; }
      im3DIopt_shift += rs;
    }
    rs=read(optfid, this, sz); printf(" & %ldB im3D of %ldB\n", rs, sizeof(im3D_pars4save)); im3DIopt_shift += rs;
    initTex();
  } return true;
  case 'f': if(optfid>=0) lseek(optfid,0, SEEK_SET); recalc_at_once=false; return true;
  case 'F': if(optfid>=0) lseek(optfid,0, SEEK_END); recalc_at_once=false; return true;
  case 23: baseTitleFlag ++; return true;                            // Ctrl-w
  case 22: recalc_at_once=false;                                     // Ctrl-v: print diagnostics
    printf("%s\nFrame %d (%.2f/%.2f fps), last run Times: %7.2f msec\n", WinTitle, imHost.nFrame, SmoothFPS, 1000./runTime, runTime);
    return true;
  case 'v': recalc_at_once=false; type_diag_flag++; return true;
  case 'V': recalc_at_once=false; type_diag_flag--; return true;
  case 'S': recalc_at_once=save_gp(); return true;
  case 's': recalc_at_once=save_png(imHost.nFpng); return true;
  case 'e': randR *= sqrt(sqrt(2)); return true;
  case 'E': randR /= sqrt(sqrt(2)); return true;
  case 'm': tstep /= sqrt(sqrt(2)); density /= sqrt(sqrt(2)); return true;
  case 'M': tstep *= sqrt(sqrt(2)); density *= sqrt(sqrt(2)); return true;
  case 'd': density *= sqrt(sqrt(2)); return true;
  case 'D': density /= sqrt(sqrt(2)); return true;
  case 'j': opacity = 1.0 - (1.0-opacity)/sqrt(sqrt(2)); return true;
  case 'J': opacity = 1.0 - (1.0-opacity)*sqrt(sqrt(2)); return true;
  case '@': draw_bmp4backgrownd = (draw_bmp4backgrownd+1)%4; return true;
  case '#': draw_mesh_flag ^= true; return true;
  case '$': draw_box_flag ^= true; return true;
  case '%': draw_fg_flag ^= true; return true;
  case 4 : dissect_box_flag ^= true; return true;                    // Ctrl-d
  case '~': draw_sec_xyz_flag ^= true; return true;
  case 6 : filterMode_flag ^= true; initTex(); return true;          // Ctrl-f
  case '!': save_bmp4backgrownd(); return true;
  case '2': render_type=2; initTex(); return true;
  case '3': render_type=3; initTex(); return true;
  case '4': mode3D=(mode3D+1)%3; return true;//grad_mode ^= true; imHost.palDim = 1 + 2*grad_mode; return true;
  case '5': imHost.pal3Daxis = (imHost.pal3Daxis+1)%3; return true;
  case 'g': recalc_always=false; return true;
  case 'G': recalc_always=true; return true;
  case 'Q': recalc_at_once=false; icalcNdrop.add_sensor(ix0, iy0, iz0); return true;
  case 'q': icalcNdrop.step(); return true;
  case 11 : contour_flag ^= true; return true;                       // Ctrl-k
  case 'k': contour_width *= 1.2; return true;
  case 'K': contour_width /= 1.2; return true;
  case 12 : {                                                        // Ctrl-l: read extra options from add.opt
    FILE* cmd=fopen("add.opt", "r"); if(cmd) {
      char str[1024],* argv[2]; argv[0] = str;
      while(fgets(str, 1024, cmd)) {
        char* c=strchr(str, ' ');
        if(c) {
          if(*c==' ') *c = 0;
          argv[1] = c+1;
        } else argv[1] = str;
        init_from_command_line(argv);
      }
      fclose(cmd);
    }}
    //recalc_at_once=false;
    return true;
  case 17: recalc_at_once=false; icalcNdrop.plot_sensors(); return true;   // Ctrl-q
  case 'O': recalc_at_once=false; save_section(); return true;
  case '1': sec1Daxis = (sec1Daxis+1)%4;
    printf("1D section for gnuplot set to %c\n","xyzA"[sec1Daxis]);
  // fall through: changing the axis also restarts the gnuplot pipe and replots
  case 15: recalc_at_once=false;                                     // Ctrl-o
    if(gpPipe) { pclose(gpPipe); gpPipe = 0; }
  // fall through
  case 'o': recalc_at_once=false; plot_section(); return true;
  case 'b': xyz = &icalcNdrop; return true;
  case 26:                                                           // Ctrl-z: rotation center := marked point
    RotPoint[0] = float(ix0)/Nx;
    RotPoint[1] = float(iy0)/Ny;
    RotPoint[2] = float(iz0)/Nz;
    return true;
  case 'x': case 'X': case 'y': case 'Y': case 'z': case 'Z': case 'u': case 'U':
    // In 2D (or 3D with section markers shown) xyz move the slice indices;
    // in pure 3D they start continuous rotation/zoom animation.
    if(render_type==2 || draw_sec_xyz_flag) { switch(key) {
      case 'x': xyz2D.set(&ix0, Nx, 1); break;
      case 'X': xyz2D.set(&ix0, Nx,-1); break;
      case 'y': xyz2D.set(&iy0, Ny, 1); break;
      case 'Y': xyz2D.set(&iy0, Ny,-1); break;
      case 'z': xyz2D.set(&iz0, Nz, 1); break;
      case 'Z': xyz2D.set(&iz0, Nz,-1); break;
      default: return true;
      } xyz = &xyz2D; xyz->step();
    } else if(render_type==3) { switch(key) {
      case 'x': xyz3D.set(&viewRotation[0], 0.5f); break;
      case 'X': xyz3D.set(&viewRotation[0],-0.5f); break;
      case 'y': xyz3D.set(&viewRotation[1], 0.5f); break;
      case 'Y': xyz3D.set(&viewRotation[1],-0.5f); break;
      case 'z': xyz3D.set(&viewRotation[2], 0.5f); break;
      case 'Z': xyz3D.set(&viewRotation[2],-0.5f); break;
      case 'u': xyz3D.set(&viewTranslation[2], 0.01f); break;
      case 'U': xyz3D.set(&viewTranslation[2],-0.01f); break;
      } xyz = &xyz3D; xyz->step();
    }
    return true;
  case 27: clear4exit(); exit(0);                                    // ESC
  default:
    if(imHost.key_func(key, x, y)) return true;
  }
  recalc_at_once=false;
  return false;   // unhandled key (the original's `if(rs==0)` tail was dead code)
}
// Shared state between the GLUT mouse and motion callbacks: the last cursor
// position plus the button/modifier masks captured at the last press.
struct MKstates {
  int ox, oy;       // last cursor position (image coordinates)
  int buttonState;  // bitmask of pressed buttons (1<<button)
  int modState;     // GLUT modifier mask sampled at the last press/release
  MKstates(): ox(0),oy(0), buttonState(0),modState(0) {}
  // Convert GLUT window coordinates into image coordinates by undoing the
  // pan offset (im2D.xPos/yPos) and the bottom-anchored vertical origin.
  void correct_screen_coor(int& x, int& y) {
    const int yTop = glutGet(GLUT_WINDOW_HEIGHT)-im2D.Ny;
    x -= im2D.xPos;
    y += im2D.yPos-yTop;
  }
  // Record button/modifier state and the press position.
  void grabState(int button, int state, int x, int y) {
    modState = glutGetModifiers();
    switch(state) {
      case GLUT_DOWN: buttonState |= 1<<button; break;
      case GLUT_UP:   buttonState = 0; break;  // any release clears all buttons
    }
    ox = x; oy = y;
  }
} mk_state;
// GLUT special-key handler: pans the 2D image inside the window.
// Arrow keys shift by a step scaled by the active modifier (Ctrl x100,
// Shift x10, Alt /10); Home/End/PgUp/PgDn jump to the window edges. The pan
// offset is clamped so the image never scrolls past the window border.
// Returns true when the key was consumed, false otherwise.
bool im3D_pars::special_func(unsigned char key, int x, int y) {
  mk_state.correct_screen_coor(x,y);
  if(type_diag_flag>=2) printf("special_func, keyN=%d, coors=(%d,%d)\n", key, x, y);
  recalc_at_once=true;
  // Append the key to the 4-slot rolling title buffer unless it repeats the
  // current slot; the read index is reduced mod 4 like the write index so it
  // always stays inside the 4-element buffer.
  if(key != addTitleStr[TitleStrInd%4]) addTitleStr[(TitleStrInd++)%4] = key;
  int modState = glutGetModifiers(), zoom=10;
  if(modState == GLUT_ACTIVE_CTRL) zoom *= 100;
  if(modState == GLUT_ACTIVE_SHIFT) zoom *= 10;
  if(modState == GLUT_ACTIVE_ALT) zoom /= 10;
  switch(key) {
  case GLUT_KEY_PAGE_UP: im2D.yPos = glutGet(GLUT_WINDOW_HEIGHT)-im2D.Ny; return true;
  case GLUT_KEY_PAGE_DOWN: im2D.yPos = 0; return true;
  case GLUT_KEY_DOWN: im2D.yPos += zoom; if(im2D.yPos>0) im2D.yPos=0; return true;
  case GLUT_KEY_UP: im2D.yPos -= zoom; {
    int yPosMax=glutGet(GLUT_WINDOW_HEIGHT)-im2D.Ny;
    if(im2D.yPos<yPosMax) im2D.yPos = yPosMax;
  } return true;
  case GLUT_KEY_HOME: im2D.xPos = 0; return true;
  case GLUT_KEY_END: im2D.xPos = glutGet(GLUT_WINDOW_WIDTH)-im2D.Nx; return true;
  case GLUT_KEY_LEFT: im2D.xPos += zoom; if(im2D.xPos>0) im2D.xPos=0; return true;
  case GLUT_KEY_RIGHT: im2D.xPos -= zoom; {
    int xPosMax=glutGet(GLUT_WINDOW_WIDTH)-im2D.Nx;
    if(im2D.xPos<xPosMax) im2D.xPos = xPosMax;
  } return true;
  }
  // Unhandled key: nothing changed, suppress the recalculation.
  recalc_at_once=false;
  return false;
}
// GLUT mouse-button handler.
// Clicks inside the top 20-pixel palette strip adjust the transfer function:
// either toggle per-band transparency bits (discrete mode) or move the
// fmin/fmax limits to the clicked value. Elsewhere, the click is recorded in
// mk_state; in 3D mode (render_type==3) releasing a button folds the
// temporary drag rotation into the permanent Euler angles, otherwise a plain
// click repositions the section point via reset0().
void im3D_pars::mouse_func(int button, int state, int x, int y) {
  mk_state.correct_screen_coor(x,y);
  if(y<20 && state == GLUT_DOWN) {
    // Click in the palette strip: x maps linearly onto [fmin,fmax] (or onto
    // the discrete band index in transparency_discrete mode).
    if(imHost.transparency_discrete_flag) {
      int ic=floor(0.5+(imHost.pscale)*float(x)/float(bNx));
      switch(button) {
      case 0: imHost.transparency_mode |= (1<<ic); break;   // left: make band opaque
      case 1: imHost.transparency_mode ^= (1<<ic); break;   // middle: toggle band
      case 2: imHost.transparency_mode &= ~(1<<ic); break;  // right: make band transparent
      };
    } else {
      float f=imHost.fmin + x/float(bNx)*(imHost.fmax-imHost.fmin);
      switch(button) {
      case 0: imHost.set_lim(f,imHost.fmax); break;   // left: raise lower limit
      case 2: imHost.set_lim(imHost.fmin,f); break;   // right: lower upper limit
      case 1:
        // middle: re-center the range symmetrically around the clicked value
        float df=(f-imHost.fmin)>(imHost.fmax-f)?(f-imHost.fmin):(imHost.fmax-f);
        imHost.set_lim(f-df,f+df); break;
      };
      if(type_diag_flag>=3) printf("mouse pal: %d,%d, button %d, state %d\n", x,y, button, state);
      recalc_at_once=true;
    }
    return;
  }
  mk_state.grabState(button, state, x,y);
  if(render_type==3) {
    if (state == GLUT_UP) {
      // Compose the permanent rotation (x,y,z Euler) with the temporary drag
      // rotation, then extract fresh Euler angles from the combined matrix.
      RotMatr R=RotMatr(0,viewRotation[0]), Ry=RotMatr(1,viewRotation[1]), Rz=RotMatr(2,viewRotation[2]), RxT=RotMatr(0,viewRotationTmp[0]), RyT=RotMatr(1,viewRotationTmp[1]);
      R *= Ry; R *= Rz; R *= RxT; R *= RyT;
      /*for(int i=0; i<3; i++) {
        printf("(");
        float s2=0.0;
        for(int j=0; j<3; j++) { printf("\t%g", R.v[i][j]); s2 += R.v[i][j]*R.v[i][j]; }
        printf("); %g\n", s2);
      }*/
      //printf("Mouse: (%g,%g,%g)+(%g,%g) -> ", viewRotation[0], viewRotation[1], viewRotation[2], viewRotationTmp[0], viewRotationTmp[1]);
      // Standard Euler extraction; the else branch handles gimbal lock (Cy==0).
      double Sy=-R.v[2][0], Cy=sqrt(1.-Sy*Sy), phi[3];
      phi[1] = atan2(Sy,Cy);
      if(Cy>0) {
        double Sx=R.v[2][1]/Cy, Cx=R.v[2][2]/Cy; phi[0]=atan2(Sx,Cx);
        double Sz=R.v[1][0]/Cy, Cz=R.v[0][0]/Cy; phi[2]=atan2(Sz,Cz);
      } else {
        double Cxz=R.v[1][1], Sxz=R.v[0][1]*Sy;
        phi[0]=atan2(Sxz, Cxz); phi[2]=0;
      }
      for(int i=0; i<3; i++) viewRotationTmp[i] = 0;
      for(int i=0; i<3; i++) viewRotation[i] = phi[i]*180.0/M_PI;
      //printf(" (%g,%g,%g)\n", viewRotation[0], viewRotation[1], viewRotation[2]);
    }
  } else {
    // 2D modes: a plain (non-Ctrl) press moves the section point.
    if (state == GLUT_DOWN && mk_state.modState != GLUT_ACTIVE_CTRL) { if(0<=x && x<bNx && 0<=y && y<bNy) reset0(x,bNy-1-y); }
  }
  recalc_at_once=true;
  glutPostRedisplay();
}
// GLUT mouse-drag handler.
// In 2D section mode (render_type==2) a Ctrl-drag pans the section layout;
// in 3D mode a Ctrl-drag moves the eye point, otherwise the pressed button
// selects the action: right (4) zooms, middle (2) translates, left (1)
// accumulates the temporary drag rotation.
void im3D_pars::motion_func(int x, int y) {
  mk_state.correct_screen_coor(x,y);
  if(type_diag_flag>=3) printf("motion func: %d,%d -> %d,%d\n",mk_state.ox,mk_state.oy, x,y);
  if(y<20) return;  // ignore drags inside the palette strip
  const float dx = float(x - mk_state.ox);
  const float dy = float(y - mk_state.oy);
  const bool ctrl = (mk_state.modState == GLUT_ACTIVE_CTRL);
  if(render_type==2) {
    if(ctrl) shift0(mk_state.ox, bNy-1-mk_state.oy, x, bNy-1-y);
  } else if(ctrl) {
    eyePoint.x = x;
    eyePoint.y = bNy-y;
  } else switch(mk_state.buttonState) {
    case 4:  // right button = zoom
      viewTranslation[2] += dy / 100.0f;
      break;
    case 2:  // middle button = translate
      viewTranslation[0] += dx / 100.0f;
      viewTranslation[1] -= dy / 100.0f;
      break;
    case 1:  // left button = rotate
      viewRotationTmp[0] += dy / 5.0f;
      viewRotationTmp[1] += dx / 5.0f;
      break;
  }
  mk_state.ox = x;
  mk_state.oy = y;
  recalc_at_once=true;
  glutPostRedisplay();
}
//int cfX=0, cfY=0;
// Fill the whole output bitmap with the background color.
// Launched with a 2D grid covering bNx x bNy pixels; extra threads exit.
__global__ void im3Dclear(uchar4 bgk_col) {
  const int ix=blockDim.x*blockIdx.x+threadIdx.x;
  const int iy=blockDim.y*blockIdx.y+threadIdx.y;
  if(ix>=im3D.bNx || iy>=im3D.bNy) return;
  im.bmp[iy*im3D.bNx+ix] = bgk_col;
}
// Draw one 2D slice of the 3D texture into the bitmap starting at linear
// pixel offset sh. Template parameters assign volume axes to screen axes:
// for each axis, c?==0 fixes it at slice index i0, c?==1 maps it to the
// first screen coordinate x1, c?==2 to the second x2 (scaled by *_zoom).
template<int cx, int cy, int cz>
__global__ void im3Ddraw_any(int sh, int i0) {
  int x1=blockIdx.x*blockDim.x+threadIdx.x, x2=blockIdx.y*blockDim.y+threadIdx.y;
  // p1/p2: absolute bitmap coordinates of this pixel; clip to the bitmap.
  int p1=sh%im3D.bNx+x1, p2=sh/im3D.bNx+x2;
  if(0>p1 || p1>=im3D.bNx || 0>p2 || p2>=im3D.bNy) return;
  int ix = cx==0?i0:((cx==1?x1:x2)*im3D.x_zoom);
  int iy = cy==0?i0:((cy==1?x1:x2)*im3D.y_zoom);
  int iz = cz==0?i0:((cz==1?x1:x2)*im3D.z_zoom);
  if(ix<im3D.Nx && iy<im3D.Ny && iz<im3D.Nz) {
    uchar4 c=im.get_color(tex3D(data3D_tex, ix,iy,iz));
    // Invert the color on the section-point crosshair (within a 20-texel
    // neighborhood of (ix0,iy0,iz0), along the axes this slice spans).
    if(im3D.draw_sec_xyz_flag && (abs(ix-im3D.ix0)<20 && abs(iy-im3D.iy0)<20 && abs(iz-im3D.iz0)<20) && (cx>0 && ix==im3D.ix0 || cy>0 && iy==im3D.iy0 || cz>0 && iz==im3D.iz0)) c = make_uchar4(255-c.x,255-c.y,255-c.z,c.w);
#if DATA_VECTOR_SZ==1
    // Optional iso-contours: invert the pixel where one of the cntr_levels
    // is crossed within +-contour_width texels along any axis (both samples
    // must be non-zero and share a sign for the crossing to count).
    if(im3D.contour_flag) {
      for(int i=0; i<im3D.cntr_num; i++) {
        float vp=tex3D(data3D_tex, ix+im3D.contour_width,iy,iz);
        float vm=tex3D(data3D_tex, ix-im3D.contour_width,iy,iz);
        float lv=im3D.cntr_levels[i];
        if(vp != 0 && vm != 0 && (vp>0 ^ vm<0) && (vp>lv ^ vm>lv)) { c = make_uchar4(255-c.x,255-c.y,255-c.z,c.w); continue; }
        vp=tex3D(data3D_tex, ix,iy+im3D.contour_width,iz);
        vm=tex3D(data3D_tex, ix,iy-im3D.contour_width,iz);
        if(vp != 0 && vm != 0 && (vp>0 ^ vm<0) && (vp>lv ^ vm>lv)) { c = make_uchar4(255-c.x,255-c.y,255-c.z,c.w); continue; }
        vp=tex3D(data3D_tex, ix,iy,iz+im3D.contour_width);
        vm=tex3D(data3D_tex, ix,iy,iz-im3D.contour_width);
        if(vp != 0 && vm != 0 && (vp>0 ^ vm<0) && (vp>lv ^ vm>lv)) { c = make_uchar4(255-c.x,255-c.y,255-c.z,c.w); continue; }
      }
      //if((1<v && v<1623/1536.) || (-1>v && v>-1623/1536.)) c = make_uchar4(255-c.x,255-c.y,255-c.z,c.w);
    }
#endif
    im.bmp[sh+x1+x2*im3D.bNx] = c;
  }
  //if(x1==128 && x2==128) printf("res(%d,%d,%d)=%g\n", ix,iy,iz, tex3D(data3D_tex, ix,iy,iz));
}
// Draw the 20-pixel-high palette strip at linear offset pal_sh:
// each column x maps linearly onto a value in [fmin, fmax].
__global__ void draw_pal() {
  const int x=blockDim.x*blockIdx.x+threadIdx.x;
  const float f=im.fmin+(float(x)/im3D.bNx)*(im.fmax-im.fmin);
  const uchar4 col=im.get_color(f);
  uchar4* row = im.bmp+im3D.pal_sh+x;
  for(int y=0; y<20; y++, row += im3D.bNx) *row = col;
}
// Invert every pixel of bitmap column x (per-channel 255-c negation).
__global__ void negate() {
  const int x=blockDim.x*blockIdx.x+threadIdx.x;
  const uchar4 white=make_uchar4(255,255,255,255);
  for(int y=0; y<im3D.bNy; y++) {
    uchar4& p=im.bmp[y*im3D.bNx+x];
    p = white-p;
  }
}
// Inverse view matrix staged on the host (3x4, twelve floats) and copied
// into c_invViewMatrix before each 3D render.
float invViewMatrix[12];
typedef struct {
    float4 m[3];  // three rows of a 3x4 transform (rotation + translation column)
} float3x4;
// The 3D rendering code is borrowed from the cuda5.5 samples:
// 2_Graphics/volumeRender/volumeRender_kernel.cu
__constant__ float3x4 c_invViewMatrix; // inverse view matrix
// A ray in world space.
struct Ray {
    float3 o;   // origin
    float3 d;   // direction
};
// Intersect ray r with the axis-aligned box [boxmin,boxmax] (slab method).
// Writes the entry/exit ray parameters to *tnear/*tfar and returns non-zero
// on a hit. When dissect_box_flag is set, the corner sub-box reaching up to
// the section point (ix0+1,iy0+1,iz0+1 cells, scaled by BoxFactor) is cut
// away by advancing *tnear past its far side whenever the ray enters there.
__device__ int intersectBox(Ray r, float3 boxmin, float3 boxmax, float *tnear, float *tfar) {
  // compute intersection of ray with all six bbox planes
  // (the small bias keeps the division finite for axis-parallel rays)
  float3 invR = make_float3(1.0f) / (r.d+1e-5);
  float3 tbot = invR * (boxmin - r.o);
  float3 ttop = invR * (boxmax - r.o);
  // re-order intersections to find smallest and largest on each axis
  float3 tmin = fminf(ttop, tbot);
  float3 tmax = fmaxf(ttop, tbot);
  // find the largest tmin and the smallest tmax
  // NOTE(review): the inner fmaxf/fminf repeat the .x component instead of
  // using .y, but the result still covers all three axes, so it is correct.
  float largest_tmin = fmaxf(fmaxf(tmin.x, tmin.y), fmaxf(tmin.x, tmin.z));
  float smallest_tmax = fminf(fminf(tmax.x, tmax.y), fminf(tmax.x, tmax.z));
  *tnear = largest_tmin;
  *tfar = smallest_tmax;
  if(im3D.dissect_box_flag) {
    // boxmid: far corner of the cut-away sub-box anchored at boxmin.
    float3 boxmid=boxmin+make_float3(im3D.BoxFactor[0]*(im3D.ix0+1), im3D.BoxFactor[1]*(im3D.iy0+1), im3D.BoxFactor[2]*(im3D.iz0+1));
    float3 ttopC= invR * (boxmid - r.o);
    float3 tminC = fminf(ttopC, tbot);
    float3 tmaxC = fmaxf(ttopC, tbot);
    float largest_tminC = fmaxf(fmaxf(tminC.x, tminC.y), fmaxf(tminC.x, tminC.z));
    float smallest_tmaxC = fminf(fminf(tmaxC.x, tmaxC.y), fminf(tmaxC.x, tmaxC.z));
    // If the ray would enter through the cut corner, start at its far side.
    if(smallest_tmaxC > largest_tminC && largest_tmin == largest_tminC) *tnear = smallest_tmaxC;
  }
  return smallest_tmax > largest_tmin;
}
// Rotate direction vector v by the upper-left 3x3 of M (translation ignored).
__device__
float3 mul(const float3x4 &M, const float3 &v)
{
    return make_float3(dot(v, make_float3(M.m[0])),
                       dot(v, make_float3(M.m[1])),
                       dot(v, make_float3(M.m[2])));
}
// Transform point v by M including the translation column (v.w scales it);
// the returned w component is always forced to 1.
__device__
float4 mul(const float3x4 &M, const float4 &v)
{
    return make_float4(dot(v, M.m[0]),
                       dot(v, M.m[1]),
                       dot(v, M.m[2]),
                       1.0f);
}
// Blend rgba (with weight a = rgba.w) over the 8-bit background pixel bk and
// pack the result into a uchar4. da spreads the remaining (1-a) weight over
// the 0..255 background range so bk needs no prior normalization.
__device__ uchar4 rgbaFloatToInt(float4 rgba, uchar4 bk) {
  // float literals keep the arithmetic in single precision on the device
  float a=rgba.w, da=(1.0f-a)/255.0f;
  rgba.x = __saturatef(bk.x*da+a*rgba.x); // clamp to [0.0, 1.0]
  rgba.y = __saturatef(bk.y*da+a*rgba.y);
  rgba.z = __saturatef(bk.z*da+a*rgba.z);
  rgba.w = __saturatef(rgba.w);
  return make_uchar4((rgba.x*255.f), (rgba.y*255.f), (rgba.z*255.f), (rgba.w*255.f));
}
// Clamp each channel of rgba to [0,1] and pack into an 8-bit uchar4.
__device__ uchar4 rgbaFloatToInt(float4 rgba) {
  const float cx = __saturatef(rgba.x);
  const float cy = __saturatef(rgba.y);
  const float cz = __saturatef(rgba.z);
  const float cw = __saturatef(rgba.w);
  return make_uchar4(cx*255.f, cy*255.f, cz*255.f, cw*255.f);
}
// Draw three disc-shaped 3D palette legends (xy-, zy- and xz-gradient planes)
// side by side starting at pal_sh. One block column per x, one thread per y;
// points outside the unit disc are skipped. (The unused radius computations
// of the original were removed — only r2 is needed for the disc test.)
__global__ void draw_pal3D() {
  float x=2.0f*(0.5f+blockIdx.x)/gridDim.x-1.0f, y=2.0f*(0.5f+threadIdx.x)/blockDim.x-1.0f;
  float r2=x*x+y*y;
  if(r2>1.0f) return;
  // Row offset is centered vertically around pal_sh; the three discs are
  // blockDim.x pixels apart horizontally.
  uchar4* bmp = im.bmp+(im3D.pal_sh+im3D.bNx*(int(threadIdx.x)-int(blockDim.x/2))+blockIdx.x);
  bmp[0] = rgbaFloatToInt(im.get_color_for3D(make_float4(x,y,0,1)));
  bmp[blockDim.x] = rgbaFloatToInt(im.get_color_for3D(make_float4(0,y,x,1)));
  bmp[2*blockDim.x] = rgbaFloatToInt(im.get_color_for3D(make_float4(x,0,y,1)));
}
// Quadratic falloff 1-x^2 clamped to [0,1]; used as a soft line profile.
__device__ float smooth(float x) {
  const float fall = 1.0f - x*x;
  return __saturatef(fall);
}
//------------------------------
// Compute the world-space bounds of the data box so that the rotation point
// RotPoint (given as fractions of the box size) lands at the origin.
inline __device__ void set_boxMinMax(float3& boxMin, float3& boxMax) {
  const float3 boxSize=make_float3(im3D.BoxFactor[0]*im3D.Nx,
                                   im3D.BoxFactor[1]*im3D.Ny,
                                   im3D.BoxFactor[2]*im3D.Nz);
  const float3 origin=(float3&)im3D.RotPoint*boxSize;
  boxMin = -origin;
  boxMax = boxSize-origin;
}
// Build the world-space eye ray for screen pixel (x,y).
// The camera origin is the view translation scaled by 0.32*(Nx+Ny+Nz): the
// w component passed to mul(float3x4,float4) multiplies M's translation
// column since the xyz part is zero. The direction fans out from eyePoint
// with a fixed perspective factor of -2 in z, then rotates into world space.
inline __device__ void set_eyeRay(Ray& eyeRay, float x, float y) {
  const float dbNxy=2.0f/(im3D.bNx+im3D.bNy);
  const int Nsum=im3D.Nx+im3D.Ny+im3D.Nz;
  eyeRay.o = make_float3(mul(c_invViewMatrix, make_float4(0.0f, 0.0f, 0.0f, 0.32f*Nsum)));
  eyeRay.d = normalize(make_float3((x-im3D.eyePoint.x)*dbNxy, (y-im3D.eyePoint.y)*dbNxy, -2.0f));
  eyeRay.d = mul(c_invViewMatrix, eyeRay.d);
}
// Fill the output pixel at linear index bmp_sh with the scene background for
// ray r and return a reference to it (im.bmp is in global memory, so the
// reference stays valid). The base is either the saved backdrop bitmap or
// the solid bkgr_col; box edges (draw_box_flag) and mesh lines
// (draw_mesh_flag) are then overlaid where the ray crosses the *far* faces
// of the (shrunken) box.
__device__ uchar4& get_backgrownd(Ray r, float3 boxmin, float3 boxmax, int bmp_sh) {
  float3 bkgr_col=(float3&)im3D.bkgr_col, box_shrink=(float3&)im3D.box_shrink;
  float3 boxMin=box_shrink*boxmin, boxMax=box_shrink*boxmax;
  float3 fcol=make_float3(0);
  uchar4& vbmp=im.bmp[bmp_sh];
  if(im3D.draw_bmp4backgrownd && im.bmp4backgrownd != 0) vbmp = im.bmp4backgrownd[bmp_sh];
  else { fcol = bkgr_col; vbmp = make_uchar4(0,0,0,0); }
  if(im3D.draw_mesh_flag || im3D.draw_box_flag) {
    // Slab intersections with the shrunken box; the sign of each direction
    // component selects the far face, giving the exit point on each plane.
    float3 invR = make_float3(1.0f) / (r.d+1e-5);
    float3 tB = invR * (boxMin - r.o);
    float3 tT = invR * (boxMax - r.o);
    float tz=r.d.z<0?tB.z:tT.z, xZ=r.o.x+r.d.x*tz, yZ=r.o.y+r.d.y*tz;
    float ty=r.d.y<0?tB.y:tT.y, zY=r.o.z+r.d.z*ty, xY=r.o.x+r.d.x*ty;
    float tx=r.d.x<0?tB.x:tT.x, yX=r.o.y+r.d.y*tx, zX=r.o.z+r.d.z*tx;
    float mval=im3D.Dmesh;
    float3 mb=(float3&)im3D.MeshBox;
    float3 ms=(float3&)im3D.MeshShift;
    if(im3D.draw_box_flag) {
      // Signed distances from each face hit point to that face's four edges.
      float xZn=xZ-boxmin.x, yZn=yZ-boxmin.y, xZx=boxmax.x-xZ, yZx=boxmax.y-yZ;
      float zYn=zY-boxmin.z, xYn=xY-boxmin.x, zYx=boxmax.z-zY, xYx=boxmax.x-xY;
      float yXn=yX-boxmin.y, zXn=zX-boxmin.z, yXx=boxmax.y-yX, zXx=boxmax.z-zX;
      float zval=im3D.Dmesh, dm=im3D.Dmesh;
      if(xZn>=-dm && yZn>=-dm && xZx>=-dm && yZx>=-dm) {
        if(im3D.draw_mesh_flag) { mval=fminf(mval,fminf(fabsf(remainderf(xZ-ms.x, mb.x)), fabsf(remainderf(yZ-ms.y, mb.y)))); }
        zval=fminf(zval,fminf(fminf(fabs(xZn), fabs(yZn)), fminf(fabs(xZx), fabs(yZx))));
      }
      if(zYn>=-dm && xYn>=-dm && zYx>=-dm && xYx>=-dm) {
        if(im3D.draw_mesh_flag) { mval=fminf(mval,fminf(fabsf(remainderf(zY-ms.z, mb.z)), fabsf(remainderf(xY-ms.x, mb.x)))); }
        zval=fminf(zval,fminf(fminf(fabs(xYn), fabs(zYn)), fminf(fabs(xYx), fabs(zYx))));
      }
      if(yXn>=-dm && zXn>=-dm && yXx>=-dm && zXx>=-dm) {
        if(im3D.draw_mesh_flag) { mval=fminf(mval,fminf(fabsf(remainderf(yX-ms.y, mb.y)), fabsf(remainderf(zX-ms.z, mb.z)))); }
        zval=fminf(zval,fminf(fminf(fabs(zXn), fabs(yXn)), fminf(fabs(zXx), fabs(yXx))));
      }
      // Soft-blend the box edge color with the smooth() line profile.
      float zdel=smooth(zval/im3D.Dmesh);
      fcol = fcol*(1.0f-zdel)+((float3&)(im3D.box_col))*zdel;
    } else {
      // No box outline: only measure the distance to the nearest mesh line
      // on whichever face the ray actually exits through.
      if(xZ>=boxmin.x && yZ>=boxmin.y && xZ<=boxmax.x && yZ<=boxmax.y) mval=fminf(fabsf(remainderf(xZ-ms.x, mb.x)), fabsf(remainderf(yZ-ms.y, mb.y)));
      else if(zY>=boxmin.z && xY>=boxmin.x && zY<=boxmax.z && xY<=boxmax.x) mval=fminf(fabsf(remainderf(zY-ms.z, mb.z)), fabsf(remainderf(xY-ms.x, mb.x)));
      else if(yX>=boxmin.y && zX>=boxmin.z && yX<=boxmax.y && zX<=boxmax.z) mval=fminf(fabsf(remainderf(yX-ms.y, mb.y)), fabsf(remainderf(zX-ms.z, mb.z)));
    }
    if(im3D.draw_mesh_flag) {
      float mdel=smooth(mval/im3D.Dmesh);
      fcol = fcol*(1.0f-mdel)+((float3&)(im3D.mesh_col))*mdel;
    }
  }
  vbmp = vbmp+make_uchar4(__saturatef(fcol.x)*255, __saturatef(fcol.y)*255, __saturatef(fcol.z)*255, 255);
  return vbmp;
}
// Overlay box edges and mesh lines on the *near* faces of the (shrunken)
// box — the counterpart of get_backgrownd, which handles the far faces; the
// opposite direction-sign tests below select the entry point instead of the
// exit point. The pixel at bmp_sh is modified in place and its reference
// returned; unlike get_backgrownd, no background fill is performed.
__device__ uchar4& get_foregrownd(Ray r, float3 boxmin, float3 boxmax, int bmp_sh) {
  float3 box_shrink=(float3&)im3D.box_shrink;
  float3 boxMin=box_shrink*boxmin, boxMax=box_shrink*boxmax;
  float3 fcol=make_float3(0);
  uchar4& vbmp=im.bmp[bmp_sh];
  if(im3D.draw_mesh_flag || im3D.draw_box_flag) {
    float3 invR = make_float3(1.0f) / (r.d+1e-5);
    float3 tB = invR * (boxMin - r.o);
    float3 tT = invR * (boxMax - r.o);
    // Note: '>0' (vs '<0' in get_backgrownd) picks the near face per axis.
    float tz=r.d.z>0?tB.z:tT.z, xZ=r.o.x+r.d.x*tz, yZ=r.o.y+r.d.y*tz;
    float ty=r.d.y>0?tB.y:tT.y, zY=r.o.z+r.d.z*ty, xY=r.o.x+r.d.x*ty;
    float tx=r.d.x>0?tB.x:tT.x, yX=r.o.y+r.d.y*tx, zX=r.o.z+r.d.z*tx;
    float mval=im3D.Dmesh;
    float3 mb=(float3&)im3D.MeshBox;
    float3 ms=(float3&)im3D.MeshShift;
    if(im3D.draw_box_flag) {
      // Signed distances from each face hit point to that face's four edges.
      float xZn=xZ-boxmin.x, yZn=yZ-boxmin.y, xZx=boxmax.x-xZ, yZx=boxmax.y-yZ;
      float zYn=zY-boxmin.z, xYn=xY-boxmin.x, zYx=boxmax.z-zY, xYx=boxmax.x-xY;
      float yXn=yX-boxmin.y, zXn=zX-boxmin.z, yXx=boxmax.y-yX, zXx=boxmax.z-zX;
      float zval=im3D.Dmesh, dm=im3D.Dmesh;
      if(xZn>=-dm && yZn>=-dm && xZx>=-dm && yZx>=-dm) {
        if(im3D.draw_mesh_flag) { mval=fminf(mval,fminf(fabsf(remainderf(xZ-ms.x, mb.x)), fabsf(remainderf(yZ-ms.y, mb.y)))); }
        zval=fminf(zval,fminf(fminf(fabs(xZn), fabs(yZn)), fminf(fabs(xZx), fabs(yZx))));
      }
      if(zYn>=-dm && xYn>=-dm && zYx>=-dm && xYx>=-dm) {
        if(im3D.draw_mesh_flag) { mval=fminf(mval,fminf(fabsf(remainderf(zY-ms.z, mb.z)), fabsf(remainderf(xY-ms.x, mb.x)))); }
        zval=fminf(zval,fminf(fminf(fabs(xYn), fabs(zYn)), fminf(fabs(xYx), fabs(zYx))));
      }
      if(yXn>=-dm && zXn>=-dm && yXx>=-dm && zXx>=-dm) {
        if(im3D.draw_mesh_flag) { mval=fminf(mval,fminf(fabsf(remainderf(yX-ms.y, mb.y)), fabsf(remainderf(zX-ms.z, mb.z)))); }
        zval=fminf(zval,fminf(fminf(fabs(zXn), fabs(yXn)), fminf(fabs(zXx), fabs(yXx))));
      }
      // Soft-blend the box edge color with the smooth() line profile.
      float zdel=smooth(zval/im3D.Dmesh);
      fcol = fcol*(1.0f-zdel)+((float3&)(im3D.box_col))*zdel;
    } else {
      // No box outline: only measure the distance to the nearest mesh line
      // on whichever face the ray enters through.
      if(xZ>=boxmin.x && yZ>=boxmin.y && xZ<=boxmax.x && yZ<=boxmax.y) mval=fminf(fabsf(remainderf(xZ-ms.x, mb.x)), fabsf(remainderf(yZ-ms.y, mb.y)));
      else if(zY>=boxmin.z && xY>=boxmin.x && zY<=boxmax.z && xY<=boxmax.x) mval=fminf(fabsf(remainderf(zY-ms.z, mb.z)), fabsf(remainderf(xY-ms.x, mb.x)));
      else if(yX>=boxmin.y && zX>=boxmin.z && yX<=boxmax.y && zX<=boxmax.z) mval=fminf(fabsf(remainderf(yX-ms.y, mb.y)), fabsf(remainderf(zX-ms.z, mb.z)));
    }
    if(im3D.draw_mesh_flag) {
      float mdel=smooth(mval/im3D.Dmesh);
      fcol = fcol*(1.0f-mdel)+((float3&)(im3D.mesh_col))*mdel;
    }
  }
  vbmp = vbmp+make_uchar4(__saturatef(fcol.x)*255, __saturatef(fcol.y)*255, __saturatef(fcol.z)*255, 255);
  return vbmp;
}
// Paint a small square marker (side 2*ps-1 pixels) centered at (x,y);
// markers that would clip the bitmap edge are skipped entirely.
__device__ void mk_pts(int x, int y, uchar4 col) {
  const int ps=2;
  if(x+1<ps || x+ps>=im3D.bNx || y+1<ps || y+ps>=im3D.bNy) return;
  for(int dy=1-ps; dy<ps; dy++)
    for(int dx=1-ps; dx<ps; dx++)
      im.bmp[(y+dy)*im3D.bNx + (x+dx)] = col;
}
// Outline a tileSz x tileSz square whose top-left corner is at (x,y);
// squares that would clip the bitmap edge are skipped entirely.
__device__ void mk_box(int x, int y, uchar4 col) {
  if(x<0 || x+tileSz>=im3D.bNx || y<0 || y+tileSz>=im3D.bNy) return;
  for(int i=0; i<tileSz; i++) {
    im.bmp[y*im3D.bNx + x+i] = col;           // top edge
    im.bmp[(tileSz+y)*im3D.bNx + x+i] = col;  // bottom edge
    im.bmp[(i+y)*im3D.bNx + x] = col;         // left edge
    im.bmp[(i+y)*im3D.bNx + x+tileSz] = col;  // right edge
  }
}
// True when pt lies inside (or on the boundary of) the triangle (p0,px,py):
// the three edge cross-products must all share a sign.
inline bool __device__ is_inside(float2 pt, float2 p0, float2 px, float2 py) {
  const float c0=(p0.x - pt.x) * (px.y - p0.y) - (px.x - p0.x) * (p0.y - pt.y);
  const float c1=(px.x - pt.x) * (py.y - px.y) - (py.x - px.x) * (px.y - pt.y);
  const float c2=(py.x - pt.x) * (p0.y - py.y) - (p0.x - py.x) * (py.y - pt.y);
  return c0*c1>=0.0 && c0*c2>=0.0 && c1*c2>=0.0;
}
// Express pt in the affine basis (p0; px-p0, py-p0): returns (u,v) such that
// pt = p0 + u*(px-p0) + v*(py-p0), computed by Cramer's rule.
inline float2 __device__ pt_inside(float2 pt, float2 p0, float2 px, float2 py) {
  const float tx=pt.x-p0.x, ty=pt.y-p0.y;
  const float dxx=px.x-p0.x, dxy=px.y-p0.y;
  const float dyx=py.x-p0.x, dyy=py.y-p0.y;
  float2 res;
  res.x = (tx*dyy-ty*dyx)/(dxx*dyy-dxy*dyx);
  res.y = (tx*dxy-ty*dxx)/(dyx*dxy-dyy*dxx);
  return res;
}
// Emit gnuplot annotation commands (axis arrows, axis letters, tick labels)
// for the 3D view via device printf, and mark the probed tiles in the bitmap
// for debugging. The screen is sampled on a coarse tile grid; each 2x2 cell
// of samples is classified by which box face the eye ray hits, and cells
// straddling a face boundary produce an axis or tick annotation.
// (Removed the unused ltred/ltgreen locals of the original.)
__global__ void save_gp3D() {
  __shared__ float2 fm[3][tilesN][tilesN];  // per-sample hit coordinates on each face plane
  __shared__ int hit[tilesN][tilesN];       // face-hit mask of the ray: 1-z 2-y 4-x 0-miss
  const int Sgp=(tilesN-1)*tileSz;
  int x=blockIdx.x*Sgp+threadIdx.x*tileSz, y=blockIdx.y*Sgp+threadIdx.y*tileSz;
  float3 boxMin, boxMax; set_boxMinMax(boxMin, boxMax);
  boxMax=((float3&)im3D.box_shrink)*boxMax;
  boxMin=((float3&)im3D.box_shrink)*boxMin;
  Ray r; set_eyeRay(r, x,y);
  // Hit points of the ray on the far z/y/x face planes (slab method).
  float3 invR = make_float3(1.0f) / (r.d+1e-5);
  float3 tB = invR * (boxMin - r.o);
  float3 tT = invR * (boxMax - r.o);
  float tz=r.d.z<0?tB.z:tT.z, xZ=r.o.x+r.d.x*tz, yZ=r.o.y+r.d.y*tz;
  float ty=r.d.y<0?tB.y:tT.y, zY=r.o.z+r.d.z*ty, xY=r.o.x+r.d.x*ty;
  float tx=r.d.x<0?tB.x:tT.x, yX=r.o.y+r.d.y*tx, zX=r.o.z+r.d.z*tx;
  fm[2][threadIdx.x][threadIdx.y] = make_float2(xZ, yZ);
  fm[1][threadIdx.x][threadIdx.y] = make_float2(zY, xY);
  fm[0][threadIdx.x][threadIdx.y] = make_float2(yX, zX);
  if(xZ>=boxMin.x && yZ>=boxMin.y && xZ<=boxMax.x && yZ<=boxMax.y) hit[threadIdx.x][threadIdx.y] = 1;
  else if(zY>=boxMin.z && xY>=boxMin.x && zY<=boxMax.z && xY<=boxMax.x) hit[threadIdx.x][threadIdx.y] = 2;
  else if(yX>=boxMin.y && zX>=boxMin.z && yX<=boxMax.y && zX<=boxMax.z) hit[threadIdx.x][threadIdx.y] = 4;
  else hit[threadIdx.x][threadIdx.y] = 0;
  __syncthreads();
  // Classify the 2x2 cell of samples at (threadIdx.x, threadIdx.y):
  // hitA = number of samples on the box, hitM = union of their face masks.
  int hitA=0, hitM=0;
  if(threadIdx.x<tilesN-1 && threadIdx.y<tilesN-1) {
    for(int i=0;i<2;i++) for(int j=0;j<2;j++) {
      int h=hit[threadIdx.x+i][threadIdx.y+j];
      if(h>0) { hitA++; hitM |= h; }
    }
  }
  int cs=abs(2*hitM-7)/2;  // face-pair index derived from the mask
  if(hitA==0 || hitA==4 || cs>=3) return;  // fully inside/outside: nothing to label
  bool is4tick=false, is4bnd=false, is4axis=false;
  is4bnd = hitM==1 || hitM==2 || hitM==4;   // single face: box silhouette (tick candidates)
  is4axis= hitM==3 || hitM==5 || hitM==6;   // two faces meet: box edge (axis candidates)
  int cp=(cs+1)%3, cm=(cs+2)%3;
  float2 tick_sh={0.0,0.0}, tick2sh={0.0,0.0}; float tick_val;
  const float axis_gap=60., tick_gap=20.;
  float2 pt, spt={0.,0.}; float bMax[]={boxMax.x,boxMax.y,boxMax.z}, bMin[]={boxMin.x,boxMin.y,boxMin.z};
  // Unique gnuplot label/arrow id for this cell.
  int labN=(blockIdx.x*(tilesN-1)+threadIdx.x)+gridDim.x*(tilesN-1)*(blockIdx.y*(tilesN-1)+threadIdx.y);
  if(is4axis) {
    // Emit an axis arrow + its letter next to the box corner nearest to p0.
    float2 p0=fm[cm][threadIdx.x][threadIdx.y], px=fm[cm][threadIdx.x+1][threadIdx.y], py=fm[cm][threadIdx.x][threadIdx.y+1];
    if(fabs(p0.x-bMax[cs])<fabs(p0.x-bMin[cs])) { pt.x = bMax[cs]; spt.x = axis_gap; }
    else { pt.x = bMin[cs]; spt.x = -axis_gap; }
    pt.y = fabs(p0.y-bMax[cp])<fabs(p0.y-bMin[cp])?bMax[cp]:bMin[cp];
    tick_sh = pt_inside(pt, p0,px,py);
    tick2sh = pt_inside(pt+spt, p0,px,py);
    printf("set arrow %d from %g,%g to %g,%g front nohead\n", labN, x+tick_sh.x*tileSz,y+tick_sh.y*tileSz, x+tick2sh.x*tileSz,y+tick2sh.y*tileSz);
    printf("set label %d \"%c\" at %g,%g front center\n", labN, "xyz?"[cs], x+tick2sh.x*tileSz,y+tick2sh.y*tileSz+tick_gap*((tick2sh.y<tick_sh.y)?-1.:1.));
  } else if(is4bnd) {
    // On a silhouette face: find the mesh line crossing this cell and emit
    // its coordinate value as a tick label just outside the box.
    float2 fmin,fmax; fmin = fmax = fm[cs][threadIdx.x][threadIdx.y];
    for(int i=0;i<2;i++) for(int j=0;j<2;j++) {
      float2 f = fm[cs][threadIdx.x+i][threadIdx.y+j];
      if(f.x<fmin.x) fmin.x = f.x;
      if(f.y<fmin.y) fmin.y = f.y;
      if(f.x>fmax.x) fmax.x = f.x;
      if(f.y>fmax.y) fmax.y = f.y;
    }
    if(fmin.x<bMin[cp] || fmax.x>bMax[cp]) {
      int mmin=floorf(fmin.y/im3D.MeshBox[cm]), mmax=floorf(fmax.y/im3D.MeshBox[cm]);
      if(mmin != mmax) is4tick = true;
      pt.x = fmin.x<bMin[cp]?bMin[cp]:bMax[cp]; spt.x = fmin.x<bMin[cp]?-tick_gap:tick_gap;
      pt.y = mmax*im3D.MeshBox[cm];
      tick_val = im3D.base[cm] + pt.y*im3D.step[cm];
    } else if(fmin.y<bMin[cm] || fmax.y>bMax[cm]) {
      int mmin=floorf(fmin.x/im3D.MeshBox[cp]), mmax=floorf(fmax.x/im3D.MeshBox[cp]);
      if(mmin != mmax) is4tick = true;
      pt.x = mmax*im3D.MeshBox[cp];
      pt.y = fmin.y<bMin[cm]?bMin[cm]:bMax[cm]; spt.y = fmin.y<bMin[cm]?-tick_gap:tick_gap;
      tick_val = im3D.base[cp] + pt.x*im3D.step[cp];
    }
    if(is4tick) {
      // Locate the tick inside one of the two triangles of this cell.
      float2 p0=fm[cs][threadIdx.x][threadIdx.y], px=fm[cs][threadIdx.x+1][threadIdx.y], py=fm[cs][threadIdx.x][threadIdx.y+1], p1=fm[cs][threadIdx.x+1][threadIdx.y+1];
      if(is_inside(pt, p0,px,py)) {
        tick_sh = pt_inside(pt, p0,px,py);
        tick2sh = pt_inside(pt+spt, p0,px,py);
      } else if(is_inside(pt, p1,py,px)) {
        tick_sh = 1.0-pt_inside(pt, p1,py,px);
        tick2sh = 1.0-pt_inside(pt+spt, p1,py,px);
      } else is4tick = false;
      if(is4tick) printf("set label %d \"%g\" at %g,%g front %s\n", labN, tick_val, x+tick2sh.x*tileSz,y+tick2sh.y*tileSz, (tick2sh.x<tick_sh.x)?"right":"left");
    }
  }
  // Debug overlay: mark classified tiles and label anchors in the bitmap.
  uchar4 red=make_uchar4(255,0,0,0), green=make_uchar4(0,255,0,0), blue=make_uchar4(0,0,255,0);
  uchar4 ltblue=make_uchar4(0,0,128,0);
  if(is4axis) {
    mk_box(x,y, red);
    mk_pts(x+tick2sh.x*tileSz,y+tick2sh.y*tileSz, red);
  } else if(is4tick) {
    mk_box(x,y, blue);
    mk_pts(x+tick2sh.x*tileSz,y+tick2sh.y*tileSz, blue);
  } else if(is4bnd) mk_box(x,y, green);
  else mk_box(x,y, ltblue);
}
// Volume ray marching with on-the-fly gradient shading: the color lookup
// receives the finite-difference gradient (df/dx,df/dy,df/dz) plus the value
// itself. One thread per output pixel; front-to-back "over" compositing
// stops once accumulated opacity reaches im3D.opacity. Compiled only for
// scalar data (DATA_VECTOR_SZ==1).
__global__ void __launch_bounds__(1024,1) grad_render3D() {
#if DATA_VECTOR_SZ==1
  const float opacityThreshold = im3D.opacity;
  const float density=im3D.density, brightness=im.max_rgb;
  float3 boxMin, boxMax; set_boxMinMax(boxMin, boxMax);
  int x = blockIdx.x*blockDim.x + threadIdx.x;
  int y = blockIdx.y*blockDim.y + threadIdx.y;
  Ray eyeRay; set_eyeRay(eyeRay, x,y);
  // Paint the background first; keep the pixel reference for compositing.
  uchar4& vbmp=get_backgrownd(eyeRay, boxMin, boxMax, y*im3D.bNx + x);
  // Jitter the ray by a per-thread random angle (radius randR) for AA.
  float phi=im3D.randArr[threadIdx.x+threadIdx.y*blockDim.x];
  set_eyeRay(eyeRay, x+im3D.randR*cos(phi),y+im3D.randR*sin(phi));
  float tnear, tfar;
  int hit = intersectBox(eyeRay, boxMin, boxMax, &tnear, &tfar);
  if (!hit) return;
  if(tnear < 0.0f) tnear = 0.0f; // clamp to near plane
  float4 sum = make_float4(0.0f);
  // SzfdBox converts world coordinates into texel coordinates.
  const float3 SzfdBox=make_float3(im3D.Nx,im3D.Ny,im3D.Nz)/(boxMax-boxMin);
  float3 pos_sc = (eyeRay.o + eyeRay.d*tnear-boxMin)*SzfdBox-0.5f;
  const float3 step_sc = (eyeRay.d*im3D.tstep)*SzfdBox;
  for(float t=tnear; t<tfar; t+=im3D.tstep, pos_sc += step_sc) {
    // cross stencil:
    float d=im3D.tstep, dd=im.max_rgb_step*0.5/d;
    float dfdx=dd*(tex3D(data3D_tex, pos_sc.x+d, pos_sc.y, pos_sc.z)-tex3D(data3D_tex, pos_sc.x-d, pos_sc.y, pos_sc.z));
    float dfdy=dd*(tex3D(data3D_tex, pos_sc.x, pos_sc.y+d, pos_sc.z)-tex3D(data3D_tex, pos_sc.x, pos_sc.y-d, pos_sc.z));
    float dfdz=dd*(tex3D(data3D_tex, pos_sc.x, pos_sc.y, pos_sc.z+d)-tex3D(data3D_tex, pos_sc.x, pos_sc.y, pos_sc.z-d));
    float4 col = im.get_color_for3D(make_float4(dfdx,dfdy,dfdz,tex3D(data3D_tex, pos_sc.x, pos_sc.y, pos_sc.z)));
    float w=col.w*density*(1.0f - sum.w); col.w = 1;
    sum += col * w;
    if(sum.w >= opacityThreshold) {
      // Remove the overshoot so sum.w lands exactly on the threshold.
      sum -= col*(sum.w - opacityThreshold);
      break;
    }/*
    col.w *= density;
    col.x *= col.w;
    col.y *= col.w;
    col.z *= col.w;
    sum = sum + col*(1.0f - sum.w);
    if (sum.w > opacityThreshold) break;*/
  }
  sum.x *= brightness; sum.y *= brightness; sum.z *= brightness;
  vbmp = rgbaFloatToInt(sum, vbmp);
  if(im3D.draw_fg_flag) vbmp=get_foregrownd(eyeRay, boxMin, boxMax, y*im3D.bNx + x);
  if(im3D.draw_sec_xyz_flag) {
    // Invert the pixel where the ray stopped on one of the section planes.
    if(fabs(pos_sc.x-im3D.ix0)<=0.5|| fabs(pos_sc.y-im3D.iy0)<=0.5|| fabs(pos_sc.z-im3D.iz0)<=0.5) vbmp = make_uchar4(255-vbmp.x,255-vbmp.y,255-vbmp.z,vbmp.w);
  }
#endif
}
// Volume ray marching with precomputed surface normals: data3Dsurf_tex holds
// per-voxel normals packed into short2 (x = n.z, y = azimuth angle, both
// scaled by 32767; -32768 in either component marks "no normal"). Otherwise
// identical compositing to grad_render3D. Compiled only for scalar data.
__global__ void __launch_bounds__(1024,1) surf_render3D() {
#if DATA_VECTOR_SZ==1
  const float opacityThreshold = im3D.opacity;
  const float density=im3D.density, brightness=im.max_rgb;
  float3 boxMin, boxMax; set_boxMinMax(boxMin, boxMax);
  int x = blockIdx.x*blockDim.x + threadIdx.x;
  int y = blockIdx.y*blockDim.y + threadIdx.y;
  Ray eyeRay; set_eyeRay(eyeRay, x,y);
  // Paint the background first; keep the pixel reference for compositing.
  uchar4& vbmp=get_backgrownd(eyeRay, boxMin, boxMax, y*im3D.bNx + x);
  // Jitter the ray by a per-thread random angle (radius randR) for AA.
  float phi=im3D.randArr[threadIdx.x+threadIdx.y*blockDim.x];
  set_eyeRay(eyeRay, x+im3D.randR*cos(phi),y+im3D.randR*sin(phi));
  float tnear, tfar;
  int hit = intersectBox(eyeRay, boxMin, boxMax, &tnear, &tfar);
  if (!hit) return;
  if(tnear < 0.0f) tnear = 0.0f; // clamp to near plane
  float4 sum = make_float4(0.0f);
  // SzfdBox converts world coordinates into texel coordinates.
  const float3 SzfdBox=make_float3(im3D.Nx,im3D.Ny,im3D.Nz)/(boxMax-boxMin);
  float3 pos_sc = (eyeRay.o + eyeRay.d*tnear-boxMin)*SzfdBox-0.5f;
  const float3 step_sc = (eyeRay.d*im3D.tstep)*SzfdBox;
  for(float t=tnear; t<tfar; t+=im3D.tstep, pos_sc += step_sc) {
    // cross stencil:
    short2 s2=tex3D(data3Dsurf_tex, pos_sc.x, pos_sc.y, pos_sc.z);
    const short MAX_SHORT=(1<<15)-1; const float dMS=1.0f/MAX_SHORT;
    float3 f={0,0,0};
    // Decode the packed unit normal: s2.x -> n.z, s2.y -> azimuth in xy.
    // (the inner 'phi' intentionally shadows the jitter angle above)
    if(s2.x!=-MAX_SHORT-1 && s2.y!=-MAX_SHORT-1) {
      f.z = s2.x*dMS; float fxy=sqrt(1-f.z*f.z), phi=s2.y*dMS*M_PI;
      f.y = fxy*sin(phi);
      f.x = fxy*cos(phi);
    }
    float4 col = im.get_color_for3D(make_float4(f.x,f.y,f.z,tex3D(data3D_tex, pos_sc.x, pos_sc.y, pos_sc.z)));
    float w=col.w*density*(1.0f - sum.w); col.w = 1;
    sum += col * w;
    if(sum.w >= opacityThreshold) {
      // Remove the overshoot so sum.w lands exactly on the threshold.
      sum -= col*(sum.w - opacityThreshold);
      break;
    }
  }
  sum.x *= brightness; sum.y *= brightness; sum.z *= brightness;
  vbmp = rgbaFloatToInt(sum, vbmp);
  if(im3D.draw_fg_flag) vbmp=get_foregrownd(eyeRay, boxMin, boxMax, y*im3D.bNx + x);
  if(im3D.draw_sec_xyz_flag) {
    // NOTE(review): alpha is written as -vbmp.w here while grad_render3D
    // writes vbmp.w — this looks unintentional; verify which is wanted.
    if(fabs(pos_sc.x-im3D.ix0)<=0.5|| fabs(pos_sc.y-im3D.iy0)<=0.5|| fabs(pos_sc.z-im3D.iz0)<=0.5) vbmp = make_uchar4(255-vbmp.x,255-vbmp.y,255-vbmp.z,-vbmp.w);
  }
  //if(x==im3D.bNx/2 && y==im3D.bNy/2) printf("Surf: %f,%f,%f,%f*%f/%f*%f => %d,%d,%d\n", sum.x,sum.y,sum.z,sum.w, last_mul,opacityThreshold, brightness, vbmp.x,vbmp.y,vbmp.z);
#endif
}
// Plain volume ray marching (no gradient/normal shading): the color lookup
// receives only the sampled value. One thread per output pixel;
// front-to-back "over" compositing stops once accumulated opacity reaches
// im3D.opacity. Based on the cuda5.5 volumeRender sample.
__global__ void __launch_bounds__(1024,1) render3D() {
  const float opacityThreshold = im3D.opacity;//0.95f;
  const float density=im3D.density, brightness=im.max_rgb;
  float3 boxMin, boxMax; set_boxMinMax(boxMin, boxMax);
  int x = blockIdx.x*blockDim.x + threadIdx.x;
  int y = blockIdx.y*blockDim.y + threadIdx.y;
  //bool isCnt=blockIdx.x==gridDim.x/2 && blockIdx.y==gridDim.y/2 && threadIdx.x == blockDim.x/2 && threadIdx.y == blockDim.y/2;
  //if ((x >= im3D.bNx) || (y >= im3D.bNy)) return;
  //if(x==0 && y==0) printf("block: %gx%gx%g\n", boxMax.x, boxMax.y, boxMax.z);
  // calculate eye ray in world space
  Ray eyeRay; set_eyeRay(eyeRay, x,y);
  //const int Nsum=im3D.Nx+im3D.Ny+im3D.Nz;
  //const float dbNxy=2.0f/(im3D.bNx+im3D.bNy);
  //eyeRay.o = make_float3(mul(c_invViewMatrix, make_float4(0.0f, 0.0f, 0.0f, 0.32f*Nsum)));
  //eyeRay.d = normalize(make_float3((x-im3D.bNx/2)*dbNxy, (y-im3D.bNy/2)*dbNxy, -2.0f));
  //eyeRay.d = mul(c_invViewMatrix, eyeRay.d);
  // Paint the background first; keep the pixel reference for compositing.
  uchar4& vbmp=get_backgrownd(eyeRay, boxMin, boxMax, y*im3D.bNx + x);
  // Jitter the ray by a per-thread random angle (radius randR) for AA.
  float phi=im3D.randArr[threadIdx.x+threadIdx.y*blockDim.x];
  set_eyeRay(eyeRay, x+im3D.randR*cos(phi),y+im3D.randR*sin(phi));
  float tnear, tfar;
  int hit = intersectBox(eyeRay, boxMin, boxMax, &tnear, &tfar);
  if (!hit) return;
  if(tnear < 0.0f) tnear = 0.0f; // clamp to near plane
  //if(tnear+im3D.tstep*Nsum<tfar) tfar = tnear+im3D.tstep*Nsum;
  // march along ray from front to back, accumulating color
  float4 sum = make_float4(0.0f);
  //float3 pos = eyeRay.o + eyeRay.d*tnear;
  //float3 step = eyeRay.d*im3D.tstep;
  // SzfdBox converts world coordinates into texel coordinates.
  const float3 SzfdBox=make_float3(im3D.Nx,im3D.Ny,im3D.Nz)/(boxMax-boxMin);
  float3 pos_sc = (eyeRay.o + eyeRay.d*tnear-boxMin)*SzfdBox-0.5f;
  const float3 step_sc = (eyeRay.d*im3D.tstep)*SzfdBox;
  //const float pscale=im.pscale*0.01f, fscale=100.0f*im.fscale, fmin=0.5f-im.fmin*fscale;
  //if(isCnt) printf("I am ray: %f(%f)%f step: %f,%f,%f; pos: %f,%f,%f of %d,%d,%d\n", tnear,im3D.tstep,tfar, step_sc.x,step_sc.y,step_sc.z, pos_sc.x, pos_sc.y, pos_sc.z, im3D.Nx,im3D.Ny,im3D.Nz);
  for(float t=tnear; t<tfar; t+=im3D.tstep, pos_sc += step_sc) {
    // read from 3D texture
    float4 col = im.get_color_for3D(tex3D(data3D_tex, pos_sc.x, pos_sc.y, pos_sc.z));
    float w=col.w*density*(1.0f - sum.w); col.w = 1;
    sum += col * w;
    if(sum.w >= opacityThreshold) {
      // Remove the overshoot so sum.w lands exactly on the threshold.
      sum -= col*(sum.w - opacityThreshold);
      break;
    }/*
    //float f = tex3D(data3D_tex, pos_sc.x, pos_sc.y, pos_sc.z);
    //float4 col = tex1D(fpal_col_tex, 0.5f + pscale*tex1D(fpal_scale_tex, fmin+f*fscale));
    col.w *= density;
    // "under" operator for back-to-front blending
    //sum = lerp(sum, col, col.w);
    // pre-multiply alpha
    col.x *= col.w;
    col.y *= col.w;
    col.z *= col.w;
    // "over" operator for front-to-back blending
    sum = sum + col*(1.0f - sum.w);
    // exit early if opaque
    if (sum.w > opacityThreshold) break;
    // pos_sc += step_sc;
    */
  }
  //if(isCnt) printf("I am ray: %f\n",sum.w);
  sum.x *= brightness; sum.y *= brightness; sum.z *= brightness;
  //sum *= brightness;
  // write output color
  vbmp = rgbaFloatToInt(sum, vbmp);
  if(im3D.draw_fg_flag) vbmp=get_foregrownd(eyeRay, boxMin, boxMax, y*im3D.bNx + x);
  //if(threadIdx.x==0 && threadIdx.y==0) vbmp = make_uchar4(255,255,255,255);
  if(im3D.draw_sec_xyz_flag) {
    // Invert the pixel where the ray stopped on one of the section planes.
    // NOTE(review): alpha is written as -vbmp.w here but as vbmp.w in
    // grad_render3D — verify which is intended.
    if(fabs(pos_sc.x-im3D.ix0)<=0.5|| fabs(pos_sc.y-im3D.iy0)<=0.5|| fabs(pos_sc.z-im3D.iz0)<=0.5) vbmp = make_uchar4(255-vbmp.x,255-vbmp.y,255-vbmp.z,-vbmp.w);
  }
}
// Snapshot the currently displayed bitmap into imHost.bmp4backgrownd
// (device memory) so later renders can reuse it as a backdrop.
// Fixes over the original: the GL resource is now unmapped on every error
// path (previously a throw after mapping left it mapped forever), and the
// stale bmp4backgrownd pointer is cleared before/after reallocation so a
// failed cudaMalloc cannot leave a dangling pointer behind.
void im3D_pars::save_bmp4backgrownd() {
  bool mapped=false;  // whether the GL resource still needs unmapping
  try {
    uchar4* devPtr; size_t size;
    if(CHECK_ERROR(cudaGraphicsMapResources(1, &im2D.resource, NULL))) throw(-1);
    mapped = true;
    // NOTE(review): negate is launched before the freshly mapped pointer is
    // fetched, so it operates on whatever im.bmp currently points to —
    // verify this ordering is intended.
    if(imHost.negate_flag) negate <<<bNx/NW,NW>>>();
    if(CHECK_ERROR(cudaGraphicsResourceGetMappedPointer((void**) &devPtr, &size, im2D.resource))) throw(-1);
    if(imHost.bmp4backgrownd != 0) { CHECK_ERROR(cudaFree(imHost.bmp4backgrownd)); imHost.bmp4backgrownd = 0; }
    if(CHECK_ERROR(cudaMalloc((void**) &imHost.bmp4backgrownd, size))) { imHost.bmp4backgrownd = 0; throw(-1); }
    if(CHECK_ERROR(cudaMemcpy(imHost.bmp4backgrownd, devPtr, size, cudaMemcpyDeviceToDevice))) throw(-1);
    mapped = false;
    im2D.unmapAfterDraw();
  } catch(...) {
    if(mapped) im2D.unmapAfterDraw();  // never leave the PBO mapped on error
    printf("save_bmp4backgrownd: Возникла какая-то ошибка.\n");
  }
}
// Redraw the tiled 2D-sections view: upload the current im/im3D parameters
// to the device, clear the bitmap, then lay out yz-, xz- and xy-slices (one
// per mesh step along the fixed axis) left-to-right, wrapping to a new row
// when a slice would not fit. Finishes with the optional palette strip and
// negation pass.
void im3D_pars::recalc_sec_im3D() {
  try {
    imHost.bmp = im2D.map4draw();
    imHost.bind2draw();
    // Push the host-side parameter structs to the device __constant__ copies.
    if(CHECK_ERROR(cudaMemcpyToSymbol(im, &imHost, sizeof(imHost)))) throw(-1);
    if(CHECK_ERROR(cudaMemcpyToSymbol(im3D, this, sizeof(im3D_pars)))) throw(-1);
    // On-screen slice sizes after zoom, and the matching block counts.
    int NxZ=Nx/x_zoom, NyZ=Ny/y_zoom, NzZ=Nz/z_zoom;
    int NxB=(NxZ+NW-1)/NW, NyB=(NyZ+NW-1)/NW, NzB=(NzZ+NW-1)/NW;
    unsigned char ub[3];
    // Clamp the 0..1 background color into 8-bit channels.
    for(int i=0; i<3; i++) { ub[i] = bkgr_col[i]<0?0:(bkgr_col[i]>1?255:255.*bkgr_col[i]); }
    im3Dclear <<<dim3(bNx/NW,bNy/NW),dim3(NW,NW)>>>(make_uchar4(ub[0], ub[1], ub[2], 255));
    // (shX,shY): top-left pixel of the next slice; 2-pixel gaps between slices.
    int shX=0,shY=0;
    for(int ix=int(Nx*RotPoint[0])%int(MeshBox[0]); ix<Nx; ix+=MeshBox[0]) {
      if(shX+NyZ>bNx) { shX=0; shY += NzZ+2; } if(shY+NzZ>bNy) break;
      im3Ddraw_any<0,1,2> <<<dim3(NyB,NzB),dim3(NW,NW)>>>(shX+shY*bNx,ix);
      shX += NyZ+2;
    }// if(shX>0) { shX=0; shY += NzZ+2; }
    for(int iy=int(Ny*RotPoint[1])%int(MeshBox[1]); iy<Ny; iy+=MeshBox[1]) {
      if(shX+NxZ>bNx) { shX=0; shY += NzZ+2; } if(shY+NzZ>bNy) break;
      im3Ddraw_any<1,0,2> <<<dim3(NxB,NzB),dim3(NW,NW)>>>(shX+shY*bNx,iy);
      shX += NxZ+2;
    } if(shX>0) { shX=0; shY += NzZ+2; }
    for(int iz=int(Nz*RotPoint[2])%int(MeshBox[2]); iz<Nz; iz+=MeshBox[2]) {
      //printf("draw xy at iz=%d; (%d,%d) -> (%d,%d)..\n", iz, shX,shY, 0,shY +(NyZ+2), );
      if(shX+NxZ>bNx) { shX=0; shY += NyZ+2; } if(shY+NyZ>bNy) break;
      im3Ddraw_any<1,2,0> <<<dim3(NxB,NyB),dim3(NW,NW)>>>(shX+shY*bNx,iz);
      shX += NxZ+2;
    }// if(shX>0) { shX=0; shY += NyZ+2; }
    if(imHost.draw_flag) draw_pal <<<bNx/NW,NW>>>();
    if(imHost.negate_flag) negate <<<bNx/NW,NW>>>();
    imHost.nFrame++;
    imHost.unbindAfterDraw();
    im2D.unmapAfterDraw();
  } catch(...) {
    printf("recalc_im3D: Возникла какая-то ошибка.\n");
  }
}
void im3D_pars::shift0(int x, int y, int x1, int y1) {
int ix,iy, dx=x1-x, dy=y1-y, sh=dx+dy*bNx;
if(secType!=1) {
ix=(x-secXsh%bNx)*z_zoom; iy=(y-secXsh/bNx)*y_zoom;
if(0<=ix && ix<Nz && 0<=iy && iy<Ny) { if(secXsh%bNx+dx>=0 && secXsh/bNx+dy>=0) secXsh += sh; return; }
} else {
ix=(x-secXsh%bNx)*y_zoom; iy=(y-secXsh/bNx)*z_zoom;
if(0<=ix && ix<Ny && 0<=iy && iy<Nz) { if(secXsh%bNx+dx>=0 && secXsh/bNx+dy>=0) secXsh += sh; return; }
}
if(secType<2) {
ix=(x-secYsh%bNx)*x_zoom; iy=(y-secYsh/bNx)*z_zoom;
if(0<=ix && ix<Nx && 0<=iy && iy<Nz) { if(secYsh%bNx+dx>=0 && secYsh/bNx+dy>=0) secYsh += sh; return; }
} else {
ix=(x-secYsh%bNx)*z_zoom; iy=(y-secYsh/bNx)*x_zoom;
if(0<=ix && ix<Nz && 0<=iy && iy<Nx) { if(secYsh%bNx+dx>=0 && secYsh/bNx+dy>=0) secYsh += sh; return; }
}
ix=(x-secZsh%bNx)*x_zoom; iy=(y-secZsh/bNx)*y_zoom;
if(0<=ix && ix<Nx && 0<=iy && iy<Ny) { if(secZsh%bNx+dx>=0 && secZsh/bNx+dy>=0) secZsh += sh; return; }
}
void im3D_pars::reset0(int x, int y) {
int ix,iy;
ix=(x-secZsh%bNx)*x_zoom; iy=(y-secZsh/bNx)*y_zoom;
if(0<=ix && ix<Nx && 0<=iy && iy<Ny) { ix0 = ix; iy0 = iy; return; }
if(secType<2) {
ix=(x-secYsh%bNx)*x_zoom; iy=(y-secYsh/bNx)*z_zoom;
if(0<=ix && ix<Nx && 0<=iy && iy<Nz) { ix0 = ix; iz0 = iy; return; }
} else {
ix=(x-secYsh%bNx)*z_zoom; iy=(y-secYsh/bNx)*x_zoom;
if(0<=ix && ix<Nz && 0<=iy && iy<Nx) { iz0 = ix; ix0 = iy; return; }
}
if(secType!=1) {
ix=(x-secXsh%bNx)*z_zoom; iy=(y-secXsh/bNx)*y_zoom;
if(0<=ix && ix<Nz && 0<=iy && iy<Ny) { iz0 = ix; iy0 = iy; return; }
} else {
ix=(x-secXsh%bNx)*y_zoom; iy=(y-secXsh/bNx)*z_zoom;
if(0<=ix && ix<Ny && 0<=iy && iy<Nz) { iy0 = ix; iz0 = iy; return; }
}
}
void im3D_pars::recalc_im3D() {
try {
imHost.bmp = im2D.map4draw();
imHost.bind2draw();
if(CHECK_ERROR(cudaMemcpyToSymbol(im, &imHost, sizeof(imHost)))) throw(-1);
if(CHECK_ERROR(cudaMemcpyToSymbol(im3D, this, sizeof(im3D_pars)))) throw(-1);
//if(CHECK_ERROR(cudaMemcpyToSymbol(c_invViewMatrix, invViewMatrix, sizeof(float4)*3))) throw(-1);
//if(CHECK_ERROR(cudaDeviceSetCacheConfig(cudaFuncCachePreferShared))) throw(-1);
//Pal via Tex
int NxB=(Nx/x_zoom+NW-1)/NW, NyB=(Ny/y_zoom+NW-1)/NW, NzB=(Nz/z_zoom+NW-1)/NW;
unsigned char ub[3];
for(int i=0; i<3; i++) { ub[i] = bkgr_col[i]<0?0:(bkgr_col[i]>1?255:255.*bkgr_col[i]); }
im3Dclear <<<dim3(bNx/NW,bNy/NW),dim3(NW,NW)>>>(make_uchar4(ub[0], ub[1], ub[2], 255));
im3Ddraw_any<1,2,0> <<<dim3(NxB,NyB),dim3(NW,NW)>>>(secZsh,iz0);
if(secType<2) im3Ddraw_any<1,0,2> <<<dim3(NxB,NzB),dim3(NW,NW)>>>(secYsh,iy0);
else im3Ddraw_any<2,0,1> <<<dim3(NzB,NxB),dim3(NW,NW)>>>(secYsh,iy0);
if(secType!=1) im3Ddraw_any<0,2,1> <<<dim3(NzB,NyB),dim3(NW,NW)>>>(secXsh,ix0);
else im3Ddraw_any<0,1,2> <<<dim3(NyB,NzB),dim3(NW,NW)>>>(secXsh,ix0);
if(imHost.draw_flag) draw_pal <<<bNx/NW,NW>>>();
if(imHost.negate_flag) negate <<<bNx/NW,NW>>>();
imHost.nFrame++;
imHost.unbindAfterDraw();
im2D.unmapAfterDraw();
} catch(...) {
printf("recalc_im3D: Возникла какая-то ошибка.\n");
}
}
void im3D_pars::recalc3D_im3D() {
try {
// use OpenGL to build view matrix
GLfloat modelView[16];
glMatrixMode(GL_MODELVIEW);
glPushMatrix();
glLoadIdentity();
switch(mk_state.modState) {
case GLUT_ACTIVE_SHIFT:
glRotatef(-viewRotation[1], 0.0, 1.0, 0.0);
glRotatef(-viewRotation[0], 1.0, 0.0, 0.0);
break;
case GLUT_ACTIVE_CTRL:
default:
glRotatef(-viewRotation[0], 1.0, 0.0, 0.0);
glRotatef(-viewRotation[1], 0.0, 1.0, 0.0);
glRotatef(-viewRotation[2], 0.0, 0.0, 1.0);
glRotatef(-viewRotationTmp[0], 1.0, 0.0, 0.0);
glRotatef(-viewRotationTmp[1], 0.0, 1.0, 0.0);
}
glTranslatef(-viewTranslation[0], -viewTranslation[1], -viewTranslation[2]);
glGetFloatv(GL_MODELVIEW_MATRIX, modelView);
glPopMatrix();
for(int i=0; i<12; i++) invViewMatrix[i] = modelView[4*(i&3)+i/4];
if(CHECK_ERROR(cudaMemcpyToSymbol(c_invViewMatrix, invViewMatrix, sizeof(float4)*3))) throw(-1);
//copyInvViewMatrix(invViewMatrix, sizeof(float4)*3);
imHost.bmp = im2D.map4draw();
imHost.bind2draw();
if(CHECK_ERROR(cudaMemcpyToSymbol(im, &imHost, sizeof(imHost)))) throw(-1);
if(CHECK_ERROR(cudaMemcpyToSymbol(im3D, this, sizeof(im3D_pars)))) throw(-1);
//if(CHECK_ERROR(cudaDeviceSetCacheConfig(cudaFuncCachePreferShared))) throw(-1);
switch(mode3D) {
case 0: render3D <<<dim3(bNx/NW,bNy/NW),dim3(NW,NW)>>>(); break;
case 1:
#ifdef SURF
surf_render3D <<<dim3(bNx/NW,bNy/NW),dim3(NW,NW)>>>();
#else//SURF
printf("Для задействования визуализации на поверхности скомпилируйте im3D.cu с опцией -DSURF или используйте im3Dsurf\n");
#endif//SURF
break;
case 2: grad_render3D <<<dim3(bNx/NW,bNy/NW),dim3(NW,NW)>>>(); break;
}
if(imHost.draw_flag) {
//if(mode3D<=1)
draw_pal <<<bNx/NW,NW>>>();
if(mode3D>0) draw_pal3D <<<NW,NW>>>();
//if(imHost.palDim <= 2) draw_pal <<<bNx/NW,NW>>>();
//else if(imHost.palDim > 1) draw_pal3D <<<NW,NW>>>();
}
if(imHost.negate_flag) negate <<<bNx/NW,NW>>>();
imHost.nFrame++;
imHost.unbindAfterDraw();
im2D.unmapAfterDraw();
} catch(...) {
printf("recalc3D_im3D: Возникла какая-то ошибка.\n");
}
}
#include <cufft.h>
//inline __device__ float my_fabsC(float2& v) { return v.x;}//hypotf(v.x, v.y); }
inline __device__ float my_fabsC(float2& v) { return hypotf(v.x, v.y); }
inline __device__ int my_abs(int v) { return v>=0?v:-v; }
//inline __device__ int my_abs(int v) { return v==0?1:v>=0?v:-v; }
__global__ void cmplx2abs(cufftComplex *dataC, cufftReal *dataR) {
//float* pC=(float*)(dataC+blockIdx.x*(blockDim.x/2+1));
//dataR[blockIdx.x*blockDim.x+threadIdx.x] = pC[threadIdx.x];
dataR[blockIdx.x*blockDim.x+threadIdx.x] = my_fabsC(dataC[blockIdx.x*(blockDim.x/2+1)+my_abs(blockDim.x/2-threadIdx.x)]);
}
#define CHECK_ERROR_FFT(err) CheckErrorFFT( err, __FILE__,__LINE__)
bool CheckErrorFFT(cufftResult rs, const char *file, int line) {
if(rs == CUFFT_SUCCESS) return false;
const char* err="Непонятная ошибка в cuFFT";
switch(rs) {
case CUFFT_SUCCESS: err = "0, // The cuFFT operation was successful";
case CUFFT_INVALID_PLAN: err = "1, // cuFFT was passed an invalid plan handle";
case CUFFT_ALLOC_FAILED: err = "2, // cuFFT failed to allocate GPU or CPU memory";
case CUFFT_INVALID_TYPE: err = "3, // No longer used";
case CUFFT_INVALID_VALUE: err = "4, // User specified an invalid pointer or parameter";
case CUFFT_INTERNAL_ERROR: err = "5, // Driver or internal cuFFT library error";
case CUFFT_EXEC_FAILED: err = "6, // Failed to execute an FFT on the GPU";
case CUFFT_SETUP_FAILED: err = "7, // The cuFFT library failed to initialize";
case CUFFT_INVALID_SIZE: err = "8, // User specified an invalid transform size";
case CUFFT_UNALIGNED_DATA: err = "9, // No longer used";
case CUFFT_INCOMPLETE_PARAMETER_LIST: err = "10, // Missing parameters in call";
case CUFFT_INVALID_DEVICE: err = "11, // Execution of a plan was on different GPU than plan creation";
case CUFFT_PARSE_ERROR: err = "12, // Internal plan database error";
case CUFFT_NO_WORKSPACE: err = "13 // No workspace has been provided prior to plan execution";
};
fprintf(stderr, "%s in %s at line %d\n", err, file, line);
return true;
}
void makeFFTz(float* buf, int Nx, int Ny, int Nz) {
try {
cufftHandle plan;
cufftComplex *dataC; cufftReal *dataR;
if(CHECK_ERROR(cudaMalloc((void**)&dataC, sizeof(cufftComplex)*(Nz/2+1)*Nx*Ny))) throw(-1);
if(CHECK_ERROR(cudaMalloc((void**)&dataR, sizeof(cufftReal)*Nz*Nx*Ny))) throw(-1);
if(CHECK_ERROR(cudaMemcpy(dataR, buf, 4*Nz*Nx*Ny, cudaMemcpyHostToDevice))) throw(-1);
if(CHECK_ERROR_FFT(cufftPlan1d(&plan, Nz, CUFFT_R2C, Nx*Ny))) throw(-1);
if(CHECK_ERROR_FFT(cufftExecR2C(plan, dataR, dataC))) throw(-1);
if(CHECK_ERROR(cudaDeviceSynchronize())) throw(-1);
cmplx2abs <<<Nx*Ny,Nz>>>(dataC, dataR);
if(CHECK_ERROR(cudaDeviceSynchronize())) throw(-1);
if(CHECK_ERROR(cudaMemcpy(buf, dataR, 4*Nz*Nx*Ny, cudaMemcpyDeviceToHost))) throw(-1);
if(CHECK_ERROR_FFT(cufftDestroy(plan))) throw(-1);
if(CHECK_ERROR(cudaFree(dataC))) throw(-1);
if(CHECK_ERROR(cudaFree(dataR))) throw(-1);
} catch(...) {
printf("Ошибка в makeFFTz.\n");
}
}
void im3D_pars::initCuda(Arr3D_pars& arr) {
//printf("==============\n");
//for(int ix=0; ix<Nx; ix++) for(int iy=0; iy<Ny; iy++) for(int iz=0; iz<Nz; iz++) arr.Arr3Dbuf[iz*Ny*Nx+iy*Nx+ix]=exp(-0.01*ix);
// create transfer function texture
//cudaChannelFormatDesc channelDesc = cudaCreateChannelDesc<float>();
//if(CHECK_ERROR(cudaMalloc3DArray(&data3D_texArray, &channelDesc, make_cudaExtent(Nx,Ny,Nz)))) throw(-1);
cudaMemcpy3DParms myparms={0};
myparms.srcPos = make_cudaPos(0,0,0);
myparms.dstPos = make_cudaPos(0,0,0);
myparms.srcPtr = make_cudaPitchedPtr(arr.Arr3Dbuf, Nx*sizeof(floatT4im), Nx, Ny);
myparms.dstArray = data3D_texArray;
myparms.extent = make_cudaExtent(Nx,Ny,Nz);
myparms.kind = arr.inGPUmem?cudaMemcpyDeviceToDevice:cudaMemcpyHostToDevice;
if(CHECK_ERROR(cudaMemcpy3D(&myparms))) throw(-1);
//if(draw_edges_flag) draw_edges(imHost.fmax);
initTex();
}
void im3D_pars::initTex() {
data3D_tex.normalized = false;//true;
data3D_tex.filterMode = ((render_type==3)==filterMode_flag)?cudaFilterModeLinear:cudaFilterModePoint; //Point;//filter_pal?cudaFilterModePoint:cudaFilterModeLinear;
data3D_tex.addressMode[0] = cudaAddressModeClamp;//cyclic_pal?cudaAddressModeWrap:cudaAddressModeClamp;
data3D_tex.addressMode[1] = cudaAddressModeClamp;//cyclic_pal?cudaAddressModeWrap:cudaAddressModeClamp;
data3D_tex.addressMode[2] = cudaAddressModeClamp;//cyclic_pal?cudaAddressModeWrap:cudaAddressModeClamp;
if(CHECK_ERROR(cudaBindTextureToArray(data3D_tex, data3D_texArray))) throw(-1);
}
void im3D_pars::initCuda_surf(Arr3D_pars& arr, size_t sh) {
#ifdef SURF
cudaMemcpy3DParms myparms={0};
myparms.srcPos = make_cudaPos(0,0,0);
myparms.dstPos = make_cudaPos(0,0,0);
size_t N=Nx; N*=Ny; N*=Nz;
myparms.srcPtr = make_cudaPitchedPtr(arr.Arr3Dbuf+sh, Nx*sizeof(short2), Nx, Ny);
myparms.dstArray = data3Dsurf_texArray;
myparms.extent = make_cudaExtent(Nx,Ny,Nz);
myparms.kind = arr.inGPUmem?cudaMemcpyDeviceToDevice:cudaMemcpyHostToDevice;
if(CHECK_ERROR(cudaMemcpy3D(&myparms))) throw(-1);
initTex_surf();
#endif//SURF
}
void im3D_pars::initTex_surf() {
data3Dsurf_tex.normalized = false;//true;
data3Dsurf_tex.filterMode = cudaFilterModePoint; //Point;//filter_pal?cudaFilterModePoint:cudaFilterModeLinear;
data3Dsurf_tex.addressMode[0] = cudaAddressModeClamp;//cyclic_pal?cudaAddressModeWrap:cudaAddressModeClamp;
data3Dsurf_tex.addressMode[1] = cudaAddressModeClamp;//cyclic_pal?cudaAddressModeWrap:cudaAddressModeClamp;
data3Dsurf_tex.addressMode[2] = cudaAddressModeClamp;//cyclic_pal?cudaAddressModeWrap:cudaAddressModeClamp;
if(CHECK_ERROR(cudaBindTextureToArray(data3Dsurf_tex, data3Dsurf_texArray))) throw(-1);
}
void reset(im3D_pars* p) {
imHost.reset();
imHost.set_lim(-1.f,1.f);
imHost.draw_flag = imHost.negate_flag = imHost.centric_pal = true;
imHost.cyclic_pal = false;
if(p) p->reset();
}
void im3D_pars::init3D(Arr3D_pars& arr) {
//::reset();
optfid = open(optfName, O_RDWR|O_CREAT, 0644);
if(optfid<0) printf("Не могу открыть файл %s, сохранение/загрузка наборов опций визуализации невозможна\n", optfName);
cudaChannelFormatDesc channelDesc = cudaCreateChannelDesc<floatT4im>();
printf("im3D_pars::init3D: Nx,Ny,Nz=%d,%d,%d\n", Nx,Ny,Nz);
if(CHECK_ERROR(cudaMalloc3DArray(&data3D_texArray, &channelDesc, make_cudaExtent(Nx,Ny,Nz)))) throw(-1);
if(CHECK_ERROR(cudaMalloc(&randArr, NW*NW*sizeof(float)))) throw(-1);
curandState *devStates;
cudaMalloc( (void **)&devStates, NW*NW*sizeof(curandState) );
init_rand<<<NW,NW>>>(devStates,randArr);
if(CHECK_ERROR(cudaDeviceSynchronize())) throw(-1);
cudaFree(devStates);
//initCuda(arr); ---- !!!!!!!!!!!!!!!!!!!
#ifdef SURF
if(CHECK_ERROR(cudaDeviceSynchronize())) throw(-1);
channelDesc = cudaCreateChannelDesc<short2>();
if(CHECK_ERROR(cudaMalloc3DArray(&data3Dsurf_texArray, &channelDesc, make_cudaExtent(Nx,Ny,Nz)))) throw(-1);
if(CHECK_ERROR(cudaDeviceSynchronize())) throw(-1);
//initCuda_surf(arr); ----- !!!!!!!!!!!!!!!!!!1
#endif//SURF
}
void im3D_pars::recalc_func() {
if(recalc_always || recalc_at_once) {
if(recalc_at_once) recalc_at_once=false;
else xyz->step();
cudaTimer tm; tm.start();
if(draw_bmp4backgrownd>=2 && render_type==3) {
switch(draw_bmp4backgrownd) {
case 2: recalc_im3D(); break;
case 3: recalc_sec_im3D(); break;
}
save_bmp4backgrownd();
}
switch(render_type) {
case 2: recalc_im3D(); break;
case 3: recalc3D_im3D(); break;
}
runTime=tm.stop(); SmoothFPS = 0.9*SmoothFPS+100./runTime;
if(type_diag_flag>=2) printf("Frame %d (%.2f/%.2f fps), last run Times: %7.2f msec\n", imHost.nFrame, SmoothFPS, 1000./runTime, runTime);
}
}
int im3D_pars::init_from_command_line(char** argv) {
if(strcmp(*argv,"--sensor")==0) { float v[3]; read_float3(v, argv[1]); icalcNdrop.add_sensor(v[0], v[1], v[2]); return 2; }
return im3D_pars4save::init_from_command_line(argv);
}
floatT4im Arr3D_pars::get_val_from_arr3D(int ix, int iy, int iz) {
if(inCPUmem) return ((floatT4im*)Arr3Dbuf)[get_ind(ix,iy,iz)];
floatT4im res;
if(inGPUmem) CHECK_ERROR(cudaMemcpy(&res, get_ptr((sizeof(floatT4im)/sizeof(float))*ix,iy,iz), sizeof(floatT4im), cudaMemcpyDeviceToHost));
return res;
}
/*
__global__ void calc_limits(float* buf, float* fLims, int Nxv, int Nxa, int Nxs) {
float2 fLim;
float* pf=buf+blockIdx.x*Nxv+threadIdx.x;
fLim.x = fLim.y = *pf;
for(int i=0; i<Nxs; i++,pf+=Nxa*Nxv) {
float v=*pf;
if(v<fLim.x) fLim.x = v;
if(v>fLim.y) fLim.y = v;
}
__shared__ float2 fLim_sh[Nxv];
fLim_sh[threadIdx.x] = fLim;
__syncthreads();
if(threadIdx.x>warpSize) return;
for(int i=threadIdx.x; i<Nxv; i+=warpSize) {
float2 v=fLim_sh[i];
if(v.x<fLim.x) fLim.x = v.x;
if(v.y>fLim.y) fLim.y = v.y;
}
fLim_sh[threadIdx.x] = fLim;
if(threadIdx.x>0) return;
for(int i=0; i<warpSize; i++) {
float2 v=fLim_sh[i];
if(v.x<fLim.x) fLim.x = v.x;
if(v.y>fLim.y) fLim.y = v.y;
}
fLims[2*blockIdx.x ] = fLim.x;
fLims[2*blockIdx.x+1] = fLim.y;
}
void Arr3D_pars::set_lim_from_arr3D() {
if(inCPUmem) reset_min_max();
if(inGPUmem) {
float* fLims=0,* fLimsD=0;
CHECK_ERROR(cudaMalloc((void**) &fLimsD, 2*Ny*sizeof(float)));
calc_limits<<<Ny,Nx>>>(Arr3Dbuf, fLimsD, Nx, Ny, Nz);
fLims=new float[2*Ny];
CHECK_ERROR(cudaMemcpy(fLims, fLimsD, 2*Ny*sizeof(float), cudaMemcpyDeviceToHost));
CHECK_ERROR(cudaFree(fLimsD));
fMin = fLims[0]; fMax = fLims[1];
for(int i=0; i<Ny; i++) {
if(fLims[2*i ]<fMin) fMin = fLims[2*i ];
if(fLims[2*i+1]>fMax) fMax = fLims[2*i+1];
}
delete fLims;
}
}*/
|
2bce458919543a5e6a62396a1f3e473619c9e905.hip | // !!! This is a file automatically generated by hipify!!!
#include "multi_line_curve_kernel.hpp"
#include <hip/hip_runtime.h>
#include <hip/hip_runtime.h>
#include "yuzu/foundation/memory/RelativePointer.hpp"
#include "yuzu/foundation/memory/pointer.hpp"
#include "yuzu/domain/curves/CurveData.hpp"
#include "yuzu/common/gpu.hpp"
#include "yuzu/utils/kernel_utils.hpp"
#define X(x) points[2*(x) + 0]
#define Y(x) points[2*(x) + 1]
namespace ay = axis::yuzu;
namespace ayfm = axis::yuzu::foundation::memory;
namespace aydcu = axis::yuzu::domain::curves;
__global__ void __launch_bounds__(AXIS_YUZU_MAX_THREADS_PER_BLOCK)
UpdateCurveOnGPUKernel( uint64 numThreadsToUse, uint64 startIndex,
void *baseMemoryAddressOnGPU, real time )
{
uint64 index = ay::GetThreadIndex(gridDim, blockIdx, blockDim, threadIdx,
startIndex);
if (!ay::IsActiveThread(index, numThreadsToUse)) return;
aydcu::CurveData curve(baseMemoryAddressOnGPU, index,
sizeof(ayfm::RelativePointer));
ayfm::RelativePointer& dataPtr =
*(ayfm::RelativePointer *)curve.GetCurveData();
real &outputBucket = *curve.GetOutputBucket();
void *curveDataRegion = *dataPtr;
uint64 numPoints = *(uint64 *)curveDataRegion;
const real *points = (real *)((uint64)curveDataRegion + sizeof(uint64));
for (size_t i = 1; i < numPoints; i++)
{
if ((X(i) > time) || (i == numPoints-1 && (abs(X(i) - time) <= 1e-15)))
{
// trivial case: horizontal line
// if (abs(Y(i-1) - Y(i)) <= 1e-15)
// {
// return Y(i);
// }
real a = Y(i) * (time - X(i-1));
real b = Y(i-1) * (time - X(i));
real c = 1.0 / (X(i) - X(i-1));
a = (a - b) * c;
outputBucket = a;
return;
// ret urn (a - b) / c;
// return (X(i)-X(i-1));
// return ((Y(i)-Y(i-1))) * (xCoord - X(i-1)) / (X(i)-X(i-1));
// return ((Y(i)-Y(i-1)) * (xCoord-X(i-1)) / (X(i)-X(i-1))) + Y(i-1);
}
}
// consider last curve point
outputBucket = Y(numPoints-1);
}
void axis::domain::curves::UpdateCurveOnGPU( uint64 numThreadsToUse,
uint64 startIndex, void *baseMemoryAddressOnGPU,
const axis::Dimension3D& gridDim, const axis::Dimension3D& blockDim,
void * streamPtr, real time )
{
dim3 grid, block;
grid.x = gridDim.X; grid.y = gridDim.Y; grid.z = gridDim.Z;
block.x = blockDim.X; block.y = blockDim.Y; block.z = blockDim.Z;
hipLaunchKernelGGL(( UpdateCurveOnGPUKernel), dim3(grid), dim3(block), 0, (hipStream_t)streamPtr,
numThreadsToUse, startIndex, baseMemoryAddressOnGPU, time);
}
| 2bce458919543a5e6a62396a1f3e473619c9e905.cu | #include "multi_line_curve_kernel.hpp"
#include <cuda.h>
#include <cuda_runtime.h>
#include "yuzu/foundation/memory/RelativePointer.hpp"
#include "yuzu/foundation/memory/pointer.hpp"
#include "yuzu/domain/curves/CurveData.hpp"
#include "yuzu/common/gpu.hpp"
#include "yuzu/utils/kernel_utils.hpp"
#define X(x) points[2*(x) + 0]
#define Y(x) points[2*(x) + 1]
namespace ay = axis::yuzu;
namespace ayfm = axis::yuzu::foundation::memory;
namespace aydcu = axis::yuzu::domain::curves;
__global__ void __launch_bounds__(AXIS_YUZU_MAX_THREADS_PER_BLOCK)
UpdateCurveOnGPUKernel( uint64 numThreadsToUse, uint64 startIndex,
void *baseMemoryAddressOnGPU, real time )
{
uint64 index = ay::GetThreadIndex(gridDim, blockIdx, blockDim, threadIdx,
startIndex);
if (!ay::IsActiveThread(index, numThreadsToUse)) return;
aydcu::CurveData curve(baseMemoryAddressOnGPU, index,
sizeof(ayfm::RelativePointer));
ayfm::RelativePointer& dataPtr =
*(ayfm::RelativePointer *)curve.GetCurveData();
real &outputBucket = *curve.GetOutputBucket();
void *curveDataRegion = *dataPtr;
uint64 numPoints = *(uint64 *)curveDataRegion;
const real *points = (real *)((uint64)curveDataRegion + sizeof(uint64));
for (size_t i = 1; i < numPoints; i++)
{
if ((X(i) > time) || (i == numPoints-1 && (abs(X(i) - time) <= 1e-15)))
{
// trivial case: horizontal line
// if (abs(Y(i-1) - Y(i)) <= 1e-15)
// {
// return Y(i);
// }
real a = Y(i) * (time - X(i-1));
real b = Y(i-1) * (time - X(i));
real c = 1.0 / (X(i) - X(i-1));
a = (a - b) * c;
outputBucket = a;
return;
// ret urn (a - b) / c;
// return (X(i)-X(i-1));
// return ((Y(i)-Y(i-1))) * (xCoord - X(i-1)) / (X(i)-X(i-1));
// return ((Y(i)-Y(i-1)) * (xCoord-X(i-1)) / (X(i)-X(i-1))) + Y(i-1);
}
}
// consider last curve point
outputBucket = Y(numPoints-1);
}
void axis::domain::curves::UpdateCurveOnGPU( uint64 numThreadsToUse,
uint64 startIndex, void *baseMemoryAddressOnGPU,
const axis::Dimension3D& gridDim, const axis::Dimension3D& blockDim,
void * streamPtr, real time )
{
dim3 grid, block;
grid.x = gridDim.X; grid.y = gridDim.Y; grid.z = gridDim.Z;
block.x = blockDim.X; block.y = blockDim.Y; block.z = blockDim.Z;
UpdateCurveOnGPUKernel<<<grid, block, 0, (cudaStream_t)streamPtr>>>(
numThreadsToUse, startIndex, baseMemoryAddressOnGPU, time);
}
|
665fea619cc1ef56b0b4a3b7d301dfbf7687b852.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
// Verify the behavior of the +gfxN-insts in the way that
// rocm-device-libs should be built with. e.g. If the device libraries has a function
// with "+gfx11-insts", that attribute should still be present after linking and not
// overwritten with the current target's settings.
// This is important because at this time, many device-libs functions that are only
// available on some GPUs put an attribute such as "+gfx11-insts" so that
// AMDGPURemoveIncompatibleFunctions can detect & remove them if needed.
// Build the fake device library in the way rocm-device-libs should be built.
//
// RUN: %clang_cc1 -x cl -triple amdgcn-amd-amdhsa\
// RUN: -mcode-object-version=none -emit-llvm-bc \
// RUN: %S/Inputs/ocml-sample-target-attrs.cl -o %t.bc
// Check the default behavior
// RUN: %clang_cc1 -x hip -triple amdgcn-amd-amdhsa -target-cpu gfx803 -fcuda-is-device \
// RUN: -mlink-builtin-bitcode %t.bc \
// RUN: -emit-llvm %s -o - | FileCheck %s --check-prefixes=CHECK,INTERNALIZE
// RUN: %clang_cc1 -x hip -triple amdgcn-amd-amdhsa -target-cpu gfx1101 -fcuda-is-device \
// RUN: -mlink-builtin-bitcode %t.bc -emit-llvm %s -o - | FileCheck %s --check-prefixes=CHECK,INTERNALIZE
// Check the case where no internalization is performed
// RUN: %clang_cc1 -x hip -triple amdgcn-amd-amdhsa -target-cpu gfx803 \
// RUN: -fcuda-is-device -mlink-bitcode-file %t.bc -emit-llvm %s -o - | FileCheck %s --check-prefixes=CHECK,NOINTERNALIZE
// Check the case where no internalization is performed
// RUN: %clang_cc1 -x hip -triple amdgcn-amd-amdhsa -target-cpu gfx1101 \
// RUN: -fcuda-is-device -mlink-bitcode-file %t.bc -emit-llvm %s -o - | FileCheck %s --check-prefixes=CHECK,NOINTERNALIZE
// CHECK: define {{.*}} i64 @do_intrin_stuff() #[[ATTR:[0-9]+]]
// INTERNALIZE: attributes #[[ATTR]] = {{.*}} "target-cpu"="gfx{{.*}}" "target-features"="+gfx11-insts"
// NOINTERNALIZE: attributes #[[ATTR]] = {{.*}} "target-features"="+gfx11-insts"
#define __device__ __attribute__((device))
#define __global__ __attribute__((global))
typedef unsigned long ulong;
extern "C" {
__device__ ulong do_intrin_stuff(void);
__global__ void kernel_f16(ulong* out) {
*out = do_intrin_stuff();
}
}
| 665fea619cc1ef56b0b4a3b7d301dfbf7687b852.cu | // Verify the behavior of the +gfxN-insts in the way that
// rocm-device-libs should be built with. e.g. If the device libraries has a function
// with "+gfx11-insts", that attribute should still be present after linking and not
// overwritten with the current target's settings.
// This is important because at this time, many device-libs functions that are only
// available on some GPUs put an attribute such as "+gfx11-insts" so that
// AMDGPURemoveIncompatibleFunctions can detect & remove them if needed.
// Build the fake device library in the way rocm-device-libs should be built.
//
// RUN: %clang_cc1 -x cl -triple amdgcn-amd-amdhsa\
// RUN: -mcode-object-version=none -emit-llvm-bc \
// RUN: %S/Inputs/ocml-sample-target-attrs.cl -o %t.bc
// Check the default behavior
// RUN: %clang_cc1 -x hip -triple amdgcn-amd-amdhsa -target-cpu gfx803 -fcuda-is-device \
// RUN: -mlink-builtin-bitcode %t.bc \
// RUN: -emit-llvm %s -o - | FileCheck %s --check-prefixes=CHECK,INTERNALIZE
// RUN: %clang_cc1 -x hip -triple amdgcn-amd-amdhsa -target-cpu gfx1101 -fcuda-is-device \
// RUN: -mlink-builtin-bitcode %t.bc -emit-llvm %s -o - | FileCheck %s --check-prefixes=CHECK,INTERNALIZE
// Check the case where no internalization is performed
// RUN: %clang_cc1 -x hip -triple amdgcn-amd-amdhsa -target-cpu gfx803 \
// RUN: -fcuda-is-device -mlink-bitcode-file %t.bc -emit-llvm %s -o - | FileCheck %s --check-prefixes=CHECK,NOINTERNALIZE
// Check the case where no internalization is performed
// RUN: %clang_cc1 -x hip -triple amdgcn-amd-amdhsa -target-cpu gfx1101 \
// RUN: -fcuda-is-device -mlink-bitcode-file %t.bc -emit-llvm %s -o - | FileCheck %s --check-prefixes=CHECK,NOINTERNALIZE
// CHECK: define {{.*}} i64 @do_intrin_stuff() #[[ATTR:[0-9]+]]
// INTERNALIZE: attributes #[[ATTR]] = {{.*}} "target-cpu"="gfx{{.*}}" "target-features"="+gfx11-insts"
// NOINTERNALIZE: attributes #[[ATTR]] = {{.*}} "target-features"="+gfx11-insts"
#define __device__ __attribute__((device))
#define __global__ __attribute__((global))
typedef unsigned long ulong;
extern "C" {
__device__ ulong do_intrin_stuff(void);
__global__ void kernel_f16(ulong* out) {
*out = do_intrin_stuff();
}
}
|
2c5dfe47cd6d9e8635912e6c518829c67ffd56f7.hip | // !!! This is a file automatically generated by hipify!!!
#include <stdlib.h>
#include <stdio.h>
#include <string.h>
#include <math.h>
#include <limits.h>
#include <unistd.h>
#if !defined(CPU)
#include <hip/hip_runtime.h>
#include "cuda_utils.h"
#include "random/RNG_rand48.h"
#endif
#include "config.h"
#include "load_params.h"
#include "load_instance.h"
#include "scenario.h"
#include "etc_matrix.h"
#include "energy_matrix.h"
#include "solution.h"
#include "utils.h"
#include "basic/mct.h"
#include "basic/minmin.h"
#include "basic/pminmin.h"
#include "cmochc/cmochc_cell.h"
#include "cmochc/cmochc_island.h"
struct params INPUT;
struct scenario SCENARIO;
struct etc_matrix ETC;
struct energy_matrix ENERGY;
int main(int argc, char** argv)
{
fprintf(stderr, "[INFO] == Global configuration constants ======================\n");
fprintf(stderr, " Debug level : %d\n", DEBUG_LEVEL);
fprintf(stderr, " Floating point precision : %s\n", DISPLAY_PRECISION);
fprintf(stderr, " Log execution time : ");
#ifdef TIMMING
fprintf(stderr, "YES\n");
#else
fprintf(stderr, "NO\n");
#endif
fprintf(stderr, " Random number generator : ");
#ifdef CPU_RAND
fprintf(stderr, "stdlib::rand_r\n");
#endif
#ifdef CPU_DRAND48
fprintf(stderr, "stdlib::drand48_r\n");
#endif
#ifdef CPU_MT
fprintf(stderr, "mersenne twister\n");
#endif
fprintf(stderr, " Output solutions to stdout : ");
#ifdef OUTPUT_SOLUTION
fprintf(stderr, "YES\n");
#else
fprintf(stderr, "NO\n");
#endif
fprintf(stderr, " Max. number of threads : %d\n", MAX_THREADS);
fprintf(stderr, "[INFO] ========================================================\n");
// =============================================================
// Loading input parameters
// =============================================================
if (load_params(argc, argv) == EXIT_FAILURE) {
fprintf(stderr, "[ERROR] ocurri un error leyendo los parametros de entrada.\n");
return EXIT_FAILURE;
}
// =============================================================
// Loading problem instance
// =============================================================
#if defined(DEBUG_0)
fprintf(stderr, "[DEBUG] cargando la instancia del problema...\n");
#endif
// Timming -----------------------------------------------------
TIMMING_START(ts_loading)
// Timming -----------------------------------------------------
// Se pide el espacio de memoria para la instancia del problema.
init_scenario();
init_etc_matrix();
init_energy_matrix();
// Se carga la matriz de ETC.
if (load_instance() == EXIT_FAILURE) {
fprintf(stderr, "[ERROR] ocurri un error leyendo los archivos de instancia.\n");
return EXIT_FAILURE;
}
#if defined(DEBUG_2)
show_scenario();
#endif
// Timming -----------------------------------------------------
TIMMING_END("cargando instancia", ts_loading);
// Timming -----------------------------------------------------
// =============================================================
// Solving the problem.
// =============================================================
#if defined(DEBUG_0)
fprintf(stderr, "[DEBUG] executing algorithm...\n");
#endif
// Timming -----------------------------------------------------
TIMMING_START(ts)
// Timming -----------------------------------------------------
if ((INPUT.algorithm == ALGORITHM_MINMIN) ||
(INPUT.algorithm == ALGORITHM_PMINMIND) ||
(INPUT.algorithm == ALGORITHM_MCT)) {
// =============================================================
// Trajectory algorithms.
// =============================================================
// Create empty solution
#if defined(DEBUG_0)
fprintf(stderr, "[DEBUG] creating empty solution...\n");
#endif
struct solution current_solution;
create_empty_solution(¤t_solution);
if (INPUT.algorithm == ALGORITHM_MINMIN) {
compute_minmin(¤t_solution);
} else if (INPUT.algorithm == ALGORITHM_PMINMIND) {
compute_pminmin(¤t_solution);
} else if (INPUT.algorithm == ALGORITHM_MCT) {
compute_mct(¤t_solution);
}
#if defined(OUTPUT_SOLUTION)
fprintf(stdout, "1\n");
for (int task_id = 0; task_id < INPUT.tasks_count; task_id++) {
fprintf(stdout, "%d\n", current_solution.task_assignment[task_id]);
}
#endif
free_solution(¤t_solution);
} else {
// =============================================================
// Population algorithms.
// =============================================================
if (INPUT.algorithm == ALGORITHM_CMOCHCISLAND) {
compute_cmochc_island();
} else if (INPUT.algorithm == ALGORITHM_CMOCHCCELL) {
compute_cmochc_cell();
}
}
// Timming -----------------------------------------------------
TIMMING_END("Elapsed algorithm total time", ts);
// Timming -----------------------------------------------------
return EXIT_SUCCESS;
}
| 2c5dfe47cd6d9e8635912e6c518829c67ffd56f7.cu | #include <stdlib.h>
#include <stdio.h>
#include <string.h>
#include <math.h>
#include <limits.h>
#include <unistd.h>
#if !defined(CPU)
#include <cuda.h>
#include "cuda_utils.h"
#include "random/RNG_rand48.h"
#endif
#include "config.h"
#include "load_params.h"
#include "load_instance.h"
#include "scenario.h"
#include "etc_matrix.h"
#include "energy_matrix.h"
#include "solution.h"
#include "utils.h"
#include "basic/mct.h"
#include "basic/minmin.h"
#include "basic/pminmin.h"
#include "cmochc/cmochc_cell.h"
#include "cmochc/cmochc_island.h"
struct params INPUT;
struct scenario SCENARIO;
struct etc_matrix ETC;
struct energy_matrix ENERGY;
int main(int argc, char** argv)
{
fprintf(stderr, "[INFO] == Global configuration constants ======================\n");
fprintf(stderr, " Debug level : %d\n", DEBUG_LEVEL);
fprintf(stderr, " Floating point precision : %s\n", DISPLAY_PRECISION);
fprintf(stderr, " Log execution time : ");
#ifdef TIMMING
fprintf(stderr, "YES\n");
#else
fprintf(stderr, "NO\n");
#endif
fprintf(stderr, " Random number generator : ");
#ifdef CPU_RAND
fprintf(stderr, "stdlib::rand_r\n");
#endif
#ifdef CPU_DRAND48
fprintf(stderr, "stdlib::drand48_r\n");
#endif
#ifdef CPU_MT
fprintf(stderr, "mersenne twister\n");
#endif
fprintf(stderr, " Output solutions to stdout : ");
#ifdef OUTPUT_SOLUTION
fprintf(stderr, "YES\n");
#else
fprintf(stderr, "NO\n");
#endif
fprintf(stderr, " Max. number of threads : %d\n", MAX_THREADS);
fprintf(stderr, "[INFO] ========================================================\n");
// =============================================================
// Loading input parameters
// =============================================================
if (load_params(argc, argv) == EXIT_FAILURE) {
fprintf(stderr, "[ERROR] ocurrió un error leyendo los parametros de entrada.\n");
return EXIT_FAILURE;
}
// =============================================================
// Loading problem instance
// =============================================================
#if defined(DEBUG_0)
fprintf(stderr, "[DEBUG] cargando la instancia del problema...\n");
#endif
// Timming -----------------------------------------------------
TIMMING_START(ts_loading)
// Timming -----------------------------------------------------
// Se pide el espacio de memoria para la instancia del problema.
init_scenario();
init_etc_matrix();
init_energy_matrix();
// Se carga la matriz de ETC.
if (load_instance() == EXIT_FAILURE) {
fprintf(stderr, "[ERROR] ocurrió un error leyendo los archivos de instancia.\n");
return EXIT_FAILURE;
}
#if defined(DEBUG_2)
show_scenario();
#endif
// Timming -----------------------------------------------------
TIMMING_END("cargando instancia", ts_loading);
// Timming -----------------------------------------------------
// =============================================================
// Solving the problem.
// =============================================================
#if defined(DEBUG_0)
fprintf(stderr, "[DEBUG] executing algorithm...\n");
#endif
// Timming -----------------------------------------------------
TIMMING_START(ts)
// Timming -----------------------------------------------------
if ((INPUT.algorithm == ALGORITHM_MINMIN) ||
(INPUT.algorithm == ALGORITHM_PMINMIND) ||
(INPUT.algorithm == ALGORITHM_MCT)) {
// =============================================================
// Trajectory algorithms.
// =============================================================
// Create empty solution
#if defined(DEBUG_0)
fprintf(stderr, "[DEBUG] creating empty solution...\n");
#endif
struct solution current_solution;
create_empty_solution(¤t_solution);
if (INPUT.algorithm == ALGORITHM_MINMIN) {
compute_minmin(¤t_solution);
} else if (INPUT.algorithm == ALGORITHM_PMINMIND) {
compute_pminmin(¤t_solution);
} else if (INPUT.algorithm == ALGORITHM_MCT) {
compute_mct(¤t_solution);
}
#if defined(OUTPUT_SOLUTION)
fprintf(stdout, "1\n");
for (int task_id = 0; task_id < INPUT.tasks_count; task_id++) {
fprintf(stdout, "%d\n", current_solution.task_assignment[task_id]);
}
#endif
free_solution(¤t_solution);
} else {
// =============================================================
// Population algorithms.
// =============================================================
if (INPUT.algorithm == ALGORITHM_CMOCHCISLAND) {
compute_cmochc_island();
} else if (INPUT.algorithm == ALGORITHM_CMOCHCCELL) {
compute_cmochc_cell();
}
}
// Timming -----------------------------------------------------
TIMMING_END("Elapsed algorithm total time", ts);
// Timming -----------------------------------------------------
return EXIT_SUCCESS;
}
|
f90a80361d7b9aad111ec791bd7de951c98c39d8.hip | // !!! This is a file automatically generated by hipify!!!
/*
* Copyright (c) 2019, NVIDIA CORPORATION. All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions
* are met:
* * Redistributions of source code must retain the above copyright
* notice, this list of conditions and the following disclaimer.
* * Redistributions in binary form must reproduce the above copyright
* notice, this list of conditions and the following disclaimer in the
* documentation and/or other materials provided with the distribution.
* * Neither the name of NVIDIA CORPORATION nor the names of its
* contributors may be used to endorse or promote products derived
* from this software without specific prior written permission.
*
* THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS ``AS IS'' AND ANY
* EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
* IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
* PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR
* CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
* EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
* PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
* PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY
* OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
* (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
* OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
*/
#include "nvjpegDecoder.h"
#include "./image_processing.cu"
// Decode a batch of JPEG bitstreams with nvJPEG.  When the hardware batched
// decoder is available, each stream's header is probed and supported streams
// are decoded in a single batched call; the remainder go through the
// decoupled host-parse / device-decode path using double-buffered pinned
// buffers.  Decoded planes land in `out`; GPU decode time (ms) is returned
// through `time`.
// BUGFIXES:
//  - the two timing events were created on every call and never destroyed
//    (one event pair leaked per invocation); they are now released.
//  - the decoupled-decode loop iterated `params.batch_size` while indexing
//    `otherdecode_bitstreams[i]`; when some streams were routed to the
//    batched path that vector is shorter than batch_size, so the loop read
//    out of bounds.  It now iterates the vector's actual size.
int decode_images(const FileData &img_data, const std::vector<size_t> &img_len,
                  std::vector<nvjpegImage_t> &out, decode_params_t &params,
                  double &time) {
  CHECK_CUDA(hipStreamSynchronize(params.stream));
  hipEvent_t startEvent = NULL, stopEvent = NULL;
  float loopTime = 0;
  // Blocking-sync events let the host wait without busy-spinning.
  CHECK_CUDA(hipEventCreateWithFlags(&startEvent, hipEventBlockingSync));
  CHECK_CUDA(hipEventCreateWithFlags(&stopEvent, hipEventBlockingSync));

  std::vector<const unsigned char*> batched_bitstreams;
  std::vector<size_t> batched_bitstreams_size;
  std::vector<nvjpegImage_t> batched_output;

  // bit-streams that batched decode cannot handle
  std::vector<const unsigned char*> otherdecode_bitstreams;
  std::vector<size_t> otherdecode_bitstreams_size;
  std::vector<nvjpegImage_t> otherdecode_output;

  if (params.hw_decode_available) {
    for (int i = 0; i < params.batch_size; i++) {
      // extract bitstream meta data to figure out whether a bit-stream can be decoded
      nvjpegJpegStreamParseHeader(params.nvjpeg_handle, (const unsigned char *)img_data[i].data(), img_len[i], params.jpeg_streams[0]);
      int isSupported = -1;
      nvjpegDecodeBatchedSupported(params.nvjpeg_handle, params.jpeg_streams[0], &isSupported);

      if (isSupported == 0) {
        batched_bitstreams.push_back((const unsigned char *)img_data[i].data());
        batched_bitstreams_size.push_back(img_len[i]);
        batched_output.push_back(out[i]);
      } else {
        otherdecode_bitstreams.push_back((const unsigned char *)img_data[i].data());
        otherdecode_bitstreams_size.push_back(img_len[i]);
        otherdecode_output.push_back(out[i]);
      }
    }
  } else {
    for (int i = 0; i < params.batch_size; i++) {
      otherdecode_bitstreams.push_back((const unsigned char *)img_data[i].data());
      otherdecode_bitstreams_size.push_back(img_len[i]);
      otherdecode_output.push_back(out[i]);
    }
  }

  CHECK_CUDA(hipEventRecord(startEvent, params.stream));

  if (batched_bitstreams.size() > 0) {
    CHECK_NVJPEG(
        nvjpegDecodeBatchedInitialize(params.nvjpeg_handle, params.nvjpeg_state,
                                      batched_bitstreams.size(), 1, params.fmt));
    CHECK_NVJPEG(nvjpegDecodeBatched(
        params.nvjpeg_handle, params.nvjpeg_state, batched_bitstreams.data(),
        batched_bitstreams_size.data(), batched_output.data(), params.stream));
  }

  if (otherdecode_bitstreams.size() > 0) {
    CHECK_NVJPEG(nvjpegStateAttachDeviceBuffer(params.nvjpeg_decoupled_state, params.device_buffer));
    int buffer_index = 0;
    CHECK_NVJPEG(nvjpegDecodeParamsSetOutputFormat(params.nvjpeg_decode_params, params.fmt));
    // BUGFIX: iterate only over the streams routed to this path.
    for (int i = 0; i < (int)otherdecode_bitstreams.size(); i++) {
      CHECK_NVJPEG(
          nvjpegJpegStreamParse(params.nvjpeg_handle, otherdecode_bitstreams[i], otherdecode_bitstreams_size[i],
                                0, 0, params.jpeg_streams[buffer_index]));
      CHECK_NVJPEG(nvjpegStateAttachPinnedBuffer(params.nvjpeg_decoupled_state,
                                                 params.pinned_buffers[buffer_index]));
      CHECK_NVJPEG(nvjpegDecodeJpegHost(params.nvjpeg_handle, params.nvjpeg_decoder, params.nvjpeg_decoupled_state,
                                        params.nvjpeg_decode_params, params.jpeg_streams[buffer_index]));
      CHECK_CUDA(hipStreamSynchronize(params.stream));
      CHECK_NVJPEG(nvjpegDecodeJpegTransferToDevice(params.nvjpeg_handle, params.nvjpeg_decoder, params.nvjpeg_decoupled_state,
                                                    params.jpeg_streams[buffer_index], params.stream));
      buffer_index = 1 - buffer_index; // switch pinned buffer in pipeline mode to avoid an extra sync
      CHECK_NVJPEG(nvjpegDecodeJpegDevice(params.nvjpeg_handle, params.nvjpeg_decoder, params.nvjpeg_decoupled_state,
                                          &otherdecode_output[i], params.stream));
    }
  }

  CHECK_CUDA(hipEventRecord(stopEvent, params.stream));
  CHECK_CUDA(hipEventSynchronize(stopEvent));
  CHECK_CUDA(hipEventElapsedTime(&loopTime, startEvent, stopEvent));
  time = static_cast<double>(loopTime);

  // BUGFIX: release the per-call timing events.
  CHECK_CUDA(hipEventDestroy(startEvent));
  CHECK_CUDA(hipEventDestroy(stopEvent));

  return EXIT_SUCCESS;
}
// Write each decoded image in `iout` to <output_dir>/<basename>.bmp.
// Planar RGB/BGR goes through writeBMP, interleaved RGBI/BGRI through
// writeBMPi.  Returns EXIT_SUCCESS when every image was written,
// EXIT_FAILURE on the first write error or unsupported format.
int write_images(std::vector<nvjpegImage_t> &iout, std::vector<int> &widths,
                 std::vector<int> &heights, decode_params_t &params,
                 FileNames &filenames) {
  for (int i = 0; i < params.batch_size; i++) {
    // Get the file name, without extension.
    // This will be used to rename the output file.
    size_t position = filenames[i].rfind("/");
    std::string sFileName =
        (std::string::npos == position)
            ? filenames[i]
            : filenames[i].substr(position + 1, filenames[i].size());
    position = sFileName.rfind(".");
    sFileName = (std::string::npos == position) ? sFileName
                                                : sFileName.substr(0, position);
    std::string fname(params.output_dir + "/" + sFileName + ".bmp");

    // BUGFIX: `err` was previously uninitialized, so for any output format
    // other than RGB/BGR/RGBI/BGRI the `if (err)` test below read
    // indeterminate memory.  Default to failure so unsupported formats are
    // reported instead of silently "succeeding".
    int err = 1;
    if (params.fmt == NVJPEG_OUTPUT_RGB || params.fmt == NVJPEG_OUTPUT_BGR) {
      err = writeBMP(fname.c_str(), iout[i].channel[0], iout[i].pitch[0],
                     iout[i].channel[1], iout[i].pitch[1], iout[i].channel[2],
                     iout[i].pitch[2], widths[i], heights[i]);
    } else if (params.fmt == NVJPEG_OUTPUT_RGBI ||
               params.fmt == NVJPEG_OUTPUT_BGRI) {
      // Write BMP from interleaved data
      err = writeBMPi(fname.c_str(), iout[i].channel[0], iout[i].pitch[0],
                      widths[i], heights[i]);
    }
    if (err) {
      std::cout << "Cannot write output file: " << fname << std::endl;
      return EXIT_FAILURE;
    }
    std::cout << "Done writing decoded image to file: " << fname << std::endl;
  }
  return EXIT_SUCCESS;
}
// Batch driver: repeatedly reads `batch_size` files, prepares device output
// buffers, decodes them, and accumulates GPU decode time into `total`
// (skipping the first `params.warmup` batches).  Optionally runs the custom
// GPU post-processing step and writes BMPs.
// NOTE(review): declared to return double but actually returns
// EXIT_SUCCESS/EXIT_FAILURE integer codes; the timing is returned via `total`.
double process_images(FileNames &image_names, decode_params_t &params,
                      double &total) {
  // vector for storing raw files and file lengths
  FileData file_data(params.batch_size);
  std::vector<size_t> file_len(params.batch_size);
  FileNames current_names(params.batch_size);
  std::vector<int> widths(params.batch_size);
  std::vector<int> heights(params.batch_size);
  // we wrap over image files to process total_images of files
  FileNames::iterator file_iter = image_names.begin();
  // stream for decoding
  CHECK_CUDA(
      hipStreamCreateWithFlags(&params.stream, hipStreamNonBlocking));
  int total_processed = 0;
  // output buffers (channel pointers start NULL; prepare_buffers fills them)
  std::vector<nvjpegImage_t> iout(params.batch_size);
  // output buffer sizes, for convenience
  std::vector<nvjpegImage_t> isz(params.batch_size);
  for (int i = 0; i < iout.size(); i++) {
    for (int c = 0; c < NVJPEG_MAX_COMPONENT; c++) {
      iout[i].channel[c] = NULL;
      iout[i].pitch[c] = 0;
      isz[i].pitch[c] = 0;
    }
  }
  double test_time = 0;
  int warmup = 0;
  while (total_processed < params.total_images) {
    if (read_next_batch(image_names, params.batch_size, file_iter, file_data,
                        file_len, current_names))
      return EXIT_FAILURE;
    if (prepare_buffers(file_data, file_len, widths, heights, iout, isz,
                        current_names, params))
      return EXIT_FAILURE;
    double time;
    if (decode_images(file_data, file_len, iout, params, time))
      return EXIT_FAILURE;
    // Warm-up batches are decoded but excluded from the timing totals.
    if (warmup < params.warmup) {
      warmup++;
    } else {
      total_processed += params.batch_size;
      test_time += time;
    }
    /*----------- Your own image processing starts here! -----------*/
    if (image_processing_gpu(iout, widths, heights, params)) {
      fprintf(stderr, "%s", "[ERROR]: GPU Image processing error \n");
      return EXIT_FAILURE;
    }
    /*----------- Your own image processing ends here! -----------*/
    if (params.write_decoded)
      write_images(iout, widths, heights, params, current_names);
  }
  total = test_time;
  release_buffers(iout);
  CHECK_CUDA(hipStreamDestroy(params.stream));
  return EXIT_SUCCESS;
}
| f90a80361d7b9aad111ec791bd7de951c98c39d8.cu | /*
* Copyright (c) 2019, NVIDIA CORPORATION. All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions
* are met:
* * Redistributions of source code must retain the above copyright
* notice, this list of conditions and the following disclaimer.
* * Redistributions in binary form must reproduce the above copyright
* notice, this list of conditions and the following disclaimer in the
* documentation and/or other materials provided with the distribution.
* * Neither the name of NVIDIA CORPORATION nor the names of its
* contributors may be used to endorse or promote products derived
* from this software without specific prior written permission.
*
* THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS ``AS IS'' AND ANY
* EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
* IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
* PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR
* CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
* EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
* PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
* PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY
* OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
* (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
* OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
*/
#include "nvjpegDecoder.h"
#include "./image_processing.cu"
// Decode a batch of JPEG bitstreams with nvJPEG.  When the hardware batched
// decoder is available, each stream's header is probed and supported streams
// are decoded in a single batched call; the remainder go through the
// decoupled host-parse / device-decode path using double-buffered pinned
// buffers.  Decoded planes land in `out`; GPU decode time (ms) is returned
// through `time`.
// BUGFIXES:
//  - the two timing events were created on every call and never destroyed
//    (one event pair leaked per invocation); they are now released.
//  - the decoupled-decode loop iterated `params.batch_size` while indexing
//    `otherdecode_bitstreams[i]`; when some streams were routed to the
//    batched path that vector is shorter than batch_size, so the loop read
//    out of bounds.  It now iterates the vector's actual size.
int decode_images(const FileData &img_data, const std::vector<size_t> &img_len,
                  std::vector<nvjpegImage_t> &out, decode_params_t &params,
                  double &time) {
  CHECK_CUDA(cudaStreamSynchronize(params.stream));
  cudaEvent_t startEvent = NULL, stopEvent = NULL;
  float loopTime = 0;
  // Blocking-sync events let the host wait without busy-spinning.
  CHECK_CUDA(cudaEventCreateWithFlags(&startEvent, cudaEventBlockingSync));
  CHECK_CUDA(cudaEventCreateWithFlags(&stopEvent, cudaEventBlockingSync));

  std::vector<const unsigned char*> batched_bitstreams;
  std::vector<size_t> batched_bitstreams_size;
  std::vector<nvjpegImage_t> batched_output;

  // bit-streams that batched decode cannot handle
  std::vector<const unsigned char*> otherdecode_bitstreams;
  std::vector<size_t> otherdecode_bitstreams_size;
  std::vector<nvjpegImage_t> otherdecode_output;

  if (params.hw_decode_available) {
    for (int i = 0; i < params.batch_size; i++) {
      // extract bitstream meta data to figure out whether a bit-stream can be decoded
      nvjpegJpegStreamParseHeader(params.nvjpeg_handle, (const unsigned char *)img_data[i].data(), img_len[i], params.jpeg_streams[0]);
      int isSupported = -1;
      nvjpegDecodeBatchedSupported(params.nvjpeg_handle, params.jpeg_streams[0], &isSupported);

      if (isSupported == 0) {
        batched_bitstreams.push_back((const unsigned char *)img_data[i].data());
        batched_bitstreams_size.push_back(img_len[i]);
        batched_output.push_back(out[i]);
      } else {
        otherdecode_bitstreams.push_back((const unsigned char *)img_data[i].data());
        otherdecode_bitstreams_size.push_back(img_len[i]);
        otherdecode_output.push_back(out[i]);
      }
    }
  } else {
    for (int i = 0; i < params.batch_size; i++) {
      otherdecode_bitstreams.push_back((const unsigned char *)img_data[i].data());
      otherdecode_bitstreams_size.push_back(img_len[i]);
      otherdecode_output.push_back(out[i]);
    }
  }

  CHECK_CUDA(cudaEventRecord(startEvent, params.stream));

  if (batched_bitstreams.size() > 0) {
    CHECK_NVJPEG(
        nvjpegDecodeBatchedInitialize(params.nvjpeg_handle, params.nvjpeg_state,
                                      batched_bitstreams.size(), 1, params.fmt));
    CHECK_NVJPEG(nvjpegDecodeBatched(
        params.nvjpeg_handle, params.nvjpeg_state, batched_bitstreams.data(),
        batched_bitstreams_size.data(), batched_output.data(), params.stream));
  }

  if (otherdecode_bitstreams.size() > 0) {
    CHECK_NVJPEG(nvjpegStateAttachDeviceBuffer(params.nvjpeg_decoupled_state, params.device_buffer));
    int buffer_index = 0;
    CHECK_NVJPEG(nvjpegDecodeParamsSetOutputFormat(params.nvjpeg_decode_params, params.fmt));
    // BUGFIX: iterate only over the streams routed to this path.
    for (int i = 0; i < (int)otherdecode_bitstreams.size(); i++) {
      CHECK_NVJPEG(
          nvjpegJpegStreamParse(params.nvjpeg_handle, otherdecode_bitstreams[i], otherdecode_bitstreams_size[i],
                                0, 0, params.jpeg_streams[buffer_index]));
      CHECK_NVJPEG(nvjpegStateAttachPinnedBuffer(params.nvjpeg_decoupled_state,
                                                 params.pinned_buffers[buffer_index]));
      CHECK_NVJPEG(nvjpegDecodeJpegHost(params.nvjpeg_handle, params.nvjpeg_decoder, params.nvjpeg_decoupled_state,
                                        params.nvjpeg_decode_params, params.jpeg_streams[buffer_index]));
      CHECK_CUDA(cudaStreamSynchronize(params.stream));
      CHECK_NVJPEG(nvjpegDecodeJpegTransferToDevice(params.nvjpeg_handle, params.nvjpeg_decoder, params.nvjpeg_decoupled_state,
                                                    params.jpeg_streams[buffer_index], params.stream));
      buffer_index = 1 - buffer_index; // switch pinned buffer in pipeline mode to avoid an extra sync
      CHECK_NVJPEG(nvjpegDecodeJpegDevice(params.nvjpeg_handle, params.nvjpeg_decoder, params.nvjpeg_decoupled_state,
                                          &otherdecode_output[i], params.stream));
    }
  }

  CHECK_CUDA(cudaEventRecord(stopEvent, params.stream));
  CHECK_CUDA(cudaEventSynchronize(stopEvent));
  CHECK_CUDA(cudaEventElapsedTime(&loopTime, startEvent, stopEvent));
  time = static_cast<double>(loopTime);

  // BUGFIX: release the per-call timing events.
  CHECK_CUDA(cudaEventDestroy(startEvent));
  CHECK_CUDA(cudaEventDestroy(stopEvent));

  return EXIT_SUCCESS;
}
// Write each decoded image in `iout` to <output_dir>/<basename>.bmp.
// Planar RGB/BGR goes through writeBMP, interleaved RGBI/BGRI through
// writeBMPi.  Returns EXIT_SUCCESS when every image was written,
// EXIT_FAILURE on the first write error or unsupported format.
int write_images(std::vector<nvjpegImage_t> &iout, std::vector<int> &widths,
                 std::vector<int> &heights, decode_params_t &params,
                 FileNames &filenames) {
  for (int i = 0; i < params.batch_size; i++) {
    // Get the file name, without extension.
    // This will be used to rename the output file.
    size_t position = filenames[i].rfind("/");
    std::string sFileName =
        (std::string::npos == position)
            ? filenames[i]
            : filenames[i].substr(position + 1, filenames[i].size());
    position = sFileName.rfind(".");
    sFileName = (std::string::npos == position) ? sFileName
                                                : sFileName.substr(0, position);
    std::string fname(params.output_dir + "/" + sFileName + ".bmp");

    // BUGFIX: `err` was previously uninitialized, so for any output format
    // other than RGB/BGR/RGBI/BGRI the `if (err)` test below read
    // indeterminate memory.  Default to failure so unsupported formats are
    // reported instead of silently "succeeding".
    int err = 1;
    if (params.fmt == NVJPEG_OUTPUT_RGB || params.fmt == NVJPEG_OUTPUT_BGR) {
      err = writeBMP(fname.c_str(), iout[i].channel[0], iout[i].pitch[0],
                     iout[i].channel[1], iout[i].pitch[1], iout[i].channel[2],
                     iout[i].pitch[2], widths[i], heights[i]);
    } else if (params.fmt == NVJPEG_OUTPUT_RGBI ||
               params.fmt == NVJPEG_OUTPUT_BGRI) {
      // Write BMP from interleaved data
      err = writeBMPi(fname.c_str(), iout[i].channel[0], iout[i].pitch[0],
                      widths[i], heights[i]);
    }
    if (err) {
      std::cout << "Cannot write output file: " << fname << std::endl;
      return EXIT_FAILURE;
    }
    std::cout << "Done writing decoded image to file: " << fname << std::endl;
  }
  return EXIT_SUCCESS;
}
// Batch driver: repeatedly reads `batch_size` files, prepares device output
// buffers, decodes them, and accumulates GPU decode time into `total`
// (skipping the first `params.warmup` batches).  Optionally runs the custom
// GPU post-processing step and writes BMPs.
// NOTE(review): declared to return double but actually returns
// EXIT_SUCCESS/EXIT_FAILURE integer codes; the timing is returned via `total`.
double process_images(FileNames &image_names, decode_params_t &params,
                      double &total) {
  // vector for storing raw files and file lengths
  FileData file_data(params.batch_size);
  std::vector<size_t> file_len(params.batch_size);
  FileNames current_names(params.batch_size);
  std::vector<int> widths(params.batch_size);
  std::vector<int> heights(params.batch_size);
  // we wrap over image files to process total_images of files
  FileNames::iterator file_iter = image_names.begin();
  // stream for decoding
  CHECK_CUDA(
      cudaStreamCreateWithFlags(&params.stream, cudaStreamNonBlocking));
  int total_processed = 0;
  // output buffers (channel pointers start NULL; prepare_buffers fills them)
  std::vector<nvjpegImage_t> iout(params.batch_size);
  // output buffer sizes, for convenience
  std::vector<nvjpegImage_t> isz(params.batch_size);
  for (int i = 0; i < iout.size(); i++) {
    for (int c = 0; c < NVJPEG_MAX_COMPONENT; c++) {
      iout[i].channel[c] = NULL;
      iout[i].pitch[c] = 0;
      isz[i].pitch[c] = 0;
    }
  }
  double test_time = 0;
  int warmup = 0;
  while (total_processed < params.total_images) {
    if (read_next_batch(image_names, params.batch_size, file_iter, file_data,
                        file_len, current_names))
      return EXIT_FAILURE;
    if (prepare_buffers(file_data, file_len, widths, heights, iout, isz,
                        current_names, params))
      return EXIT_FAILURE;
    double time;
    if (decode_images(file_data, file_len, iout, params, time))
      return EXIT_FAILURE;
    // Warm-up batches are decoded but excluded from the timing totals.
    if (warmup < params.warmup) {
      warmup++;
    } else {
      total_processed += params.batch_size;
      test_time += time;
    }
    /*----------- Your own image processing starts here! -----------*/
    if (image_processing_gpu(iout, widths, heights, params)) {
      fprintf(stderr, "%s", "[ERROR]: GPU Image processing error \n");
      return EXIT_FAILURE;
    }
    /*----------- Your own image processing ends here! -----------*/
    if (params.write_decoded)
      write_images(iout, widths, heights, params, current_names);
  }
  total = test_time;
  release_buffers(iout);
  CHECK_CUDA(cudaStreamDestroy(params.stream));
  return EXIT_SUCCESS;
}
|
e850efab494d149c115da606b757f98c62e4e6c0.hip | // !!! This is a file automatically generated by hipify!!!
#include <hip/hip_runtime.h>
#include <bits/stdc++.h>
#define BLOCK_SIZE 32
#define TILE_WIDTH BLOCK_SIZE
//int BLOCK_SIZE, TILE_WIDTH;
using namespace std;
//Declarations :
//matrix initialization
void init(int *A, int n, int d);
//matrix comparation
bool compare(int *A, int *B, int n);
//print matrix
void printmat(int *A, int rows, int cols);
//sequential matrix multiplication
void matMult(int *h_A, int *h_B, int *h_C, int common, int Arows, int Bcols);
//pre kernel matrix multiplication
void prematMultP(int *A, int *B, int *C, int common, int Arows, int Bcols);
//pre kernel matrix tiling multiplication
void prematMultPTiled(int *A, int *B, int *C, int common, int Arows, int Bcols);
//Parallel kernel
__global__ void matMultP (int *d_A, int *d_B, int *d_C, int common, int Arows, int Bcols);
//Parallel kernel (tiling)
__global__ void matMultPTiled(int *d_A, int *d_B, int *d_C, int common, int Arows, int Bcols);
//End declarations
// Driver: for ten test cases, reads matrix sizes (Arows, common, Bcols)
// from stdin, runs the sequential, naive-parallel and tiled-parallel
// multiplications, prints each elapsed time and speedup, and verifies the
// three results agree.
// BUGFIX: buffer E was allocated every iteration but never freed (memory
// leak); it is now released with the others.
int main() {
    for (int i = 0; i < 10; i++) {
        cout << i + 1 << endl;
        int Arows, common, Bcols;
        cin >> Arows >> common >> Bcols;
        int sizeA = Arows * common * sizeof(int);
        int sizeB = common * Bcols * sizeof(int);
        int sizeR = Arows * Bcols * sizeof(int);
        int *A = (int *)malloc(sizeA);
        int *B = (int *)malloc(sizeB);
        int *C = (int *)malloc(sizeR);
        int *D = (int *)malloc(sizeR);
        int *E = (int *)malloc(sizeR);
        // NOTE(review): init takes an int, so 1.5 / 1.75 truncate to 1.
        init(A, Arows * common, 1.5);
        init(B, common * Bcols, 1.75);
        init(C, Arows * Bcols, 0);
        init(D, Arows * Bcols, 0);
        init(E, Arows * Bcols, 0);
        double a, b, c;
        // Sequential baseline.
        clock_t t = clock();
        matMult(A, B, C, common, Arows, Bcols);
        t = clock() - t;
        a = ((float)t) / CLOCKS_PER_SEC;
        cout << a << endl;
        // Naive parallel version.
        t = clock();
        prematMultP(A, B, D, common, Arows, Bcols);
        t = clock() - t;
        b = ((float)t) / CLOCKS_PER_SEC;
        cout << b << endl;
        cout << (a / b) << endl;  // speedup vs. sequential
        // Tiled (shared-memory) parallel version.
        t = clock();
        prematMultPTiled(A, B, E, common, Arows, Bcols);
        t = clock() - t;
        c = ((float)t) / CLOCKS_PER_SEC;
        cout << c << endl;
        cout << (a / c) << endl;  // speedup vs. sequential
        // All three results must match element-wise.
        if (compare(C, D, Arows * Bcols) and compare(D, E, Arows * Bcols))
            cout << "Ok :)" << endl;
        else
            cout << "No ok :(" << endl;
        free(A);
        free(B);
        free(C);
        free(D);
        free(E);  // BUGFIX: was leaked
    }
    return 0;
}
//Functions
//matrix initialization
// Fill the first n elements of A with the value d.
void init(int *A, int n, int d) {
    int idx = 0;
    while (idx < n) {
        A[idx] = d;
        ++idx;
    }
}
//matrix comparation
// Return true iff the first n elements of A and B are pairwise equal.
bool compare(int *A, int *B, int n) {
    int k = n;
    while (k-- > 0) {
        if (A[k] != B[k])
            return false;
    }
    return true;
}
//print matrix
// Print a rows x cols matrix stored in row-major order, one row per line,
// followed by a blank line.
// BUGFIX: the row stride of a row-major matrix is the number of COLUMNS;
// the previous code indexed A[i * rows + j], which misprints (and can read
// out of bounds for) any non-square matrix.
void printmat(int *A, int rows, int cols) {
    for (int i = 0; i < rows; i++) {
        for (int j = 0; j < cols; j++) {
            std::cout << A[i * cols + j] << " ";
        }
        std::cout << std::endl;
    }
    std::cout << std::endl;
}
//matrix multiplication
// Sequential reference: h_C = h_A (Arows x common) * h_B (common x Bcols),
// all matrices row-major.
void matMult(int *h_A, int *h_B, int *h_C, int common, int Arows, int Bcols) {
    for (int row = 0; row < Arows; ++row) {
        const int *aRow = h_A + common * row;
        int *cRow = h_C + Bcols * row;
        for (int col = 0; col < Bcols; ++col) {
            int acc = 0;
            for (int k = 0; k < common; ++k)
                acc += aRow[k] * h_B[Bcols * k + col];
            cRow[col] = acc;
        }
    }
}
//pre kernel matrix multiplication
// Host wrapper for the naive kernel: copies A and B to the device, launches
// matMultP on a 2-D grid of BLOCK_SIZE x BLOCK_SIZE blocks covering the
// Arows x Bcols result (ceil division), then copies C back and frees the
// device buffers.
// NOTE(review): hipMalloc/hipMemcpy/launch return codes are not checked;
// failures would go unnoticed — consider wrapping them with an error macro.
void prematMultP(int *A, int *B, int *C, int common, int Arows, int Bcols) {
    int sizeA = Arows * common * sizeof(int);
    int sizeB = common * Bcols * sizeof(int);
    int sizeR = Arows * Bcols * sizeof(int);
    int *d_A, *d_B, *d_C;
    //Allocate memory for device
    hipMalloc(&d_A, sizeA);
    hipMalloc(&d_B, sizeB);
    hipMalloc(&d_C, sizeR);
    //Copy Data from host to device
    hipMemcpy(d_A, A, sizeA, hipMemcpyHostToDevice);
    hipMemcpy(d_B, B, sizeB, hipMemcpyHostToDevice);
    //Blocks and Grids: round up so partial edge tiles are covered.
    dim3 dimBlock(BLOCK_SIZE, BLOCK_SIZE);
    dim3 dimGrid(ceil(Bcols / (float)BLOCK_SIZE), ceil(Arows / (float)BLOCK_SIZE));
    //Launch Kernel
    hipLaunchKernelGGL(( matMultP), dim3(dimGrid), dim3(dimBlock), 0, 0, d_A, d_B, d_C, common, Arows, Bcols);
    hipDeviceSynchronize();
    //Copy from device, free device memory
    hipMemcpy (C, d_C, sizeR, hipMemcpyDeviceToHost);
    hipFree(d_A);
    hipFree(d_B);
    hipFree(d_C);
}
//pre kernel matrix tiling multiplication
// Host wrapper for the tiled kernel: identical staging to prematMultP but
// launches matMultPTiled (shared-memory tiling).
// NOTE(review): hipMalloc/hipMemcpy/launch return codes are not checked;
// failures would go unnoticed — consider wrapping them with an error macro.
void prematMultPTiled(int *A, int *B, int *C, int common, int Arows, int Bcols) {
    int sizeA = Arows * common * sizeof(int);
    int sizeB = common * Bcols * sizeof(int);
    int sizeR = Arows * Bcols * sizeof(int);
    int *d_A, *d_B, *d_C;
    //Allocate memory for device
    hipMalloc(&d_A, sizeA);
    hipMalloc(&d_B, sizeB);
    hipMalloc(&d_C, sizeR);
    //Copy Data from host to device
    hipMemcpy(d_A, A, sizeA, hipMemcpyHostToDevice);
    hipMemcpy(d_B, B, sizeB, hipMemcpyHostToDevice);
    //Blocks and Grids: round up so partial edge tiles are covered.
    dim3 dimBlock(BLOCK_SIZE, BLOCK_SIZE);
    dim3 dimGrid(ceil(Bcols / (float)BLOCK_SIZE), ceil(Arows / (float)BLOCK_SIZE));
    //Launch Kernel
    hipLaunchKernelGGL(( matMultPTiled), dim3(dimGrid), dim3(dimBlock), 0, 0, d_A, d_B, d_C, common, Arows, Bcols);
    hipDeviceSynchronize();
    //Copy from device, free device memory
    hipMemcpy (C, d_C, sizeR, hipMemcpyDeviceToHost);
    hipFree(d_A);
    hipFree(d_B);
    hipFree(d_C);
}
//Parallel kernel
// Naive matrix-multiply kernel: one thread per element of C; thread (x, y)
// computes C[i][j] with i taken from the y dimension (rows) and j from x
// (columns).
__global__ void matMultP (int *d_A, int *d_B, int *d_C, int common, int Arows, int Bcols) {
    int i = threadIdx.y + blockDim.y * blockIdx.y;  // output row
    int j = threadIdx.x + blockDim.x * blockIdx.x;  // output column
    // Guard: the grid is rounded up, so edge threads may fall outside C.
    if (i < Arows and j < Bcols) {
        int sum = 0;
        for (int k = 0; k < common; ++k)
            sum += d_A[common * i + k] * d_B[Bcols * k + j];
        d_C[Bcols * i + j] = sum;
    }
}
//Parallel kernel (tiling)
// Tiled matrix multiply: each block computes a TILE_WIDTH x TILE_WIDTH tile
// of C, staging tiles of A and B through shared memory to reduce global
// memory traffic.  Out-of-range elements are zero-padded so arbitrary
// (non-multiple-of-TILE_WIDTH) sizes are handled.
__global__ void matMultPTiled(int *d_A, int *d_B, int *d_C, int common, int Arows, int Bcols) {
    __shared__ int Mds[TILE_WIDTH][TILE_WIDTH];
    __shared__ int Nds[TILE_WIDTH][TILE_WIDTH];
    int bx = blockIdx.x;
    int by = blockIdx.y;
    int tx = threadIdx.x;
    int ty = threadIdx.y;
    int row = by * TILE_WIDTH + ty;  // output row this thread owns
    int col = bx * TILE_WIDTH + tx;  // output column this thread owns
    int Pvalue = 0;
    // Walk the shared 'common' dimension one tile at a time (ceil division).
    for (int m = 0; m < (common + TILE_WIDTH - 1) / TILE_WIDTH; ++m) {
        if (m * TILE_WIDTH + tx < common and row < Arows)
            Mds[ty][tx] = d_A[row * common + m * TILE_WIDTH + tx];
        else
            Mds[ty][tx] = 0;  // zero-pad the partial edge tile
        if (m * TILE_WIDTH + ty < common and col < Bcols)
            Nds[ty][tx] = d_B[(m * TILE_WIDTH + ty) * Bcols + col];
        else
            Nds[ty][tx] = 0;
        __syncthreads();  // both tiles fully loaded before any thread reads
        for (int k = 0; k < TILE_WIDTH; ++k) {
            Pvalue += Mds[ty][k] * Nds[k][tx];
        }
        __syncthreads();  // finish reads before the next iteration's loads
    }
    if (row < Arows and col < Bcols)
        d_C[row * Bcols + col] = Pvalue;
}
| e850efab494d149c115da606b757f98c62e4e6c0.cu | #include <cuda.h>
#include <bits/stdc++.h>
#define BLOCK_SIZE 32
#define TILE_WIDTH BLOCK_SIZE
//int BLOCK_SIZE, TILE_WIDTH;
using namespace std;
//Declarations :
//matrix initialization
void init(int *A, int n, int d);
//matrix comparation
bool compare(int *A, int *B, int n);
//print matrix
void printmat(int *A, int rows, int cols);
//sequential matrix multiplication
void matMult(int *h_A, int *h_B, int *h_C, int common, int Arows, int Bcols);
//pre kernel matrix multiplication
void prematMultP(int *A, int *B, int *C, int common, int Arows, int Bcols);
//pre kernel matrix tiling multiplication
void prematMultPTiled(int *A, int *B, int *C, int common, int Arows, int Bcols);
//Parallel kernel
__global__ void matMultP (int *d_A, int *d_B, int *d_C, int common, int Arows, int Bcols);
//Parallel kernel (tiling)
__global__ void matMultPTiled(int *d_A, int *d_B, int *d_C, int common, int Arows, int Bcols);
//End declarations
// Driver: for ten test cases, reads matrix sizes (Arows, common, Bcols)
// from stdin, runs the sequential, naive-parallel and tiled-parallel
// multiplications, prints each elapsed time and speedup, and verifies the
// three results agree.
// BUGFIX: buffer E was allocated every iteration but never freed (memory
// leak); it is now released with the others.
int main() {
    for (int i = 0; i < 10; i++) {
        cout << i + 1 << endl;
        int Arows, common, Bcols;
        cin >> Arows >> common >> Bcols;
        int sizeA = Arows * common * sizeof(int);
        int sizeB = common * Bcols * sizeof(int);
        int sizeR = Arows * Bcols * sizeof(int);
        int *A = (int *)malloc(sizeA);
        int *B = (int *)malloc(sizeB);
        int *C = (int *)malloc(sizeR);
        int *D = (int *)malloc(sizeR);
        int *E = (int *)malloc(sizeR);
        // NOTE(review): init takes an int, so 1.5 / 1.75 truncate to 1.
        init(A, Arows * common, 1.5);
        init(B, common * Bcols, 1.75);
        init(C, Arows * Bcols, 0);
        init(D, Arows * Bcols, 0);
        init(E, Arows * Bcols, 0);
        double a, b, c;
        // Sequential baseline.
        clock_t t = clock();
        matMult(A, B, C, common, Arows, Bcols);
        t = clock() - t;
        a = ((float)t) / CLOCKS_PER_SEC;
        cout << a << endl;
        // Naive parallel version.
        t = clock();
        prematMultP(A, B, D, common, Arows, Bcols);
        t = clock() - t;
        b = ((float)t) / CLOCKS_PER_SEC;
        cout << b << endl;
        cout << (a / b) << endl;  // speedup vs. sequential
        // Tiled (shared-memory) parallel version.
        t = clock();
        prematMultPTiled(A, B, E, common, Arows, Bcols);
        t = clock() - t;
        c = ((float)t) / CLOCKS_PER_SEC;
        cout << c << endl;
        cout << (a / c) << endl;  // speedup vs. sequential
        // All three results must match element-wise.
        if (compare(C, D, Arows * Bcols) and compare(D, E, Arows * Bcols))
            cout << "Ok :)" << endl;
        else
            cout << "No ok :(" << endl;
        free(A);
        free(B);
        free(C);
        free(D);
        free(E);  // BUGFIX: was leaked
    }
    return 0;
}
//Functions
//matrix initialization
// Fill the first n elements of A with the value d.
void init(int *A, int n, int d) {
    int idx = 0;
    while (idx < n) {
        A[idx] = d;
        ++idx;
    }
}
//matrix comparation
// Return true iff the first n elements of A and B are pairwise equal.
bool compare(int *A, int *B, int n) {
    int k = n;
    while (k-- > 0) {
        if (A[k] != B[k])
            return false;
    }
    return true;
}
//print matrix
// Print a rows x cols matrix stored in row-major order, one row per line,
// followed by a blank line.
// BUGFIX: the row stride of a row-major matrix is the number of COLUMNS;
// the previous code indexed A[i * rows + j], which misprints (and can read
// out of bounds for) any non-square matrix.
void printmat(int *A, int rows, int cols) {
    for (int i = 0; i < rows; i++) {
        for (int j = 0; j < cols; j++) {
            std::cout << A[i * cols + j] << " ";
        }
        std::cout << std::endl;
    }
    std::cout << std::endl;
}
//matrix multiplication
// Sequential reference: h_C = h_A (Arows x common) * h_B (common x Bcols),
// all matrices row-major.
void matMult(int *h_A, int *h_B, int *h_C, int common, int Arows, int Bcols) {
    for (int row = 0; row < Arows; ++row) {
        const int *aRow = h_A + common * row;
        int *cRow = h_C + Bcols * row;
        for (int col = 0; col < Bcols; ++col) {
            int acc = 0;
            for (int k = 0; k < common; ++k)
                acc += aRow[k] * h_B[Bcols * k + col];
            cRow[col] = acc;
        }
    }
}
//pre kernel matrix multiplication
// Host wrapper for the naive kernel: copies A and B to the device, launches
// matMultP on a 2-D grid of BLOCK_SIZE x BLOCK_SIZE blocks covering the
// Arows x Bcols result (ceil division), then copies C back and frees the
// device buffers.
// NOTE(review): cudaMalloc/cudaMemcpy/launch return codes are not checked;
// failures would go unnoticed — consider wrapping them with an error macro.
void prematMultP(int *A, int *B, int *C, int common, int Arows, int Bcols) {
    int sizeA = Arows * common * sizeof(int);
    int sizeB = common * Bcols * sizeof(int);
    int sizeR = Arows * Bcols * sizeof(int);
    int *d_A, *d_B, *d_C;
    //Allocate memory for device
    cudaMalloc(&d_A, sizeA);
    cudaMalloc(&d_B, sizeB);
    cudaMalloc(&d_C, sizeR);
    //Copy Data from host to device
    cudaMemcpy(d_A, A, sizeA, cudaMemcpyHostToDevice);
    cudaMemcpy(d_B, B, sizeB, cudaMemcpyHostToDevice);
    //Blocks and Grids: round up so partial edge tiles are covered.
    dim3 dimBlock(BLOCK_SIZE, BLOCK_SIZE);
    dim3 dimGrid(ceil(Bcols / (float)BLOCK_SIZE), ceil(Arows / (float)BLOCK_SIZE));
    //Launch Kernel
    matMultP<<<dimGrid, dimBlock>>> (d_A, d_B, d_C, common, Arows, Bcols);
    cudaDeviceSynchronize();
    //Copy from device, free device memory
    cudaMemcpy (C, d_C, sizeR, cudaMemcpyDeviceToHost);
    cudaFree(d_A);
    cudaFree(d_B);
    cudaFree(d_C);
}
//pre kernel matrix tiling multiplication
// Host wrapper for the tiled kernel: identical staging to prematMultP but
// launches matMultPTiled (shared-memory tiling).
// NOTE(review): cudaMalloc/cudaMemcpy/launch return codes are not checked;
// failures would go unnoticed — consider wrapping them with an error macro.
void prematMultPTiled(int *A, int *B, int *C, int common, int Arows, int Bcols) {
    int sizeA = Arows * common * sizeof(int);
    int sizeB = common * Bcols * sizeof(int);
    int sizeR = Arows * Bcols * sizeof(int);
    int *d_A, *d_B, *d_C;
    //Allocate memory for device
    cudaMalloc(&d_A, sizeA);
    cudaMalloc(&d_B, sizeB);
    cudaMalloc(&d_C, sizeR);
    //Copy Data from host to device
    cudaMemcpy(d_A, A, sizeA, cudaMemcpyHostToDevice);
    cudaMemcpy(d_B, B, sizeB, cudaMemcpyHostToDevice);
    //Blocks and Grids: round up so partial edge tiles are covered.
    dim3 dimBlock(BLOCK_SIZE, BLOCK_SIZE);
    dim3 dimGrid(ceil(Bcols / (float)BLOCK_SIZE), ceil(Arows / (float)BLOCK_SIZE));
    //Launch Kernel
    matMultPTiled<<<dimGrid, dimBlock>>> (d_A, d_B, d_C, common, Arows, Bcols);
    cudaDeviceSynchronize();
    //Copy from device, free device memory
    cudaMemcpy (C, d_C, sizeR, cudaMemcpyDeviceToHost);
    cudaFree(d_A);
    cudaFree(d_B);
    cudaFree(d_C);
}
//Parallel kernel
// Naive matrix-multiply kernel: one thread per element of C; thread (x, y)
// computes C[i][j] with i taken from the y dimension (rows) and j from x
// (columns).
__global__ void matMultP (int *d_A, int *d_B, int *d_C, int common, int Arows, int Bcols) {
    int i = threadIdx.y + blockDim.y * blockIdx.y;  // output row
    int j = threadIdx.x + blockDim.x * blockIdx.x;  // output column
    // Guard: the grid is rounded up, so edge threads may fall outside C.
    if (i < Arows and j < Bcols) {
        int sum = 0;
        for (int k = 0; k < common; ++k)
            sum += d_A[common * i + k] * d_B[Bcols * k + j];
        d_C[Bcols * i + j] = sum;
    }
}
//Parallel kernel (tiling)
// Tiled matrix multiply: each block computes a TILE_WIDTH x TILE_WIDTH tile
// of C, staging tiles of A and B through shared memory to reduce global
// memory traffic.  Out-of-range elements are zero-padded so arbitrary
// (non-multiple-of-TILE_WIDTH) sizes are handled.
__global__ void matMultPTiled(int *d_A, int *d_B, int *d_C, int common, int Arows, int Bcols) {
    __shared__ int Mds[TILE_WIDTH][TILE_WIDTH];
    __shared__ int Nds[TILE_WIDTH][TILE_WIDTH];
    int bx = blockIdx.x;
    int by = blockIdx.y;
    int tx = threadIdx.x;
    int ty = threadIdx.y;
    int row = by * TILE_WIDTH + ty;  // output row this thread owns
    int col = bx * TILE_WIDTH + tx;  // output column this thread owns
    int Pvalue = 0;
    // Walk the shared 'common' dimension one tile at a time (ceil division).
    for (int m = 0; m < (common + TILE_WIDTH - 1) / TILE_WIDTH; ++m) {
        if (m * TILE_WIDTH + tx < common and row < Arows)
            Mds[ty][tx] = d_A[row * common + m * TILE_WIDTH + tx];
        else
            Mds[ty][tx] = 0;  // zero-pad the partial edge tile
        if (m * TILE_WIDTH + ty < common and col < Bcols)
            Nds[ty][tx] = d_B[(m * TILE_WIDTH + ty) * Bcols + col];
        else
            Nds[ty][tx] = 0;
        __syncthreads();  // both tiles fully loaded before any thread reads
        for (int k = 0; k < TILE_WIDTH; ++k) {
            Pvalue += Mds[ty][k] * Nds[k][tx];
        }
        __syncthreads();  // finish reads before the next iteration's loads
    }
    if (row < Arows and col < Bcols)
        d_C[row * Bcols + col] = Pvalue;
}
|
1d9bd7de4c176dd6c457d8cf55a8f1d5fa451cac.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
#include <stdio.h>
#include <time.h>
#define SIZE 100
// Element-wise c[i] = a[i] + b[i].  Indexes with threadIdx.x only, so it is
// correct only when launched with a single block of at least n threads
// (matching the <<<1, SIZE>>> launch in main).
__global__ void VectorAdd(int *a, int *b, int *c, int n){
    int i = threadIdx.x;
    if (i < n)
        c[i] = a[i] + b[i];
}
// Allocates three managed (unified-memory) arrays, runs VectorAdd in a
// single block of SIZE threads, and prints the first ten results.
// NOTE(review): hipMallocManaged / launch return codes are unchecked.
int main(){
    int *a, *b, *c;
    hipMallocManaged(&a, SIZE * sizeof(int));
    hipMallocManaged(&b, SIZE * sizeof(int));
    hipMallocManaged(&c, SIZE * sizeof(int));
    // Host initializes the managed buffers directly.
    for (int i = 0; i < SIZE; i++)
    {
        a[i] = i;
        b[i] = i;
        c[i] = 0;
    }
    hipLaunchKernelGGL(( VectorAdd) , dim3(1),dim3(SIZE), 0, 0, a,b,c,SIZE);
    // Kernel launch is asynchronous; wait before the host reads c[].
    hipDeviceSynchronize();
    for (int i = 0; i < 10; ++i)
    {
        printf("c[%d] = %d\n", i, c[i]);
    }
    hipFree(a);
    hipFree(b);
    hipFree(c);
    return 0;
} | 1d9bd7de4c176dd6c457d8cf55a8f1d5fa451cac.cu | #include <stdio.h>
#include <time.h>
#define SIZE 100
// Element-wise c[i] = a[i] + b[i].  Indexes with threadIdx.x only, so it is
// correct only when launched with a single block of at least n threads
// (matching the <<<1, SIZE>>> launch in main).
__global__ void VectorAdd(int *a, int *b, int *c, int n){
    int i = threadIdx.x;
    if (i < n)
        c[i] = a[i] + b[i];
}
// Allocates three managed (unified-memory) arrays, runs VectorAdd in a
// single block of SIZE threads, and prints the first ten results.
// NOTE(review): cudaMallocManaged / launch return codes are unchecked.
int main(){
    int *a, *b, *c;
    cudaMallocManaged(&a, SIZE * sizeof(int));
    cudaMallocManaged(&b, SIZE * sizeof(int));
    cudaMallocManaged(&c, SIZE * sizeof(int));
    // Host initializes the managed buffers directly.
    for (int i = 0; i < SIZE; i++)
    {
        a[i] = i;
        b[i] = i;
        c[i] = 0;
    }
    VectorAdd <<<1,SIZE>>> (a,b,c,SIZE);
    // Kernel launch is asynchronous; wait before the host reads c[].
    cudaDeviceSynchronize();
    for (int i = 0; i < 10; ++i)
    {
        printf("c[%d] = %d\n", i, c[i]);
    }
    cudaFree(a);
    cudaFree(b);
    cudaFree(c);
    return 0;
} |
b2e4f154de99fc5161618c76203fb1f169f07aa4.hip | // !!! This is a file automatically generated by hipify!!!
/* Copyright (c) 2021 PaddlePaddle Authors. All Rights Reserved.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License. */
#include <hip/hip_fp16.h>
#include <hipcub/hipcub.hpp>
#include "paddle/fluid/framework/op_registry.h"
#include "paddle/fluid/framework/operator.h"
#include "paddle/fluid/operators/fused/attention_layer_norm.h"
#include "paddle/fluid/operators/fused/attn_gemm.h"
#include "paddle/fluid/operators/fused/fmha_ref.h"
#include "paddle/fluid/operators/fused/fused_dropout_helper.h"
#include "paddle/fluid/platform/device/gpu/gpu_dnn.h"
#include "paddle/phi/api/include/tensor.h"
#include "paddle/phi/backends/gpu/gpu_device_function.h"
#include "paddle/phi/core/dense_tensor.h"
#include "paddle/phi/kernels/funcs/broadcast_function.h"
#include "paddle/phi/kernels/funcs/elementwise_functor.h"
#include "paddle/phi/kernels/funcs/functors.h"
#include "paddle/phi/kernels/funcs/math_function.h"
#include "paddle/phi/kernels/funcs/transpose_function.cu.h"
#if defined(PADDLE_WITH_NCCL) || defined(PADDLE_WITH_RCCL)
#include "paddle/fluid/distributed/collective/process_group_nccl.h"
#include "paddle/fluid/platform/collective_helper.h"
#include "paddle/fluid/platform/device/gpu/nccl_helper.h"
#endif
namespace paddle {
namespace operators {
// In-place tensor-model-parallel all-reduce (SUM) over the communicator
// identified by `ring_id`; ring_id == -1 means "no parallelism" (no-op).
// Prefers the ProcessGroup path when one is registered for this ring,
// otherwise falls back to the raw NCCL/RCCL communicator on ctx's stream.
template <typename T>
static void AllReduce(phi::DenseTensor &tensor, // NOLINT
const int ring_id,
const phi::GPUContext &ctx) {
if (ring_id == -1) return;
#if defined(PADDLE_WITH_NCCL) || defined(PADDLE_WITH_RCCL)
auto map = paddle::distributed::ProcessGroupMapFromGid::getInstance();
if (map->has(ring_id)) {
// ProcessGroup path: blocking in-place all-reduce, waited on here.
paddle::distributed::ProcessGroup *pg = map->get(ring_id);
auto pg_nccl = static_cast<distributed::ProcessGroupNCCL *>(pg);
paddle::distributed::AllreduceOptions opts;
opts.reduce_op = distributed::ReduceOp::SUM;
auto task = pg_nccl->AllReduce(&tensor, tensor, opts, true, true);
task->Wait();
} else {
// Raw communicator path: in-place ncclAllReduce (send == recv buffer)
// enqueued on the device context's stream.
auto dtype = platform::ToNCCLDataType(
framework::TransToProtoVarType(tensor.dtype()));
int64_t numel = tensor.numel();
const void *sendbuff = tensor.data<T>();
auto place = ctx.GetPlace();
void *recvbuff = ctx.template Alloc<T>(&tensor, tensor.numel() * sizeof(T));
auto comm = platform::NCCLCommContext::Instance().Get(ring_id, place);
auto stream = ctx.stream();
PADDLE_ENFORCE_GPU_SUCCESS(platform::dynload::ncclAllReduce(
sendbuff, recvbuff, numel, dtype, ncclSum, comm->comm(), stream));
}
#else
PADDLE_THROW(platform::errors::Unimplemented(
"PaddlePaddle should compile with NCCL or RCCL when used tensor model "
"parallel op."));
#endif
}
// Forward pass of the fused multi-head attention operator:
//   [optional pre-LayerNorm] -> QKV GEMM (+ bias) -> FMHA
//   (QK^T, mask, softmax, attention dropout, *V) -> out-linear GEMM
//   -> tensor-parallel AllReduce -> bias + dropout + residual
//   [+ post-LayerNorm when pre_layer_norm == false].
template <typename T>
class FusedAttentionOpKernel : public framework::OpKernel<T> {
public:
void Compute(const framework::ExecutionContext &ctx) const override {
// U is the LayerNorm parameter type (e.g. float even when T is float16).
using U = LayerNormParamType<T>;
auto *input_x = ctx.Input<phi::DenseTensor>("X");
auto &dev_ctx = ctx.template device_context<phi::GPUContext>();
const auto pre_layer_norm = ctx.Attr<bool>("pre_layer_norm");
const float epsilon = ctx.Attr<float>("epsilon");
auto *ln_scale = ctx.Input<phi::DenseTensor>("LnScale");
auto *ln_bias = ctx.Input<phi::DenseTensor>("LnBias");
auto *ln_mean = ctx.Output<phi::DenseTensor>("LnMean");
auto *ln_var = ctx.Output<phi::DenseTensor>("LnVariance");
auto *ln_out = ctx.Output<phi::DenseTensor>("LnOut");
const auto num_heads = ctx.Attr<int>("num_heads");
const auto transpose_qkv_wb = ctx.Attr<bool>("transpose_qkv_wb");
// x: qkv's input [batch_size, seq_len, dim_embed]
// if transpose_qkv_wb is False
// y: qkv's weight: [3, num_head, dim_head, dim_embed]
// if transpose_qkv_wb is True
// y: qkv's weight: [dim_embed, 3 * dim_embed]
auto *qkv_weight = ctx.Input<phi::DenseTensor>("QKVW");
auto *qkv_bias = ctx.Input<phi::DenseTensor>("QKVBias");
auto *qkv_out = ctx.Output<phi::DenseTensor>("QKVOut");
auto *qkv_bias_out = ctx.Output<phi::DenseTensor>("QKVBiasOut");
auto *src_mask = ctx.Input<phi::DenseTensor>("SrcMask");
auto *transpose_out_2 = ctx.Output<phi::DenseTensor>("TransposeOut2");
auto *cache_kv = ctx.Input<phi::DenseTensor>("CacheKV");
auto *cache_kv_out = ctx.Output<phi::DenseTensor>("CacheKVOut");
auto *qk_out = ctx.Output<phi::DenseTensor>("QKOut");
auto *qktv_out = ctx.Output<phi::DenseTensor>("QKTVOut");
auto *softmax_out = ctx.Output<phi::DenseTensor>("SoftmaxOut");
auto *attn_dropout_mask_out =
ctx.Output<phi::DenseTensor>("AttnDropoutMaskOut");
auto *attn_dropout_out = ctx.Output<phi::DenseTensor>("AttnDropoutOut");
auto *src_mask_out = ctx.Output<phi::DenseTensor>("SrcMaskOut");
auto *fmha_out = ctx.Output<phi::DenseTensor>("FMHAOut");
auto *out_linear_weight = ctx.Input<phi::DenseTensor>("OutLinearW");
auto *out_linear_bias = ctx.Input<phi::DenseTensor>("OutLinearBias");
auto *out_linear_out = ctx.Output<phi::DenseTensor>("OutLinearOut");
auto *ln_scale_2 = ctx.Input<phi::DenseTensor>("Ln2Scale");
auto *ln_bias_2 = ctx.Input<phi::DenseTensor>("Ln2Bias");
auto *dropout_mask_out = ctx.Output<phi::DenseTensor>("DropoutMaskOut");
auto *bias_dropout_residual_out =
ctx.Output<phi::DenseTensor>("BiasDropoutResidualOut");
auto *ln_mean_2 = ctx.Output<phi::DenseTensor>("Ln2Mean");
auto *ln_var_2 = ctx.Output<phi::DenseTensor>("Ln2Variance");
const float ln_epsilon = ctx.Attr<float>("ln_epsilon");
// Dropout configuration; a rate of exactly 0 lets us skip mask buffers.
float attn_dropout_rate = ctx.Attr<float>("attn_dropout_rate");
const bool has_attn_dropout = (attn_dropout_rate != 0.0f);
DropoutParam dropout_param2(ctx, 0);
const bool has_dropout = (dropout_param2.dropout_prob != 0.0f);
bool is_test_1 = ctx.Attr<bool>("is_test");
auto &dropout_implementation_1 =
ctx.Attr<std::string>("attn_dropout_implementation");
bool is_upscale_in_train_1 =
(dropout_implementation_1 == "upscale_in_train");
auto *seed_1 =
ctx.HasInput("Seed1") ? ctx.Input<phi::DenseTensor>("Seed1") : nullptr;
bool is_fix_seed_1 = ctx.Attr<bool>("attn_dropout_fix_seed");
int seed_val_1 = ctx.Attr<int>("attn_dropout_seed");
int ring_id = ctx.Attr<int>("ring_id");
// final output.
auto *out = ctx.Output<phi::DenseTensor>("Y");
// get data ptr for qkv part.
const auto input_x_dims = input_x->dims();
const auto qkv_w_dims = qkv_weight->dims();
auto *x_data = input_x->data<T>();
auto *qkv_weight_data = qkv_weight->data<T>();
auto *qkv_bias_data = (qkv_bias == nullptr) ? nullptr : qkv_bias->data<T>();
auto *qkv_out_data =
dev_ctx.template Alloc<T>(qkv_out, qkv_out->numel() * sizeof(T));
auto *qkv_bias_out_data =
(qkv_bias == nullptr)
? nullptr
: dev_ctx.template Alloc<T>(qkv_bias_out,
qkv_bias_out->numel() * sizeof(T));
// get data ptr for FMHA.
auto *transpose_out_2_data = dev_ctx.template Alloc<T>(
transpose_out_2, transpose_out_2->numel() * sizeof(T));
auto *cache_kv_out_data =
(cache_kv_out == nullptr)
? nullptr
: dev_ctx.template Alloc<T>(cache_kv_out,
cache_kv_out->numel() * sizeof(T));
auto *qk_out_data =
dev_ctx.template Alloc<T>(qk_out, qk_out->numel() * sizeof(T));
auto *qktv_out_data =
dev_ctx.template Alloc<T>(qktv_out, qktv_out->numel() * sizeof(T));
auto *src_mask_out_data =
(src_mask == nullptr)
? nullptr
: dev_ctx.template Alloc<T>(src_mask_out,
src_mask_out->numel() * sizeof(T));
auto *softmax_out_data = dev_ctx.template Alloc<T>(
softmax_out, softmax_out->numel() * sizeof(T));
// Dropout mask/output buffers are only materialized when dropout is active.
auto *attn_dropout_mask_out_data =
has_attn_dropout ? dev_ctx.template Alloc<uint8_t>(
attn_dropout_mask_out,
attn_dropout_mask_out->numel() * sizeof(uint8_t))
: nullptr;
auto *attn_dropout_out_data =
has_attn_dropout
? dev_ctx.template Alloc<T>(attn_dropout_out,
attn_dropout_out->numel() * sizeof(T))
: nullptr;
auto *fmha_out_data =
dev_ctx.template Alloc<T>(fmha_out, fmha_out->numel() * sizeof(T));
// get data ptr for out_linear.
auto *out_linear_weight_data = out_linear_weight->data<T>();
auto *out_linear_bias_data =
(out_linear_bias == nullptr) ? nullptr : out_linear_bias->data<T>();
auto *out_linear_out_data = dev_ctx.template Alloc<T>(
out_linear_out, out_linear_out->numel() * sizeof(T));
// get data ptr for bias+dropout+residual+layernorm
auto *dropout_mask_out_data =
has_dropout
? dev_ctx.template Alloc<uint8_t>(
dropout_mask_out, dropout_mask_out->numel() * sizeof(uint8_t))
: nullptr;
auto *final_out_data =
dev_ctx.template Alloc<T>(out, out->numel() * sizeof(T));
int batch_size = input_x_dims[0];
int max_seq_len = input_x_dims[1];
int dim_embed = input_x_dims[2];
int num_head;
int dim_head;
int nranks = 1;
// get num_head and dim_head in two different ways
if (!transpose_qkv_wb) {
num_head = qkv_w_dims[1];
dim_head = qkv_w_dims[2];
} else {
nranks = (qkv_w_dims[0] * 3) / qkv_w_dims[1];
num_head = num_heads;
dim_head = dim_embed / (num_head * nranks);
}
int bsz_seq = batch_size * max_seq_len;
int hidden_size = num_head * dim_head;
int output_size = 3 * hidden_size;
int input_size = dim_embed;
auto layer_norm_compute = AttnLayerNorm<T>(
ctx.cuda_device_context(), epsilon, bsz_seq, dim_embed);
bool compute_bias = true;
if (qkv_bias == nullptr) {
compute_bias = false;
}
// (transA, transB, compute_bias) = (false, true, true)
bool transB = transpose_qkv_wb ? false : true;
auto qkv_compute = AttnMatMul<T>(ctx.cuda_device_context(),
false,
transB,
bsz_seq,
output_size,
input_size,
compute_bias);
AttnDropoutParam attn_dropout_param(is_test_1,
dropout_implementation_1,
attn_dropout_rate,
is_upscale_in_train_1,
is_fix_seed_1,
seed_val_1,
seed_1);
auto fmha_ref_compute = FMHARef<T>(ctx.cuda_device_context(),
batch_size,
max_seq_len,
num_head,
dim_head,
attn_dropout_param);
output_size = hidden_size;
// (transA, transB, compute_bias) = (false, false, false)
// NOTE(Yuang Liu): For general input size == output size, change the
// position won't have effects. For mp, the output size is mp_head * dkey
// which is actually the input size. While the input size is hidden size,
// which is actually the output size. So for out linear, switch the
// input size and output size.
auto out_linear_compute = AttnMatMul<T>(ctx.cuda_device_context(),
false,
false,
bsz_seq,
input_size,
output_size,
false);
FusedDropoutLayerNormHelper<T, uint8_t> fused_dropout_layernorm_helper(
ctx.cuda_device_context(),
bsz_seq,
dim_embed,
dropout_param2,
ln_epsilon);
// Step 1: (optional) pre-LayerNorm, then the QKV projection.
if (pre_layer_norm) {
auto *ln_scale_data =
(ln_scale == nullptr ? nullptr : ln_scale->data<U>());
auto *ln_bias_data = (ln_bias == nullptr ? nullptr : ln_bias->data<U>());
auto *ln_mean_data =
dev_ctx.template Alloc<U>(ln_mean, ln_mean->numel() * sizeof(U));
auto *ln_var_data =
dev_ctx.template Alloc<U>(ln_var, ln_var->numel() * sizeof(U));
auto *ln_out_data =
dev_ctx.template Alloc<T>(ln_out, ln_out->numel() * sizeof(T));
layer_norm_compute.ComputeForward(x_data,
ln_scale_data,
ln_bias_data,
ln_out_data,
ln_mean_data,
ln_var_data);
qkv_compute.ComputeForward(
qkv_weight, ln_out, qkv_bias, qkv_out, qkv_bias_out);
} else {
qkv_compute.ComputeForward(
qkv_weight, input_x, qkv_bias, qkv_out, qkv_bias_out);
}
// Step 2: the fused attention core (FMHA).
if (transpose_qkv_wb) {
// resize the output for fmha compute
qkv_out->Resize({batch_size, max_seq_len, 3, num_head, dim_head});
qkv_bias_out->Resize({batch_size, max_seq_len, 3, num_head, dim_head});
}
if (qkv_bias == nullptr) {
fmha_ref_compute.ComputeForward(*qkv_out,
cache_kv,
src_mask,
transpose_out_2,
cache_kv_out,
qk_out,
src_mask_out,
softmax_out,
attn_dropout_mask_out,
attn_dropout_out,
qktv_out,
fmha_out);
} else {
fmha_ref_compute.ComputeForward(*qkv_bias_out,
cache_kv,
src_mask,
transpose_out_2,
cache_kv_out,
qk_out,
src_mask_out,
softmax_out,
attn_dropout_mask_out,
attn_dropout_out,
qktv_out,
fmha_out);
}
if (transpose_qkv_wb) {
// resize the output back to make the shape compatible with infer shape
qkv_out->Resize({batch_size, max_seq_len, 3 * hidden_size});
qkv_bias_out->Resize({batch_size, max_seq_len, 3 * hidden_size});
}
// Step 3: output projection (bias is applied later, fused with dropout).
// fmha_out: [batch_size, seq_len, num_head, head_dim]
// weight: [embed_dim, embed_dim]
// out_linear_out: [batch_size, seq_len, embed_dim]
out_linear_compute.ComputeForward(
out_linear_weight, fmha_out, nullptr, out_linear_out, nullptr);
// tensor model parallel
AllReduce<T>(*out_linear_out, ring_id, ctx.cuda_device_context());
// Step 4: bias + dropout + residual (+ post-LayerNorm if needed).
bool add_residual = ctx.Attr<bool>("add_residual");
const T *residual_ptr = add_residual ? x_data : nullptr;
if (pre_layer_norm) {
// output = (residual + dropout(input + bias))
fused_dropout_layernorm_helper.ResidualDropoutBias(
ctx.cuda_device_context(),
out_linear_out_data,
residual_ptr,
out_linear_bias_data,
final_out_data,
dropout_mask_out_data);
} else {
// TODO(Xreki): support post layer_norm case when add_residual is false.
PADDLE_ENFORCE_EQ(add_residual,
true,
platform::errors::InvalidArgument(
"Attribute add_residual is expected to be true "
"when pre_layer_norm is false."));
const U *ln_scale_2_ptr = ln_scale_2 ? ln_scale_2->data<U>() : nullptr;
const U *ln_bias_2_ptr = ln_bias_2 ? ln_bias_2->data<U>() : nullptr;
T *bias_dropout_residual_out_ptr = dev_ctx.template Alloc<T>(
bias_dropout_residual_out,
bias_dropout_residual_out->numel() * sizeof(T));
U *ln_mean_2_ptr =
dev_ctx.template Alloc<U>(ln_mean_2, ln_mean_2->numel() * sizeof(U));
U *ln_var_2_ptr =
dev_ctx.template Alloc<U>(ln_var_2, ln_var_2->numel() * sizeof(U));
// output = layernorm(residual + dropout(input + bias))
fused_dropout_layernorm_helper.LayernormResidualDropoutBias(
ctx.cuda_device_context(),
out_linear_out_data,
residual_ptr,
out_linear_bias_data,
ln_scale_2_ptr,
ln_bias_2_ptr,
bias_dropout_residual_out_ptr,
dropout_mask_out_data,
final_out_data,
ln_mean_2_ptr,
ln_var_2_ptr);
}
}
};
// Backward pass of the fused multi-head attention operator. Mirrors the
// forward pass in reverse order:
//   residual/dropout/[post-LayerNorm] grad -> out-linear grad -> FMHA grad
//   -> QKV GEMM grad -> [pre-LayerNorm grad] -> residual grad accumulation.
template <typename T>
class FusedAttentionGradKernel : public framework::OpKernel<T> {
public:
void Compute(const framework::ExecutionContext &ctx) const override {
// U is the LayerNorm parameter type (e.g. float even when T is float16).
using U = LayerNormParamType<T>;
const int num_heads = ctx.Attr<int>("num_heads");
const bool transpose_qkv_wb = ctx.Attr<bool>("transpose_qkv_wb");
const auto pre_layer_norm = ctx.Attr<bool>("pre_layer_norm");
const float epsilon = ctx.Attr<float>("epsilon");
const float ln2epsilon = ctx.Attr<float>("ln_epsilon");
const float attn_dropout_prob = ctx.Attr<float>("attn_dropout_rate");
const bool has_attn_dropout = (attn_dropout_prob != 0.0f);
DropoutParam dropout_param2(ctx, 0);
const bool has_dropout = (dropout_param2.dropout_prob != 0.0f);
auto &dev_ctx = ctx.template device_context<phi::GPUContext>();
bool is_test_1 = ctx.Attr<bool>("is_test");
auto &dropout_implementation_1 =
ctx.Attr<std::string>("attn_dropout_implementation");
bool is_upscale_in_train_1 =
(dropout_implementation_1 == "upscale_in_train");
auto *seed_1 =
ctx.HasInput("Seed1") ? ctx.Input<phi::DenseTensor>("Seed1") : nullptr;
bool is_fix_seed_1 = ctx.Attr<bool>("attn_dropout_fix_seed");
int seed_val_1 = ctx.Attr<int>("attn_dropout_seed");
int ring_id = ctx.Attr<int>("ring_id");
// get inputs.
auto *d_y = ctx.Input<phi::DenseTensor>(framework::GradVarName("Y"));
auto *d_y_data = d_y->data<T>();
// fw input
auto *input_x = ctx.Input<phi::DenseTensor>("X");
auto *ln_scale = ctx.Input<phi::DenseTensor>("LnScale");
auto *ln_2_scale = ctx.Input<phi::DenseTensor>("Ln2Scale");
auto *x_data = input_x->data<T>();
auto *ln_scale_data = (ln_scale == nullptr ? nullptr : ln_scale->data<U>());
auto *ln_2_scale_data =
(ln_2_scale == nullptr ? nullptr : ln_2_scale->data<U>());
// fw parameters.
auto *src_mask = ctx.Input<phi::DenseTensor>("SrcMask");
auto *qkv_weight = ctx.Input<phi::DenseTensor>("QKVW");
auto *qkv_bias = ctx.Input<phi::DenseTensor>("QKVBias");
auto *out_linear_weight = ctx.Input<phi::DenseTensor>("OutLinearW");
auto *out_linear_bias = ctx.Input<phi::DenseTensor>("OutLinearBias");
auto *qkv_weight_data = qkv_weight->data<T>();
auto *qkv_bias_data = (qkv_bias == nullptr) ? nullptr : qkv_bias->data<T>();
auto *out_linear_weight_data = out_linear_weight->data<T>();
auto *out_linear_bias_data =
(out_linear_bias == nullptr) ? nullptr : out_linear_bias->data<T>();
// fw output (intermediate activations saved for the backward pass)
auto *fmha_out = ctx.Input<phi::DenseTensor>("FMHAOut");
auto *transpose_out_2 = ctx.Input<phi::DenseTensor>("TransposeOut2");
auto *qk_out = ctx.Input<phi::DenseTensor>("QKOut");
auto *softmax_out = ctx.Input<phi::DenseTensor>("SoftmaxOut");
auto *attn_dropout_mask_out =
ctx.Input<phi::DenseTensor>("AttnDropoutMaskOut");
auto *attn_dropout_out = ctx.Input<phi::DenseTensor>("AttnDropoutOut");
auto *src_mask_out = ctx.Input<phi::DenseTensor>("SrcMaskOut");
auto *ln_2_mean = ctx.Input<phi::DenseTensor>("Ln2Mean");
auto *ln_2_var = ctx.Input<phi::DenseTensor>("Ln2Variance");
auto *dropout_mask_out = ctx.Input<phi::DenseTensor>("DropoutMaskOut");
auto *bias_dropout_residual_out =
ctx.Input<phi::DenseTensor>("BiasDropoutResidualOut");
auto *fmha_out_data = fmha_out->data<T>();
auto *transpose_out_2_data = transpose_out_2->data<T>();
auto *softmax_out_data = softmax_out->data<T>();
auto *src_mask_out_data =
(src_mask == nullptr) ? nullptr : src_mask_out->data<T>();
auto *dropout_mask_out_data =
has_dropout ? dropout_mask_out->data<uint8_t>() : nullptr;
// output's grad
auto *d_x = ctx.Output<phi::DenseTensor>(framework::GradVarName("X"));
auto *d_qkv_out =
ctx.Output<phi::DenseTensor>(framework::GradVarName("QKVOut"));
auto *d_qkv_bias_out =
ctx.Output<phi::DenseTensor>(framework::GradVarName("QKVBiasOut"));
auto *d_qktv_out =
ctx.Output<phi::DenseTensor>(framework::GradVarName("QKTVOut"));
auto *d_transpose_out_2 =
ctx.Output<phi::DenseTensor>(framework::GradVarName("TransposeOut2"));
auto *d_qk_out =
ctx.Output<phi::DenseTensor>(framework::GradVarName("QKOut"));
auto *d_softmax_out =
ctx.Output<phi::DenseTensor>(framework::GradVarName("SoftmaxOut"));
auto *d_attn_dropout_out =
ctx.Output<phi::DenseTensor>(framework::GradVarName("AttnDropoutOut"));
auto *d_src_mask_out =
ctx.Output<phi::DenseTensor>(framework::GradVarName("SrcMaskOut"));
auto *d_fmha_out =
ctx.Output<phi::DenseTensor>(framework::GradVarName("FMHAOut"));
auto *d_out_linear_out =
ctx.Output<phi::DenseTensor>(framework::GradVarName("OutLinearOut"));
auto *d_bias_dropout_residual_out = ctx.Output<phi::DenseTensor>(
framework::GradVarName("BiasDropoutResidualOut"));
auto *d_x_data = dev_ctx.template Alloc<T>(d_x, d_x->numel() * sizeof(T));
// when qkv_bias is not nullptr, d_qkv_out is equal to d_qkv_bias_out, the
// space can be reused.
auto *d_qkv_out_data = (d_qkv_bias_out != nullptr)
? nullptr
: dev_ctx.template Alloc<T>(
d_qkv_out, d_qkv_out->numel() * sizeof(T));
auto *d_qkv_bias_out_data =
(d_qkv_bias_out == nullptr)
? nullptr
: dev_ctx.template Alloc<T>(d_qkv_bias_out,
d_qkv_bias_out->numel() * sizeof(T));
auto *d_qktv_out_data =
dev_ctx.template Alloc<T>(d_qktv_out, d_qktv_out->numel() * sizeof(T));
auto *d_transpose_out_2_data = dev_ctx.template Alloc<T>(
d_transpose_out_2, d_transpose_out_2->numel() * sizeof(T));
auto *d_qk_out_data =
dev_ctx.template Alloc<T>(d_qk_out, d_qk_out->numel() * sizeof(T));
auto *d_softmax_out_data = dev_ctx.template Alloc<T>(
d_softmax_out, d_softmax_out->numel() * sizeof(T));
auto *d_attn_dropout_out_data =
has_attn_dropout
? dev_ctx.template Alloc<T>(d_attn_dropout_out,
d_attn_dropout_out->numel() * sizeof(T))
: nullptr;
auto *d_src_mask_out_data =
(src_mask == nullptr)
? nullptr
: dev_ctx.template Alloc<T>(d_src_mask_out,
d_src_mask_out->numel() * sizeof(T));
auto *d_fmha_out_data =
dev_ctx.template Alloc<T>(d_fmha_out, d_fmha_out->numel() * sizeof(T));
auto *d_out_linear_out_data = dev_ctx.template Alloc<T>(
d_out_linear_out, d_out_linear_out->numel() * sizeof(T));
// parameter grad
auto *d_qkv_weight =
ctx.Output<phi::DenseTensor>(framework::GradVarName("QKVW"));
auto *d_qkv_bias =
ctx.Output<phi::DenseTensor>(framework::GradVarName("QKVBias"));
auto *d_out_linear_weight =
ctx.Output<phi::DenseTensor>(framework::GradVarName("OutLinearW"));
auto *d_out_linear_bias =
ctx.Output<phi::DenseTensor>(framework::GradVarName("OutLinearBias"));
auto *d_ln_2_scale =
ctx.Output<phi::DenseTensor>(framework::GradVarName("Ln2Scale"));
auto *d_ln_2_bias =
ctx.Output<phi::DenseTensor>(framework::GradVarName("Ln2Bias"));
auto *d_qkv_weight_data =
(d_qkv_weight == nullptr)
? nullptr
: dev_ctx.template Alloc<T>(d_qkv_weight,
d_qkv_weight->numel() * sizeof(T));
auto *d_qkv_bias_data =
(d_qkv_bias == nullptr)
? nullptr
: dev_ctx.template Alloc<T>(d_qkv_bias,
d_qkv_bias->numel() * sizeof(T));
auto *d_out_linear_weight_data =
(d_out_linear_weight == nullptr)
? nullptr
: dev_ctx.template Alloc<T>(
d_out_linear_weight,
d_out_linear_weight->numel() * sizeof(T));
auto *d_out_linear_bias_data =
(d_out_linear_bias == nullptr)
? nullptr
: dev_ctx.template Alloc<T>(d_out_linear_bias,
d_out_linear_bias->numel() * sizeof(T));
const auto input_x_dims = input_x->dims();
const auto qkv_w_dims = qkv_weight->dims();
int batch_size = input_x_dims[0];
int max_seq_len = input_x_dims[1];
int dim_embed = input_x_dims[2];
int num_head;
int dim_head;
int nranks = 1;
// num_head/dim_head are derived differently depending on the QKV weight
// layout, mirroring the forward kernel.
if (!transpose_qkv_wb) {
num_head = qkv_w_dims[1];
dim_head = qkv_w_dims[2];
} else {
nranks = (qkv_w_dims[0] * 3) / qkv_w_dims[1];
num_head = num_heads;
dim_head = dim_embed / (num_head * nranks);
}
int bsz_seq = batch_size * max_seq_len;
int hidden_size = num_head * dim_head;
int output_size = 3 * hidden_size;
int input_size = dim_embed;
bool add_residual = ctx.Attr<bool>("add_residual");
// Scratch tensor for the residual branch's gradient; merged into d_x at
// the end when add_residual is true.
phi::DenseTensor d_residual;
T *d_residual_data = nullptr;
if (add_residual) {
d_residual.Resize(input_x_dims);
d_residual_data = dev_ctx.template Alloc<T>(
&d_residual, d_residual.numel() * sizeof(T));
}
bool transA = false;
bool transB = transpose_qkv_wb ? false : true;
bool compute_qkv_bias = qkv_bias ? true : false;
auto layer_norm_compute = AttnLayerNorm<T>(
ctx.cuda_device_context(), epsilon, bsz_seq, dim_embed);
auto qkv_compute = AttnMatMul<T>(ctx.cuda_device_context(),
transA,
transB,
bsz_seq,
output_size,
input_size,
compute_qkv_bias);
AttnDropoutParam attn_dropout_param(is_test_1,
dropout_implementation_1,
attn_dropout_prob,
is_upscale_in_train_1,
is_fix_seed_1,
seed_val_1,
seed_1);
auto fmha_ref_compute = FMHARef<T>(ctx.cuda_device_context(),
batch_size,
max_seq_len,
num_head,
dim_head,
attn_dropout_param);
output_size = hidden_size;
transA = false;
transB = false;
bool compute_bias = false;
// (b*s, num_head * dim_head) * (num_head * dim_head, dim_embed)
auto out_linear_compute = AttnMatMul<T>(ctx.cuda_device_context(),
transA,
transB,
bsz_seq,
input_size,
output_size,
compute_bias);
FusedDropoutLayerNormHelper<T, uint8_t> fused_dropout_layernorm_helper(
ctx.cuda_device_context(),
bsz_seq,
dim_embed,
dropout_param2,
ln2epsilon);
// Step 1 (reverse of forward step 4): grad through bias+dropout+residual
// (and through the post-LayerNorm when pre_layer_norm == false).
if (pre_layer_norm) {
fused_dropout_layernorm_helper.ResidualDropoutBiasGrad(
ctx.cuda_device_context(),
d_y_data,
dropout_mask_out_data,
d_out_linear_out_data,
d_residual_data,
d_out_linear_bias_data);
} else {
auto *ln_2_mean_data = ln_2_mean->data<U>();
auto *ln_2_var_data = ln_2_var->data<U>();
auto *bias_dropout_residual_out_data =
bias_dropout_residual_out->data<T>();
auto *d_ln_2_scale_data =
(d_ln_2_scale == nullptr
? nullptr
: dev_ctx.template Alloc<U>(d_ln_2_scale,
d_ln_2_scale->numel() * sizeof(U)));
auto *d_ln_2_bias_data =
(d_ln_2_bias == nullptr
? nullptr
: dev_ctx.template Alloc<U>(d_ln_2_bias,
d_ln_2_bias->numel() * sizeof(U)));
auto *d_bias_dropout_residual_out_data = dev_ctx.template Alloc<T>(
d_bias_dropout_residual_out,
d_bias_dropout_residual_out->numel() * sizeof(T));
fused_dropout_layernorm_helper.LayernormResidualDropoutBiasGrad(
ctx.cuda_device_context(),
d_y_data,
bias_dropout_residual_out_data,
dropout_mask_out_data,
ln_2_scale_data,
ln_2_mean_data,
ln_2_var_data,
d_bias_dropout_residual_out_data,
d_ln_2_scale_data,
d_ln_2_bias_data,
d_out_linear_out_data,
d_out_linear_bias_data,
d_residual_data);
}
// Step 2: grad of the output projection.
out_linear_compute.ComputeBackward(fmha_out,
out_linear_weight,
d_out_linear_out,
d_fmha_out,
d_out_linear_weight,
nullptr);
// Step 3: grad of the FMHA core (temporarily reshape for the fused QKV
// weight layout, matching the forward pass).
if (transpose_qkv_wb) {
if (compute_qkv_bias) {
d_qkv_bias_out->Resize(
{batch_size, max_seq_len, 3, num_head, dim_head});
} else {
d_qkv_out->Resize({batch_size, max_seq_len, 3, num_head, dim_head});
}
}
if (qkv_bias != nullptr) {
fmha_ref_compute.ComputeBackward(*transpose_out_2,
has_attn_dropout ? src_mask : nullptr,
*softmax_out,
*attn_dropout_mask_out,
*attn_dropout_out,
*qk_out,
*src_mask_out,
*d_fmha_out,
d_qktv_out,
d_attn_dropout_out,
d_softmax_out,
d_src_mask_out,
d_qk_out,
d_transpose_out_2,
nullptr,
d_qkv_bias_out);
} else {
fmha_ref_compute.ComputeBackward(*transpose_out_2,
has_attn_dropout ? src_mask : nullptr,
*softmax_out,
*attn_dropout_mask_out,
*attn_dropout_out,
*qk_out,
*src_mask_out,
*d_fmha_out,
d_qktv_out,
d_attn_dropout_out,
d_softmax_out,
d_src_mask_out,
d_qk_out,
d_transpose_out_2,
nullptr,
d_qkv_out);
}
if (transpose_qkv_wb) {
if (compute_qkv_bias) {
d_qkv_bias_out->Resize({batch_size, max_seq_len, 3 * hidden_size});
} else {
d_qkv_out->Resize({batch_size, max_seq_len, 3 * hidden_size});
}
}
// Step 4: grad of the QKV projection and (optional) pre-LayerNorm.
if (pre_layer_norm) {
auto *ln_mean = ctx.Input<phi::DenseTensor>("LnMean");
auto *ln_var = ctx.Input<phi::DenseTensor>("LnVariance");
auto *ln_out = ctx.Input<phi::DenseTensor>("LnOut");
auto *ln_mean_data = ln_mean->data<U>();
auto *ln_var_data = ln_var->data<U>();
auto *ln_out_data = ln_out->data<T>();
auto *d_ln_out =
ctx.Output<phi::DenseTensor>(framework::GradVarName("LnOut"));
auto *d_ln_scale =
ctx.Output<phi::DenseTensor>(framework::GradVarName("LnScale"));
auto *d_ln_bias =
ctx.Output<phi::DenseTensor>(framework::GradVarName("LnBias"));
auto *d_ln_out_data =
dev_ctx.template Alloc<T>(d_ln_out, d_ln_out->numel() * sizeof(T));
auto *d_ln_scale_data =
(d_ln_scale == nullptr
? nullptr
: dev_ctx.template Alloc<U>(d_ln_scale,
d_ln_scale->numel() * sizeof(U)));
auto *d_ln_bias_data =
(d_ln_bias == nullptr
? nullptr
: dev_ctx.template Alloc<U>(d_ln_bias,
d_ln_bias->numel() * sizeof(U)));
if (qkv_bias != nullptr) {
qkv_compute.ComputeBackward(ln_out,
qkv_weight,
d_qkv_bias_out,
d_ln_out,
d_qkv_weight,
d_qkv_bias);
} else {
qkv_compute.ComputeBackward(
ln_out, qkv_weight, d_qkv_out, d_ln_out, d_qkv_weight, d_qkv_bias);
}
// tensor model parallel
AllReduce<T>(*d_ln_out, ring_id, ctx.cuda_device_context());
layer_norm_compute.ComputeBackward(x_data,
d_ln_out_data,
ln_scale_data,
ln_mean_data,
ln_var_data,
d_x_data,
d_ln_scale_data,
d_ln_bias_data);
} else {
if (qkv_bias != nullptr) {
qkv_compute.ComputeBackward(
input_x, qkv_weight, d_qkv_bias_out, d_x, d_qkv_weight, d_qkv_bias);
} else {
qkv_compute.ComputeBackward(
input_x, qkv_weight, d_qkv_out, d_x, d_qkv_weight, d_qkv_bias);
}
// tensor model parallel
AllReduce<T>(*d_x, ring_id, ctx.cuda_device_context());
}
// Step 5: accumulate the residual branch's gradient into d_x.
if (add_residual) {
// gradient accumulation
std::vector<const phi::DenseTensor *> ins = {&d_residual, d_x};
std::vector<phi::DenseTensor *> outs = {d_x};
phi::funcs::ElementwiseKernel<T>(
ctx.cuda_device_context(), ins, &outs, phi::funcs::AddFunctor<T>());
}
}
};
} // namespace operators
} // namespace paddle
namespace ops = paddle::operators;
namespace plat = paddle::platform;
// Register the forward and backward kernels for float, double and float16.
REGISTER_OP_CUDA_KERNEL(fused_attention,
ops::FusedAttentionOpKernel<float>,
ops::FusedAttentionOpKernel<double>,
ops::FusedAttentionOpKernel<plat::float16>);
REGISTER_OP_CUDA_KERNEL(fused_attention_grad,
ops::FusedAttentionGradKernel<float>,
ops::FusedAttentionGradKernel<double>,
ops::FusedAttentionGradKernel<plat::float16>);
| b2e4f154de99fc5161618c76203fb1f169f07aa4.cu | /* Copyright (c) 2021 PaddlePaddle Authors. All Rights Reserved.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License. */
#include <cuda_fp16.h>
#include <cub/cub.cuh>
#include "paddle/fluid/framework/op_registry.h"
#include "paddle/fluid/framework/operator.h"
#include "paddle/fluid/operators/fused/attention_layer_norm.h"
#include "paddle/fluid/operators/fused/attn_gemm.h"
#include "paddle/fluid/operators/fused/fmha_ref.h"
#include "paddle/fluid/operators/fused/fused_dropout_helper.h"
#include "paddle/fluid/platform/device/gpu/gpu_dnn.h"
#include "paddle/phi/api/include/tensor.h"
#include "paddle/phi/backends/gpu/gpu_device_function.h"
#include "paddle/phi/core/dense_tensor.h"
#include "paddle/phi/kernels/funcs/broadcast_function.h"
#include "paddle/phi/kernels/funcs/elementwise_functor.h"
#include "paddle/phi/kernels/funcs/functors.h"
#include "paddle/phi/kernels/funcs/math_function.h"
#include "paddle/phi/kernels/funcs/transpose_function.cu.h"
#if defined(PADDLE_WITH_NCCL) || defined(PADDLE_WITH_RCCL)
#include "paddle/fluid/distributed/collective/process_group_nccl.h"
#include "paddle/fluid/platform/collective_helper.h"
#include "paddle/fluid/platform/device/gpu/nccl_helper.h"
#endif
namespace paddle {
namespace operators {
// In-place tensor-model-parallel all-reduce (SUM) over the communicator
// identified by `ring_id`; ring_id == -1 means "no parallelism" (no-op).
// Prefers the ProcessGroup path when one is registered for this ring,
// otherwise falls back to the raw NCCL/RCCL communicator on ctx's stream.
template <typename T>
static void AllReduce(phi::DenseTensor &tensor, // NOLINT
const int ring_id,
const phi::GPUContext &ctx) {
if (ring_id == -1) return;
#if defined(PADDLE_WITH_NCCL) || defined(PADDLE_WITH_RCCL)
auto map = paddle::distributed::ProcessGroupMapFromGid::getInstance();
if (map->has(ring_id)) {
// ProcessGroup path: blocking in-place all-reduce, waited on here.
paddle::distributed::ProcessGroup *pg = map->get(ring_id);
auto pg_nccl = static_cast<distributed::ProcessGroupNCCL *>(pg);
paddle::distributed::AllreduceOptions opts;
opts.reduce_op = distributed::ReduceOp::SUM;
auto task = pg_nccl->AllReduce(&tensor, tensor, opts, true, true);
task->Wait();
} else {
// Raw communicator path: in-place ncclAllReduce (send == recv buffer)
// enqueued on the device context's stream.
auto dtype = platform::ToNCCLDataType(
framework::TransToProtoVarType(tensor.dtype()));
int64_t numel = tensor.numel();
const void *sendbuff = tensor.data<T>();
auto place = ctx.GetPlace();
void *recvbuff = ctx.template Alloc<T>(&tensor, tensor.numel() * sizeof(T));
auto comm = platform::NCCLCommContext::Instance().Get(ring_id, place);
auto stream = ctx.stream();
PADDLE_ENFORCE_GPU_SUCCESS(platform::dynload::ncclAllReduce(
sendbuff, recvbuff, numel, dtype, ncclSum, comm->comm(), stream));
}
#else
PADDLE_THROW(platform::errors::Unimplemented(
"PaddlePaddle should compile with NCCL or RCCL when used tensor model "
"parallel op."));
#endif
}
// Forward CUDA kernel of the fused multi-head attention operator.
//
// Pipeline (when pre_layer_norm == true):
//   ln_out = LayerNorm(x)
//   qkv    = ln_out * W_qkv (+ qkv_bias)
//   fmha   = transpose -> QK^T -> (+src_mask) -> softmax -> (attn dropout) -> *V
//   out    = fmha * W_out  ->  tensor-parallel AllReduce
//   y      = residual + dropout(out + out_bias)
// When pre_layer_norm == false, LayerNorm is instead applied after the
// residual add (LayernormResidualDropoutBias) and add_residual must be true.
//
// NOTE(review): x is assumed to be [batch_size, seq_len, dim_embed] — the
// dims are read positionally below; confirm against the op's InferShape.
template <typename T>
class FusedAttentionOpKernel : public framework::OpKernel<T> {
public:
void Compute(const framework::ExecutionContext &ctx) const override {
using U = LayerNormParamType<T>;
auto *input_x = ctx.Input<phi::DenseTensor>("X");
auto &dev_ctx = ctx.template device_context<phi::GPUContext>();
const auto pre_layer_norm = ctx.Attr<bool>("pre_layer_norm");
const float epsilon = ctx.Attr<float>("epsilon");
auto *ln_scale = ctx.Input<phi::DenseTensor>("LnScale");
auto *ln_bias = ctx.Input<phi::DenseTensor>("LnBias");
auto *ln_mean = ctx.Output<phi::DenseTensor>("LnMean");
auto *ln_var = ctx.Output<phi::DenseTensor>("LnVariance");
auto *ln_out = ctx.Output<phi::DenseTensor>("LnOut");
const auto num_heads = ctx.Attr<int>("num_heads");
const auto transpose_qkv_wb = ctx.Attr<bool>("transpose_qkv_wb");
// x: qkv's input [batch_size, seq_len, dim_embed]
// if transpose_qkv_wb is False
// y: qkv's weight: [3, num_head, dim_head, dim_embed]
// if transpose_qkv_wb is True
// y: qkv's weight: [dim_embed, 3 * dim_embed]
auto *qkv_weight = ctx.Input<phi::DenseTensor>("QKVW");
auto *qkv_bias = ctx.Input<phi::DenseTensor>("QKVBias");
auto *qkv_out = ctx.Output<phi::DenseTensor>("QKVOut");
auto *qkv_bias_out = ctx.Output<phi::DenseTensor>("QKVBiasOut");
auto *src_mask = ctx.Input<phi::DenseTensor>("SrcMask");
auto *transpose_out_2 = ctx.Output<phi::DenseTensor>("TransposeOut2");
auto *cache_kv = ctx.Input<phi::DenseTensor>("CacheKV");
auto *cache_kv_out = ctx.Output<phi::DenseTensor>("CacheKVOut");
auto *qk_out = ctx.Output<phi::DenseTensor>("QKOut");
auto *qktv_out = ctx.Output<phi::DenseTensor>("QKTVOut");
auto *softmax_out = ctx.Output<phi::DenseTensor>("SoftmaxOut");
auto *attn_dropout_mask_out =
ctx.Output<phi::DenseTensor>("AttnDropoutMaskOut");
auto *attn_dropout_out = ctx.Output<phi::DenseTensor>("AttnDropoutOut");
auto *src_mask_out = ctx.Output<phi::DenseTensor>("SrcMaskOut");
auto *fmha_out = ctx.Output<phi::DenseTensor>("FMHAOut");
auto *out_linear_weight = ctx.Input<phi::DenseTensor>("OutLinearW");
auto *out_linear_bias = ctx.Input<phi::DenseTensor>("OutLinearBias");
auto *out_linear_out = ctx.Output<phi::DenseTensor>("OutLinearOut");
auto *ln_scale_2 = ctx.Input<phi::DenseTensor>("Ln2Scale");
auto *ln_bias_2 = ctx.Input<phi::DenseTensor>("Ln2Bias");
auto *dropout_mask_out = ctx.Output<phi::DenseTensor>("DropoutMaskOut");
auto *bias_dropout_residual_out =
ctx.Output<phi::DenseTensor>("BiasDropoutResidualOut");
auto *ln_mean_2 = ctx.Output<phi::DenseTensor>("Ln2Mean");
auto *ln_var_2 = ctx.Output<phi::DenseTensor>("Ln2Variance");
const float ln_epsilon = ctx.Attr<float>("ln_epsilon");
float attn_dropout_rate = ctx.Attr<float>("attn_dropout_rate");
// A dropout rate of exactly 0 disables dropout entirely: the mask/output
// buffers below stay unallocated (nullptr) in that case.
const bool has_attn_dropout = (attn_dropout_rate != 0.0f);
DropoutParam dropout_param2(ctx, 0);
const bool has_dropout = (dropout_param2.dropout_prob != 0.0f);
bool is_test_1 = ctx.Attr<bool>("is_test");
auto &dropout_implementation_1 =
ctx.Attr<std::string>("attn_dropout_implementation");
bool is_upscale_in_train_1 =
(dropout_implementation_1 == "upscale_in_train");
auto *seed_1 =
ctx.HasInput("Seed1") ? ctx.Input<phi::DenseTensor>("Seed1") : nullptr;
bool is_fix_seed_1 = ctx.Attr<bool>("attn_dropout_fix_seed");
int seed_val_1 = ctx.Attr<int>("attn_dropout_seed");
int ring_id = ctx.Attr<int>("ring_id");
// final output.
auto *out = ctx.Output<phi::DenseTensor>("Y");
// get data ptr for qkv part.
const auto input_x_dims = input_x->dims();
const auto qkv_w_dims = qkv_weight->dims();
auto *x_data = input_x->data<T>();
auto *qkv_weight_data = qkv_weight->data<T>();
auto *qkv_bias_data = (qkv_bias == nullptr) ? nullptr : qkv_bias->data<T>();
auto *qkv_out_data =
dev_ctx.template Alloc<T>(qkv_out, qkv_out->numel() * sizeof(T));
auto *qkv_bias_out_data =
(qkv_bias == nullptr)
? nullptr
: dev_ctx.template Alloc<T>(qkv_bias_out,
qkv_bias_out->numel() * sizeof(T));
// get data ptr for FMHA.
auto *transpose_out_2_data = dev_ctx.template Alloc<T>(
transpose_out_2, transpose_out_2->numel() * sizeof(T));
auto *cache_kv_out_data =
(cache_kv_out == nullptr)
? nullptr
: dev_ctx.template Alloc<T>(cache_kv_out,
cache_kv_out->numel() * sizeof(T));
auto *qk_out_data =
dev_ctx.template Alloc<T>(qk_out, qk_out->numel() * sizeof(T));
auto *qktv_out_data =
dev_ctx.template Alloc<T>(qktv_out, qktv_out->numel() * sizeof(T));
auto *src_mask_out_data =
(src_mask == nullptr)
? nullptr
: dev_ctx.template Alloc<T>(src_mask_out,
src_mask_out->numel() * sizeof(T));
auto *softmax_out_data = dev_ctx.template Alloc<T>(
softmax_out, softmax_out->numel() * sizeof(T));
auto *attn_dropout_mask_out_data =
has_attn_dropout ? dev_ctx.template Alloc<uint8_t>(
attn_dropout_mask_out,
attn_dropout_mask_out->numel() * sizeof(uint8_t))
: nullptr;
auto *attn_dropout_out_data =
has_attn_dropout
? dev_ctx.template Alloc<T>(attn_dropout_out,
attn_dropout_out->numel() * sizeof(T))
: nullptr;
auto *fmha_out_data =
dev_ctx.template Alloc<T>(fmha_out, fmha_out->numel() * sizeof(T));
// get data ptr for out_linear.
auto *out_linear_weight_data = out_linear_weight->data<T>();
auto *out_linear_bias_data =
(out_linear_bias == nullptr) ? nullptr : out_linear_bias->data<T>();
auto *out_linear_out_data = dev_ctx.template Alloc<T>(
out_linear_out, out_linear_out->numel() * sizeof(T));
// get data ptr for bias+dropout+residual+layernorm
auto *dropout_mask_out_data =
has_dropout
? dev_ctx.template Alloc<uint8_t>(
dropout_mask_out, dropout_mask_out->numel() * sizeof(uint8_t))
: nullptr;
auto *final_out_data =
dev_ctx.template Alloc<T>(out, out->numel() * sizeof(T));
// Derive problem sizes from the input/weight dims.
int batch_size = input_x_dims[0];
int max_seq_len = input_x_dims[1];
int dim_embed = input_x_dims[2];
int num_head;
int dim_head;
int nranks = 1;
// get num_head and dim_head in two different ways
if (!transpose_qkv_wb) {
num_head = qkv_w_dims[1];
dim_head = qkv_w_dims[2];
} else {
// nranks > 1 means the weight is sharded for tensor model parallelism.
nranks = (qkv_w_dims[0] * 3) / qkv_w_dims[1];
num_head = num_heads;
dim_head = dim_embed / (num_head * nranks);
}
int bsz_seq = batch_size * max_seq_len;
int hidden_size = num_head * dim_head;
int output_size = 3 * hidden_size;
int input_size = dim_embed;
auto layer_norm_compute = AttnLayerNorm<T>(
ctx.cuda_device_context(), epsilon, bsz_seq, dim_embed);
bool compute_bias = true;
if (qkv_bias == nullptr) {
compute_bias = false;
}
// (transA, transB, compute_bias) = (false, true, true)
// When transpose_qkv_wb, the weight is already [dim_embed, 3*dim_embed],
// so no transpose of B is needed.
bool transB = transpose_qkv_wb ? false : true;
auto qkv_compute = AttnMatMul<T>(ctx.cuda_device_context(),
false,
transB,
bsz_seq,
output_size,
input_size,
compute_bias);
AttnDropoutParam attn_dropout_param(is_test_1,
dropout_implementation_1,
attn_dropout_rate,
is_upscale_in_train_1,
is_fix_seed_1,
seed_val_1,
seed_1);
auto fmha_ref_compute = FMHARef<T>(ctx.cuda_device_context(),
batch_size,
max_seq_len,
num_head,
dim_head,
attn_dropout_param);
output_size = hidden_size;
// (transA, transB, compute_bias) = (false, false, false)
// NOTE(Yuang Liu): For general input size == output size, change the
// position won't have effects. For mp, the output size is mp_head * dkey
// which is actually the input size. While the input size is hidden size,
// which is actually the output size. So for out linear, switch the
// input size and output size.
auto out_linear_compute = AttnMatMul<T>(ctx.cuda_device_context(),
false,
false,
bsz_seq,
input_size,
output_size,
false);
FusedDropoutLayerNormHelper<T, uint8_t> fused_dropout_layernorm_helper(
ctx.cuda_device_context(),
bsz_seq,
dim_embed,
dropout_param2,
ln_epsilon);
// Step 1: (optional) pre-LayerNorm followed by the fused QKV projection.
if (pre_layer_norm) {
auto *ln_scale_data =
(ln_scale == nullptr ? nullptr : ln_scale->data<U>());
auto *ln_bias_data = (ln_bias == nullptr ? nullptr : ln_bias->data<U>());
auto *ln_mean_data =
dev_ctx.template Alloc<U>(ln_mean, ln_mean->numel() * sizeof(U));
auto *ln_var_data =
dev_ctx.template Alloc<U>(ln_var, ln_var->numel() * sizeof(U));
auto *ln_out_data =
dev_ctx.template Alloc<T>(ln_out, ln_out->numel() * sizeof(T));
layer_norm_compute.ComputeForward(x_data,
ln_scale_data,
ln_bias_data,
ln_out_data,
ln_mean_data,
ln_var_data);
qkv_compute.ComputeForward(
qkv_weight, ln_out, qkv_bias, qkv_out, qkv_bias_out);
} else {
qkv_compute.ComputeForward(
qkv_weight, input_x, qkv_bias, qkv_out, qkv_bias_out);
}
if (transpose_qkv_wb) {
// resize the output for fmha compute
qkv_out->Resize({batch_size, max_seq_len, 3, num_head, dim_head});
qkv_bias_out->Resize({batch_size, max_seq_len, 3, num_head, dim_head});
}
// Step 2: fused multi-head attention core. The biased QKV tensor is used
// whenever a bias exists; otherwise the raw projection output is used.
if (qkv_bias == nullptr) {
fmha_ref_compute.ComputeForward(*qkv_out,
cache_kv,
src_mask,
transpose_out_2,
cache_kv_out,
qk_out,
src_mask_out,
softmax_out,
attn_dropout_mask_out,
attn_dropout_out,
qktv_out,
fmha_out);
} else {
fmha_ref_compute.ComputeForward(*qkv_bias_out,
cache_kv,
src_mask,
transpose_out_2,
cache_kv_out,
qk_out,
src_mask_out,
softmax_out,
attn_dropout_mask_out,
attn_dropout_out,
qktv_out,
fmha_out);
}
if (transpose_qkv_wb) {
// resize the output back to make the shape compatible with infer shape
qkv_out->Resize({batch_size, max_seq_len, 3 * hidden_size});
qkv_bias_out->Resize({batch_size, max_seq_len, 3 * hidden_size});
}
// fmha_out: [batch_size, seq_len, num_head, head_dim]
// weight: [embed_dim, embed_dim]
// out_linear_out: [batch_size, seq_len, embed_dim]
out_linear_compute.ComputeForward(
out_linear_weight, fmha_out, nullptr, out_linear_out, nullptr);
// tensor model parallel
AllReduce<T>(*out_linear_out, ring_id, ctx.cuda_device_context());
// Step 3: bias + dropout + residual (+ post-LayerNorm when
// pre_layer_norm is false).
bool add_residual = ctx.Attr<bool>("add_residual");
const T *residual_ptr = add_residual ? x_data : nullptr;
if (pre_layer_norm) {
// output = (residual + dropout(input + bias))
fused_dropout_layernorm_helper.ResidualDropoutBias(
ctx.cuda_device_context(),
out_linear_out_data,
residual_ptr,
out_linear_bias_data,
final_out_data,
dropout_mask_out_data);
} else {
// TODO(Xreki): support post layer_norm case when add_residual is false.
PADDLE_ENFORCE_EQ(add_residual,
true,
platform::errors::InvalidArgument(
"Attribute add_residual is expected to be true "
"when pre_layer_norm is false."));
const U *ln_scale_2_ptr = ln_scale_2 ? ln_scale_2->data<U>() : nullptr;
const U *ln_bias_2_ptr = ln_bias_2 ? ln_bias_2->data<U>() : nullptr;
T *bias_dropout_residual_out_ptr = dev_ctx.template Alloc<T>(
bias_dropout_residual_out,
bias_dropout_residual_out->numel() * sizeof(T));
U *ln_mean_2_ptr =
dev_ctx.template Alloc<U>(ln_mean_2, ln_mean_2->numel() * sizeof(U));
U *ln_var_2_ptr =
dev_ctx.template Alloc<U>(ln_var_2, ln_var_2->numel() * sizeof(U));
// output = layernorm(residual + dropout(input + bias))
fused_dropout_layernorm_helper.LayernormResidualDropoutBias(
ctx.cuda_device_context(),
out_linear_out_data,
residual_ptr,
out_linear_bias_data,
ln_scale_2_ptr,
ln_bias_2_ptr,
bias_dropout_residual_out_ptr,
dropout_mask_out_data,
final_out_data,
ln_mean_2_ptr,
ln_var_2_ptr);
}
}
};
// Backward CUDA kernel of the fused multi-head attention operator.
//
// Reverses the forward pipeline stage by stage:
//   1. grad of bias+dropout+residual (+ post-LayerNorm when !pre_layer_norm)
//   2. grad of the output linear projection
//   3. grad of the FMHA core (softmax / mask / attention dropout / QK^T / V)
//   4. grad of the QKV projection and (optional) pre-LayerNorm
//   5. tensor-parallel AllReduce of the input gradient, then residual-grad
//      accumulation when add_residual is set.
// NOTE(review): relies on the saved forward intermediates (SoftmaxOut,
// TransposeOut2, dropout masks, ...) being unmodified since the forward pass.
template <typename T>
class FusedAttentionGradKernel : public framework::OpKernel<T> {
public:
void Compute(const framework::ExecutionContext &ctx) const override {
using U = LayerNormParamType<T>;
const int num_heads = ctx.Attr<int>("num_heads");
const bool transpose_qkv_wb = ctx.Attr<bool>("transpose_qkv_wb");
const auto pre_layer_norm = ctx.Attr<bool>("pre_layer_norm");
const float epsilon = ctx.Attr<float>("epsilon");
const float ln2epsilon = ctx.Attr<float>("ln_epsilon");
const float attn_dropout_prob = ctx.Attr<float>("attn_dropout_rate");
// A dropout rate of exactly 0 means no mask was produced in the forward
// pass, so the corresponding grad buffers stay unallocated below.
const bool has_attn_dropout = (attn_dropout_prob != 0.0f);
DropoutParam dropout_param2(ctx, 0);
const bool has_dropout = (dropout_param2.dropout_prob != 0.0f);
auto &dev_ctx = ctx.template device_context<phi::GPUContext>();
bool is_test_1 = ctx.Attr<bool>("is_test");
auto &dropout_implementation_1 =
ctx.Attr<std::string>("attn_dropout_implementation");
bool is_upscale_in_train_1 =
(dropout_implementation_1 == "upscale_in_train");
auto *seed_1 =
ctx.HasInput("Seed1") ? ctx.Input<phi::DenseTensor>("Seed1") : nullptr;
bool is_fix_seed_1 = ctx.Attr<bool>("attn_dropout_fix_seed");
int seed_val_1 = ctx.Attr<int>("attn_dropout_seed");
int ring_id = ctx.Attr<int>("ring_id");
// get inputs.
auto *d_y = ctx.Input<phi::DenseTensor>(framework::GradVarName("Y"));
auto *d_y_data = d_y->data<T>();
// fw input
auto *input_x = ctx.Input<phi::DenseTensor>("X");
auto *ln_scale = ctx.Input<phi::DenseTensor>("LnScale");
auto *ln_2_scale = ctx.Input<phi::DenseTensor>("Ln2Scale");
auto *x_data = input_x->data<T>();
auto *ln_scale_data = (ln_scale == nullptr ? nullptr : ln_scale->data<U>());
auto *ln_2_scale_data =
(ln_2_scale == nullptr ? nullptr : ln_2_scale->data<U>());
// fw parameters.
auto *src_mask = ctx.Input<phi::DenseTensor>("SrcMask");
auto *qkv_weight = ctx.Input<phi::DenseTensor>("QKVW");
auto *qkv_bias = ctx.Input<phi::DenseTensor>("QKVBias");
auto *out_linear_weight = ctx.Input<phi::DenseTensor>("OutLinearW");
auto *out_linear_bias = ctx.Input<phi::DenseTensor>("OutLinearBias");
auto *qkv_weight_data = qkv_weight->data<T>();
auto *qkv_bias_data = (qkv_bias == nullptr) ? nullptr : qkv_bias->data<T>();
auto *out_linear_weight_data = out_linear_weight->data<T>();
auto *out_linear_bias_data =
(out_linear_bias == nullptr) ? nullptr : out_linear_bias->data<T>();
// fw output
auto *fmha_out = ctx.Input<phi::DenseTensor>("FMHAOut");
auto *transpose_out_2 = ctx.Input<phi::DenseTensor>("TransposeOut2");
auto *qk_out = ctx.Input<phi::DenseTensor>("QKOut");
auto *softmax_out = ctx.Input<phi::DenseTensor>("SoftmaxOut");
auto *attn_dropout_mask_out =
ctx.Input<phi::DenseTensor>("AttnDropoutMaskOut");
auto *attn_dropout_out = ctx.Input<phi::DenseTensor>("AttnDropoutOut");
auto *src_mask_out = ctx.Input<phi::DenseTensor>("SrcMaskOut");
auto *ln_2_mean = ctx.Input<phi::DenseTensor>("Ln2Mean");
auto *ln_2_var = ctx.Input<phi::DenseTensor>("Ln2Variance");
auto *dropout_mask_out = ctx.Input<phi::DenseTensor>("DropoutMaskOut");
auto *bias_dropout_residual_out =
ctx.Input<phi::DenseTensor>("BiasDropoutResidualOut");
auto *fmha_out_data = fmha_out->data<T>();
auto *transpose_out_2_data = transpose_out_2->data<T>();
auto *softmax_out_data = softmax_out->data<T>();
auto *src_mask_out_data =
(src_mask == nullptr) ? nullptr : src_mask_out->data<T>();
auto *dropout_mask_out_data =
has_dropout ? dropout_mask_out->data<uint8_t>() : nullptr;
// output's grad
auto *d_x = ctx.Output<phi::DenseTensor>(framework::GradVarName("X"));
auto *d_qkv_out =
ctx.Output<phi::DenseTensor>(framework::GradVarName("QKVOut"));
auto *d_qkv_bias_out =
ctx.Output<phi::DenseTensor>(framework::GradVarName("QKVBiasOut"));
auto *d_qktv_out =
ctx.Output<phi::DenseTensor>(framework::GradVarName("QKTVOut"));
auto *d_transpose_out_2 =
ctx.Output<phi::DenseTensor>(framework::GradVarName("TransposeOut2"));
auto *d_qk_out =
ctx.Output<phi::DenseTensor>(framework::GradVarName("QKOut"));
auto *d_softmax_out =
ctx.Output<phi::DenseTensor>(framework::GradVarName("SoftmaxOut"));
auto *d_attn_dropout_out =
ctx.Output<phi::DenseTensor>(framework::GradVarName("AttnDropoutOut"));
auto *d_src_mask_out =
ctx.Output<phi::DenseTensor>(framework::GradVarName("SrcMaskOut"));
auto *d_fmha_out =
ctx.Output<phi::DenseTensor>(framework::GradVarName("FMHAOut"));
auto *d_out_linear_out =
ctx.Output<phi::DenseTensor>(framework::GradVarName("OutLinearOut"));
auto *d_bias_dropout_residual_out = ctx.Output<phi::DenseTensor>(
framework::GradVarName("BiasDropoutResidualOut"));
auto *d_x_data = dev_ctx.template Alloc<T>(d_x, d_x->numel() * sizeof(T));
// when qkv_bias is not nullptr, d_qkv_out equals d_qkv_bias_out, so the
// space can be reused.
auto *d_qkv_out_data = (d_qkv_bias_out != nullptr)
? nullptr
: dev_ctx.template Alloc<T>(
d_qkv_out, d_qkv_out->numel() * sizeof(T));
auto *d_qkv_bias_out_data =
(d_qkv_bias_out == nullptr)
? nullptr
: dev_ctx.template Alloc<T>(d_qkv_bias_out,
d_qkv_bias_out->numel() * sizeof(T));
auto *d_qktv_out_data =
dev_ctx.template Alloc<T>(d_qktv_out, d_qktv_out->numel() * sizeof(T));
auto *d_transpose_out_2_data = dev_ctx.template Alloc<T>(
d_transpose_out_2, d_transpose_out_2->numel() * sizeof(T));
auto *d_qk_out_data =
dev_ctx.template Alloc<T>(d_qk_out, d_qk_out->numel() * sizeof(T));
auto *d_softmax_out_data = dev_ctx.template Alloc<T>(
d_softmax_out, d_softmax_out->numel() * sizeof(T));
auto *d_attn_dropout_out_data =
has_attn_dropout
? dev_ctx.template Alloc<T>(d_attn_dropout_out,
d_attn_dropout_out->numel() * sizeof(T))
: nullptr;
auto *d_src_mask_out_data =
(src_mask == nullptr)
? nullptr
: dev_ctx.template Alloc<T>(d_src_mask_out,
d_src_mask_out->numel() * sizeof(T));
auto *d_fmha_out_data =
dev_ctx.template Alloc<T>(d_fmha_out, d_fmha_out->numel() * sizeof(T));
auto *d_out_linear_out_data = dev_ctx.template Alloc<T>(
d_out_linear_out, d_out_linear_out->numel() * sizeof(T));
// parameter grad
auto *d_qkv_weight =
ctx.Output<phi::DenseTensor>(framework::GradVarName("QKVW"));
auto *d_qkv_bias =
ctx.Output<phi::DenseTensor>(framework::GradVarName("QKVBias"));
auto *d_out_linear_weight =
ctx.Output<phi::DenseTensor>(framework::GradVarName("OutLinearW"));
auto *d_out_linear_bias =
ctx.Output<phi::DenseTensor>(framework::GradVarName("OutLinearBias"));
auto *d_ln_2_scale =
ctx.Output<phi::DenseTensor>(framework::GradVarName("Ln2Scale"));
auto *d_ln_2_bias =
ctx.Output<phi::DenseTensor>(framework::GradVarName("Ln2Bias"));
auto *d_qkv_weight_data =
(d_qkv_weight == nullptr)
? nullptr
: dev_ctx.template Alloc<T>(d_qkv_weight,
d_qkv_weight->numel() * sizeof(T));
auto *d_qkv_bias_data =
(d_qkv_bias == nullptr)
? nullptr
: dev_ctx.template Alloc<T>(d_qkv_bias,
d_qkv_bias->numel() * sizeof(T));
auto *d_out_linear_weight_data =
(d_out_linear_weight == nullptr)
? nullptr
: dev_ctx.template Alloc<T>(
d_out_linear_weight,
d_out_linear_weight->numel() * sizeof(T));
auto *d_out_linear_bias_data =
(d_out_linear_bias == nullptr)
? nullptr
: dev_ctx.template Alloc<T>(d_out_linear_bias,
d_out_linear_bias->numel() * sizeof(T));
// Derive problem sizes, mirroring the forward kernel.
const auto input_x_dims = input_x->dims();
const auto qkv_w_dims = qkv_weight->dims();
int batch_size = input_x_dims[0];
int max_seq_len = input_x_dims[1];
int dim_embed = input_x_dims[2];
int num_head;
int dim_head;
int nranks = 1;
if (!transpose_qkv_wb) {
num_head = qkv_w_dims[1];
dim_head = qkv_w_dims[2];
} else {
nranks = (qkv_w_dims[0] * 3) / qkv_w_dims[1];
num_head = num_heads;
dim_head = dim_embed / (num_head * nranks);
}
int bsz_seq = batch_size * max_seq_len;
int hidden_size = num_head * dim_head;
int output_size = 3 * hidden_size;
int input_size = dim_embed;
bool add_residual = ctx.Attr<bool>("add_residual");
// Scratch tensor for the residual-branch gradient; only allocated when
// the forward pass actually added a residual connection.
phi::DenseTensor d_residual;
T *d_residual_data = nullptr;
if (add_residual) {
d_residual.Resize(input_x_dims);
d_residual_data = dev_ctx.template Alloc<T>(
&d_residual, d_residual.numel() * sizeof(T));
}
bool transA = false;
bool transB = transpose_qkv_wb ? false : true;
bool compute_qkv_bias = qkv_bias ? true : false;
auto layer_norm_compute = AttnLayerNorm<T>(
ctx.cuda_device_context(), epsilon, bsz_seq, dim_embed);
auto qkv_compute = AttnMatMul<T>(ctx.cuda_device_context(),
transA,
transB,
bsz_seq,
output_size,
input_size,
compute_qkv_bias);
AttnDropoutParam attn_dropout_param(is_test_1,
dropout_implementation_1,
attn_dropout_prob,
is_upscale_in_train_1,
is_fix_seed_1,
seed_val_1,
seed_1);
auto fmha_ref_compute = FMHARef<T>(ctx.cuda_device_context(),
batch_size,
max_seq_len,
num_head,
dim_head,
attn_dropout_param);
output_size = hidden_size;
transA = false;
transB = false;
bool compute_bias = false;
// (b*s, num_head * dim_head) * (num_head * dim_head, dim_embed)
auto out_linear_compute = AttnMatMul<T>(ctx.cuda_device_context(),
transA,
transB,
bsz_seq,
input_size,
output_size,
compute_bias);
FusedDropoutLayerNormHelper<T, uint8_t> fused_dropout_layernorm_helper(
ctx.cuda_device_context(),
bsz_seq,
dim_embed,
dropout_param2,
ln2epsilon);
// Stage 1: backward of the final bias+dropout+residual
// (+ post-LayerNorm when pre_layer_norm is false).
if (pre_layer_norm) {
fused_dropout_layernorm_helper.ResidualDropoutBiasGrad(
ctx.cuda_device_context(),
d_y_data,
dropout_mask_out_data,
d_out_linear_out_data,
d_residual_data,
d_out_linear_bias_data);
} else {
auto *ln_2_mean_data = ln_2_mean->data<U>();
auto *ln_2_var_data = ln_2_var->data<U>();
auto *bias_dropout_residual_out_data =
bias_dropout_residual_out->data<T>();
auto *d_ln_2_scale_data =
(d_ln_2_scale == nullptr
? nullptr
: dev_ctx.template Alloc<U>(d_ln_2_scale,
d_ln_2_scale->numel() * sizeof(U)));
auto *d_ln_2_bias_data =
(d_ln_2_bias == nullptr
? nullptr
: dev_ctx.template Alloc<U>(d_ln_2_bias,
d_ln_2_bias->numel() * sizeof(U)));
auto *d_bias_dropout_residual_out_data = dev_ctx.template Alloc<T>(
d_bias_dropout_residual_out,
d_bias_dropout_residual_out->numel() * sizeof(T));
fused_dropout_layernorm_helper.LayernormResidualDropoutBiasGrad(
ctx.cuda_device_context(),
d_y_data,
bias_dropout_residual_out_data,
dropout_mask_out_data,
ln_2_scale_data,
ln_2_mean_data,
ln_2_var_data,
d_bias_dropout_residual_out_data,
d_ln_2_scale_data,
d_ln_2_bias_data,
d_out_linear_out_data,
d_out_linear_bias_data,
d_residual_data);
}
// Stage 2: backward of the output linear projection.
out_linear_compute.ComputeBackward(fmha_out,
out_linear_weight,
d_out_linear_out,
d_fmha_out,
d_out_linear_weight,
nullptr);
if (transpose_qkv_wb) {
if (compute_qkv_bias) {
d_qkv_bias_out->Resize(
{batch_size, max_seq_len, 3, num_head, dim_head});
} else {
d_qkv_out->Resize({batch_size, max_seq_len, 3, num_head, dim_head});
}
}
// Stage 3: backward of the FMHA core; the gradient lands in
// d_qkv_bias_out (bias path) or d_qkv_out (no-bias path).
if (qkv_bias != nullptr) {
fmha_ref_compute.ComputeBackward(*transpose_out_2,
has_attn_dropout ? src_mask : nullptr,
*softmax_out,
*attn_dropout_mask_out,
*attn_dropout_out,
*qk_out,
*src_mask_out,
*d_fmha_out,
d_qktv_out,
d_attn_dropout_out,
d_softmax_out,
d_src_mask_out,
d_qk_out,
d_transpose_out_2,
nullptr,
d_qkv_bias_out);
} else {
fmha_ref_compute.ComputeBackward(*transpose_out_2,
has_attn_dropout ? src_mask : nullptr,
*softmax_out,
*attn_dropout_mask_out,
*attn_dropout_out,
*qk_out,
*src_mask_out,
*d_fmha_out,
d_qktv_out,
d_attn_dropout_out,
d_softmax_out,
d_src_mask_out,
d_qk_out,
d_transpose_out_2,
nullptr,
d_qkv_out);
}
if (transpose_qkv_wb) {
if (compute_qkv_bias) {
d_qkv_bias_out->Resize({batch_size, max_seq_len, 3 * hidden_size});
} else {
d_qkv_out->Resize({batch_size, max_seq_len, 3 * hidden_size});
}
}
// Stage 4: backward of the QKV projection and (optional) pre-LayerNorm,
// followed by the tensor-parallel AllReduce of the input gradient.
if (pre_layer_norm) {
auto *ln_mean = ctx.Input<phi::DenseTensor>("LnMean");
auto *ln_var = ctx.Input<phi::DenseTensor>("LnVariance");
auto *ln_out = ctx.Input<phi::DenseTensor>("LnOut");
auto *ln_mean_data = ln_mean->data<U>();
auto *ln_var_data = ln_var->data<U>();
auto *ln_out_data = ln_out->data<T>();
auto *d_ln_out =
ctx.Output<phi::DenseTensor>(framework::GradVarName("LnOut"));
auto *d_ln_scale =
ctx.Output<phi::DenseTensor>(framework::GradVarName("LnScale"));
auto *d_ln_bias =
ctx.Output<phi::DenseTensor>(framework::GradVarName("LnBias"));
auto *d_ln_out_data =
dev_ctx.template Alloc<T>(d_ln_out, d_ln_out->numel() * sizeof(T));
auto *d_ln_scale_data =
(d_ln_scale == nullptr
? nullptr
: dev_ctx.template Alloc<U>(d_ln_scale,
d_ln_scale->numel() * sizeof(U)));
auto *d_ln_bias_data =
(d_ln_bias == nullptr
? nullptr
: dev_ctx.template Alloc<U>(d_ln_bias,
d_ln_bias->numel() * sizeof(U)));
if (qkv_bias != nullptr) {
qkv_compute.ComputeBackward(ln_out,
qkv_weight,
d_qkv_bias_out,
d_ln_out,
d_qkv_weight,
d_qkv_bias);
} else {
qkv_compute.ComputeBackward(
ln_out, qkv_weight, d_qkv_out, d_ln_out, d_qkv_weight, d_qkv_bias);
}
// tensor model parallel
AllReduce<T>(*d_ln_out, ring_id, ctx.cuda_device_context());
layer_norm_compute.ComputeBackward(x_data,
d_ln_out_data,
ln_scale_data,
ln_mean_data,
ln_var_data,
d_x_data,
d_ln_scale_data,
d_ln_bias_data);
} else {
if (qkv_bias != nullptr) {
qkv_compute.ComputeBackward(
input_x, qkv_weight, d_qkv_bias_out, d_x, d_qkv_weight, d_qkv_bias);
} else {
qkv_compute.ComputeBackward(
input_x, qkv_weight, d_qkv_out, d_x, d_qkv_weight, d_qkv_bias);
}
// tensor model parallel
AllReduce<T>(*d_x, ring_id, ctx.cuda_device_context());
}
// Stage 5: fold the residual-branch gradient into d_x.
if (add_residual) {
// gradient accumulation
std::vector<const phi::DenseTensor *> ins = {&d_residual, d_x};
std::vector<phi::DenseTensor *> outs = {d_x};
phi::funcs::ElementwiseKernel<T>(
ctx.cuda_device_context(), ins, &outs, phi::funcs::AddFunctor<T>());
}
}
};
} // namespace operators
} // namespace paddle
namespace ops = paddle::operators;
namespace plat = paddle::platform;
// Register the forward and backward fused-attention kernels for float,
// double and float16 element types.
REGISTER_OP_CUDA_KERNEL(fused_attention,
ops::FusedAttentionOpKernel<float>,
ops::FusedAttentionOpKernel<double>,
ops::FusedAttentionOpKernel<plat::float16>);
REGISTER_OP_CUDA_KERNEL(fused_attention_grad,
ops::FusedAttentionGradKernel<float>,
ops::FusedAttentionGradKernel<double>,
ops::FusedAttentionGradKernel<plat::float16>);
|
conv_ex2.hip | // !!! This is a file automatically generated by hipify!!!
/*
* Master in Computer Vision - Module 5 - Exercise 2
*
* INSTRUCTIONS: if you compile this program you will notice that
* both png results coincide and are actually the same image.
* This is because this program is not yet finished.
*
* First you need to understand what the code is doing to produce the first image. It might help you understand
* how cuDNN works.
*
* The final goal is for you to complete the code so that it produces successive convolutions, each based on the previous result.
* This can be done by using a technique called "ping-pong buffers", so no new memory needs to be allocated.
* Here we reuse the d_input and d_output buffers on the GPU; to do so we exchange their pointers so they can be reused
* to read and write the data, respectively, through successive iterations.
*
* All you should need is to have a glance at the code and check cuDNN SDK documentation.
* Let's go for it!! Only 5 parameters are missing there.
*
* GOAL: Find the line saying
*
* "UNCOMMENT AND FILL THIS MISSING PARAMETERS FOR THIS CUDNN API CALL"
*
* uncomment and fill the missing parameters in order to produce 10 iterations
* where the output data of the convolution is convolved again with the proposed
* new filter for several times. After 10 iterations of doing this, the final result
* should look very similar (if not equal) to the one shown in the instructions slides.
*
*
*
* Original Author: Peter Goldsborough
* FreeImage porting / Iterative Conv. Exercise: Jose A. Iglesias-Guitian <jalley@cvc.uab.es>
* Convolutions with cuDNN
* Porting to FreeImage and new cuDNN
*
* http://www.goldsborough.me/cuda/ml/cudnn/c++/2017/10/01/14-37-23-convolutions_with_cudnn/
*
*/
#include <cudnn.h>
#include <cassert>
#include <cstdlib>
#include <iostream>
#include <FreeImage.h>
// Abort with a readable message (file line + cuDNN error string) whenever a
// cuDNN call does not return CUDNN_STATUS_SUCCESS. Wrap every cudnn* API
// call with this macro.
#define checkCUDNN(expression) \
{ \
cudnnStatus_t status = (expression); \
if (status != CUDNN_STATUS_SUCCESS) { \
std::cerr << "Error on line " << __LINE__ << ": " \
<< cudnnGetErrorString(status) << std::endl; \
std::exit(EXIT_FAILURE); \
} \
}
// Convert a floating-point RGBA bitmap (FIRGBAF scanlines in pSource) into
// the 8-bit BGRA destination bitmap, one scanline at a time. Alpha is forced
// to fully opaque.
// NOTE(review): dst_pitch, width and height are unused; the loop bounds come
// from the destination bitmap itself — confirm callers pass matching sizes.
void convert_to_freeimage(FIBITMAP* pBits/*destination*/, const unsigned int& dst_pitch, FIBITMAP* pSource, const int& width, const int& height)
{
    for (int row = 0; row < (int)FreeImage_GetHeight(pBits); ++row) {
        BYTE* dst = (BYTE*)FreeImage_GetScanLine(pBits, row);
        FIRGBAF* src = (FIRGBAF*)FreeImage_GetScanLine(pSource, row);
        for (int col = 0; col < (int)FreeImage_GetWidth(pBits); ++col) {
            dst[0] = 255 * src[col].blue;
            dst[1] = 255 * src[col].green;
            dst[2] = 255 * src[col].red;
            dst[3] = 255;
            dst += 4;
        }
    }
}
void save_image(const char* output_filename,
FIBITMAP* buffer,
int height,
int width) {
FIBITMAP* pBitmap = FreeImage_AllocateT(FIT_BITMAP, width, height, 8 * 4/*pixelsize*/);
unsigned int free_image_pitch = FreeImage_GetPitch(pBitmap);
std::cout << "Pitch: " << free_image_pitch << std::endl;
//BYTE *pBits = reinterpret_cast<BYTE*>(FreeImage_GetBits(pBitmap));
convert_to_freeimage(pBitmap/*destination*/, free_image_pitch, buffer/*source*/, FreeImage_GetWidth(pBitmap), FreeImage_GetHeight(pBitmap));
FreeImage_Save(FIF_PNG, pBitmap, output_filename, 0);
std::cerr << "Wrote output to " << output_filename << std::endl;
}
// Pack a 3-channel float image (row-major, `width` pixels per row in
// pSource) into the 4-channel 8-bit destination bitmap, clamping each
// channel to [0, 255] and forcing alpha to fully opaque.
void save_RGB(FIBITMAP* pBits/*destination*/, float* pSource, const int& width, const int& height)
{
    for (uint row = 0; row < FreeImage_GetHeight(pBits); ++row) {
        BYTE* dst = (BYTE*)FreeImage_GetScanLine(pBits, row);
        for (uint col = 0; col < FreeImage_GetWidth(pBits); ++col)
        {
            // input has 3 channels, output has 4 channels for PNG writing
            dst[col*4 + 0] = max(0.f, min(255 * pSource[col*3 + 0], 255.f));
            dst[col*4 + 1] = max(0.f, min(255 * pSource[col*3 + 1], 255.f));
            dst[col*4 + 2] = max(0.f, min(255 * pSource[col*3 + 2], 255.f));
            dst[col*4 + 3] = 255; // opaque alpha
        }
        // advance to the next source row
        pSource += (width * 3);
    }
}
void save_tensor_image(const char* output_filename,
float* buffer,
int height,
int width) {
FIBITMAP* pBitmap = FreeImage_AllocateT(FIT_BITMAP, width, height, 8 * 4/*pixelsize*/);
//unsigned int free_image_pitch = FreeImage_GetPitch(pBitmap);
//std::cout << "Pitch: " << free_image_pitch << std::endl;
//BYTE *pBits = reinterpret_cast<BYTE*>(FreeImage_GetBits(pBitmap));
save_RGB(pBitmap/*destination*/, buffer/*source*/, width, height);
FreeImage_Save(FIF_PNG, pBitmap, output_filename, 0);
std::cerr << "Wrote output to " << output_filename << std::endl;
}
// Load an image from disk, probing the file's signature to select the
// decoder. NOTE(review): FreeImage_Load is presumably a no-op returning NULL
// when the format cannot be identified — callers should check the result.
FIBITMAP* load_image(const char* image_path) {
    const FREE_IMAGE_FORMAT detected_format = FreeImage_GetFileType(image_path, 0);
    return FreeImage_Load(detected_format, image_path);
}
int main(int argc, const char* argv[]) {
if (argc < 2) {
std::cerr << "usage: conv <image> [gpu=0] [sigmoid=0]" << std::endl;
std::exit(EXIT_FAILURE);
}
int gpu_id = (argc > 2) ? std::atoi(argv[2]) : 0;
std::cerr << "GPU: " << gpu_id << std::endl;
bool with_sigmoid = (argc > 3) ? std::atoi(argv[3]) : 0;
std::cerr << "With sigmoid: " << std::boolalpha << with_sigmoid << std::endl;
FIBITMAP* image = load_image(argv[1]);
int width = FreeImage_GetWidth(image);
int height = FreeImage_GetHeight(image);
float* tensor_image = new float[width*height*3];
for (uint i=0; i < height; i++)
{
BYTE *pPixel = (BYTE *)FreeImage_GetScanLine(image, i);
for (uint j=0; j < width; j++)
{
tensor_image[(i*width + j)*3 + 0] = (float)pPixel[j*3+0] / 255.0;
tensor_image[(i*width + j)*3 + 1] = (float)pPixel[j*3+1] / 255.0;
tensor_image[(i*width + j)*3 + 2] = (float)pPixel[j*3+2] / 255.0;
}
}
hipSetDevice(gpu_id);
cudnnHandle_t cudnn;
cudnnCreate(&cudnn);
cudnnTensorDescriptor_t input_descriptor;
checkCUDNN(cudnnCreateTensorDescriptor(&input_descriptor));
checkCUDNN(cudnnSetTensor4dDescriptor(input_descriptor,
/*format=*/CUDNN_TENSOR_NHWC,
/*dataType=*/CUDNN_DATA_FLOAT,
/*batch_size=*/1,
/*channels=*/3,
/*image_height=*/height,
/*image_width=*/width));
cudnnFilterDescriptor_t kernel_descriptor;
checkCUDNN(cudnnCreateFilterDescriptor(&kernel_descriptor));
checkCUDNN(cudnnSetFilter4dDescriptor(kernel_descriptor,
/*dataType=*/CUDNN_DATA_FLOAT,
/*format=*/CUDNN_TENSOR_NCHW,
/*out_channels=*/3,
/*in_channels=*/3,
/*kernel_height=*/3,
/*kernel_width=*/3));
cudnnConvolutionDescriptor_t convolution_descriptor;
checkCUDNN(cudnnCreateConvolutionDescriptor(&convolution_descriptor));
#if CUDNN_MAJOR < 6
std::cout << "CUDNN < 6" << std::endl;
checkCUDNN(cudnnSetConvolution2dDescriptor(convolution_descriptor,
/*pad_height=*/1,
/*pad_width=*/1,
/*vertical_stride=*/1,
/*horizontal_stride=*/1,
/*dilation_height=*/1,
/*dilation_width=*/1,
/*mode=*//*CUDNN_CROSS_CORRELATION*/CUDNN_CONVOLUTION));
#else
checkCUDNN(cudnnSetConvolution2dDescriptor(convolution_descriptor,
/*pad_height=*/1,
/*pad_width=*/1,
/*vertical_stride=*/1,
/*horizontal_stride=*/1,
/*dilation_height=*/1,
/*dilation_width=*/1,
/*mode=*/CUDNN_CROSS_CORRELATION,
/*computeType=*/CUDNN_DATA_FLOAT));
#endif
int batch_size{0}, channels{0}; //, height{0}, width{0};
checkCUDNN(cudnnGetConvolution2dForwardOutputDim(convolution_descriptor,
input_descriptor,
kernel_descriptor,
&batch_size,
&channels,
&height,
&width));
std::cerr << "Output Image: " << height << " x " << width << " x " << channels << " batch: " << batch_size
<< std::endl;
cudnnTensorDescriptor_t output_descriptor;
checkCUDNN(cudnnCreateTensorDescriptor(&output_descriptor));
checkCUDNN(cudnnSetTensor4dDescriptor(output_descriptor,
/*format=*/CUDNN_TENSOR_NHWC,
/*dataType=*/CUDNN_DATA_FLOAT,
/*batch_size=*/1,
/*channels=*/3,
/*image_height=*/height,
/*image_width=*/width));
cudnnConvolutionFwdAlgo_t convolution_algorithm;
checkCUDNN(
cudnnGetConvolutionForwardAlgorithm(cudnn,
input_descriptor,
kernel_descriptor,
convolution_descriptor,
output_descriptor,
CUDNN_CONVOLUTION_FWD_PREFER_FASTEST,
/*memoryLimitInBytes=*/0,
&convolution_algorithm));
size_t workspace_bytes{0};
checkCUDNN(cudnnGetConvolutionForwardWorkspaceSize(cudnn,
input_descriptor,
kernel_descriptor,
convolution_descriptor,
output_descriptor,
convolution_algorithm,
&workspace_bytes));
std::cerr << "Workspace size: " << (workspace_bytes / 1048576.0) << "MB"
<< std::endl;
assert(workspace_bytes > 0);
std::cerr << "Convolution Algorithm: " << convolution_algorithm << " type" << std::endl;
void* d_workspace{/*nullptr*/0};
hipMalloc(&d_workspace, workspace_bytes);
int image_bytes = batch_size * channels * height * width * sizeof(float);
float* d_input{/*nullptr*/0};
hipMalloc(&d_input, image_bytes);
//hipMemcpy(d_input, image.ptr<float>(0), image_bytes, hipMemcpyHostToDevice);
hipMemcpy(d_input, &(tensor_image[0]), image_bytes, hipMemcpyHostToDevice);
float* d_output{/*nullptr*/0};
hipMalloc(&d_output, image_bytes);
hipMemset(d_output, 0, image_bytes);
// clang-format off
const float kernel_template[3][3] = {
{1, 1, 1},
{1, -8, 1},
{1, 1, 1}
};
// clang-format on
float h_kernel[3][3][3][3];
for (int kernel = 0; kernel < 3; ++kernel) {
for (int channel = 0; channel < 3; ++channel) {
for (int row = 0; row < 3; ++row) {
for (int column = 0; column < 3; ++column) {
h_kernel[kernel][channel][row][column] = kernel_template[row][column];
}
}
}
}
float* d_kernel{/*nullptr*/0};
hipMalloc(&d_kernel, sizeof(h_kernel));
hipMemcpy(d_kernel, h_kernel, sizeof(h_kernel), hipMemcpyHostToDevice);
float alpha = 1.0f, beta = 0.0f;
checkCUDNN(cudnnConvolutionForward(cudnn,
&alpha,
input_descriptor,
d_input,
kernel_descriptor,
d_kernel,
convolution_descriptor,
convolution_algorithm,
d_workspace,
workspace_bytes,
&beta,
output_descriptor,
d_output));
if (with_sigmoid) {
cudnnActivationDescriptor_t activation_descriptor;
checkCUDNN(cudnnCreateActivationDescriptor(&activation_descriptor));
checkCUDNN(cudnnSetActivationDescriptor(activation_descriptor,
CUDNN_ACTIVATION_SIGMOID,
CUDNN_PROPAGATE_NAN,
/*relu_coef=*/0));
checkCUDNN(cudnnActivationForward(cudnn,
activation_descriptor,
&alpha,
output_descriptor,
d_output,
&beta,
output_descriptor,
d_output));
cudnnDestroyActivationDescriptor(activation_descriptor);
}
hipMemcpy(&(tensor_image[0]), d_output, image_bytes, hipMemcpyDeviceToHost);
save_tensor_image("cudnn-output.png", tensor_image, height, width);
// MAKE A SECOND CONVOLUTION
float* d_tmp{/*nullptr*/0};
hipMalloc(&d_tmp, image_bytes);
// NEW FILTER TEMPLATE
const float new_kernel_template[3][3] = {
{1, 2, 1},
{2, 4, 2},
{1, 2, 1}
};
// BUILD NEW KERNEL FILTER
float new_h_kernel[3][3][3][3];
for (int kernel = 0; kernel < 3; ++kernel) {
for (int channel = 0; channel < 3; ++channel) {
for (int row = 0; row < 3; ++row) {
for (int column = 0; column < 3; ++column) {
new_h_kernel[kernel][channel][row][column] = new_kernel_template[row][column];
}
}
}
}
float* d_new_kernel{/*nullptr*/0};
hipMalloc(&d_new_kernel, sizeof(new_h_kernel));
hipMemcpy(d_new_kernel, new_h_kernel, sizeof(new_h_kernel), hipMemcpyHostToDevice);
for (int i=0; i< 10; i++)
{
// SWAP VARIABLES
d_tmp = d_input;
d_input = d_output;
d_output = d_tmp;
// CODING EXERCISE STARTS HERE:
//////////////////////////////////////////////////////////////////////////////////
// UNCOMMENT AND FILL THIS MISSING PARAMETERS FOR THIS CUDNN API CALL:
//////////////////////////////////////////////////////////////////////////////////
checkCUDNN(cudnnConvolutionForward(cudnn,
&alpha,
input_descriptor,
d_input,
kernel_descriptor,
d_new_kernel,
convolution_descriptor,
convolution_algorithm,
d_workspace,
workspace_bytes,
&beta,
output_descriptor,
d_output));
// CODING EXERCISE SHOULD FINISH HERE.
}
hipMemcpy(&(tensor_image[0]), d_output, image_bytes, hipMemcpyDeviceToHost);
save_tensor_image("cudnn-your-output.png", tensor_image, height, width);
// Free resources
delete[] tensor_image;
FreeImage_Unload(image);
hipFree(d_kernel);
hipFree(d_new_kernel);
hipFree(d_input);
hipFree(d_output);
hipFree(d_tmp);
hipFree(d_workspace);
cudnnDestroyTensorDescriptor(input_descriptor);
cudnnDestroyTensorDescriptor(output_descriptor);
cudnnDestroyFilterDescriptor(kernel_descriptor);
cudnnDestroyConvolutionDescriptor(convolution_descriptor);
cudnnDestroy(cudnn);
}
| conv_ex2.cu | /*
* Master in Computer Vision - Module 5 - Exercise 2
*
* INSTRUCTIONS: if you compile this program you will notice that
* both png results coincide and are actually the same image.
* This is because this program is not yet finished.
*
* First you need to understand what the code is doing to produce the first image. It might help you understand
* how cuDNN works.
*
* The final goal is that you complete the code to produce succesive convolutions based on the previous result.
* This can be done by using a technique called "ping-pong buffers", so not new memory needs to be allocated.
* Here we reused d_input and d_output buffers in GPU, to do so we exchange their pointers so they can be reused
* to read and write the data respectively through succesive iterations.
*
* All you should need is to have a glance at the code and check cuDNN SDK documentation.
* Let's go for it!! Only 5 parameters are missing there.
*
* GOAL: Find the line saying
*
* "UNCOMMENT AND FILL THIS MISSING PARAMETERS FOR THIS CUDNN API CALL"
*
* uncomment and fill the missing parameters in order to produce 10 iterations
* where the output data of the convolution is convolved again with the proposed
* new filter for several times. After 10 iterations of doing this, the final result
* should look very similar (if not equal) to the one shown in the instructions slides.
*
*
*
* Original Author: Peter Goldsborough
* FreeImage porting / Iterative Conv. Exercise: Jose A. Iglesias-Guitian <jalley@cvc.uab.es>
* Convolutions with cuDNN
* Porting to FreeImage and new cuDNN
*
* http://www.goldsborough.me/cuda/ml/cudnn/c++/2017/10/01/14-37-23-convolutions_with_cudnn/
*
*/
#include <cudnn.h>
#include <cassert>
#include <cstdlib>
#include <iostream>
#include <FreeImage.h>
#define checkCUDNN(expression) \
{ \
cudnnStatus_t status = (expression); \
if (status != CUDNN_STATUS_SUCCESS) { \
std::cerr << "Error on line " << __LINE__ << ": " \
<< cudnnGetErrorString(status) << std::endl; \
std::exit(EXIT_FAILURE); \
} \
}
void convert_to_freeimage(FIBITMAP* pBits/*destination*/, const unsigned int& dst_pitch, FIBITMAP* pSource, const int& width, const int& height)
{
for(int y = 0; y < FreeImage_GetHeight(pBits); y++) {
BYTE *pPixel = (BYTE *)FreeImage_GetScanLine(pBits, y);
FIRGBAF *rgba = (FIRGBAF *)FreeImage_GetScanLine(pSource, y);
for(int x = 0; x < FreeImage_GetWidth(pBits); x++)
{
pPixel[0] = 255 * rgba[x].blue;
pPixel[1] = 255 * rgba[x].green;
pPixel[2] = 255 * rgba[x].red;
pPixel[3] = 255;
pPixel += 4;
}
}
}
void save_image(const char* output_filename,
FIBITMAP* buffer,
int height,
int width) {
FIBITMAP* pBitmap = FreeImage_AllocateT(FIT_BITMAP, width, height, 8 * 4/*pixelsize*/);
unsigned int free_image_pitch = FreeImage_GetPitch(pBitmap);
std::cout << "Pitch: " << free_image_pitch << std::endl;
//BYTE *pBits = reinterpret_cast<BYTE*>(FreeImage_GetBits(pBitmap));
convert_to_freeimage(pBitmap/*destination*/, free_image_pitch, buffer/*source*/, FreeImage_GetWidth(pBitmap), FreeImage_GetHeight(pBitmap));
FreeImage_Save(FIF_PNG, pBitmap, output_filename, 0);
std::cerr << "Wrote output to " << output_filename << std::endl;
}
void save_RGB(FIBITMAP* pBits/*destination*/, float* pSource, const int& width, const int& height)
{
for(uint y = 0; y < FreeImage_GetHeight(pBits); y++) {
BYTE *pPixel = (BYTE *)FreeImage_GetScanLine(pBits, y);
for(uint x = 0; x < FreeImage_GetWidth(pBits); x++)
{
// input is 3 channels, output is 4 channels for PNG writting
pPixel[x*4+2] = max(0.f, min(255 * pSource[x*3+2], 255.f));
pPixel[x*4+1] = max(0.f, min(255 * pSource[x*3+1], 255.f));
pPixel[x*4+0] = max(0.f, min(255 * pSource[x*3+0], 255.f));
pPixel[x*4+3] = 255; //255 * pSource[x*4+3];
}
pSource += (width * 3);
}
}
void save_tensor_image(const char* output_filename,
float* buffer,
int height,
int width) {
FIBITMAP* pBitmap = FreeImage_AllocateT(FIT_BITMAP, width, height, 8 * 4/*pixelsize*/);
//unsigned int free_image_pitch = FreeImage_GetPitch(pBitmap);
//std::cout << "Pitch: " << free_image_pitch << std::endl;
//BYTE *pBits = reinterpret_cast<BYTE*>(FreeImage_GetBits(pBitmap));
save_RGB(pBitmap/*destination*/, buffer/*source*/, width, height);
FreeImage_Save(FIF_PNG, pBitmap, output_filename, 0);
std::cerr << "Wrote output to " << output_filename << std::endl;
}
FIBITMAP* load_image(const char* image_path) {
FREE_IMAGE_FORMAT format = FreeImage_GetFileType(image_path, 0);
FIBITMAP* image = FreeImage_Load(format, image_path);
return image;
}
int main(int argc, const char* argv[]) {
if (argc < 2) {
std::cerr << "usage: conv <image> [gpu=0] [sigmoid=0]" << std::endl;
std::exit(EXIT_FAILURE);
}
int gpu_id = (argc > 2) ? std::atoi(argv[2]) : 0;
std::cerr << "GPU: " << gpu_id << std::endl;
bool with_sigmoid = (argc > 3) ? std::atoi(argv[3]) : 0;
std::cerr << "With sigmoid: " << std::boolalpha << with_sigmoid << std::endl;
FIBITMAP* image = load_image(argv[1]);
int width = FreeImage_GetWidth(image);
int height = FreeImage_GetHeight(image);
float* tensor_image = new float[width*height*3];
for (uint i=0; i < height; i++)
{
BYTE *pPixel = (BYTE *)FreeImage_GetScanLine(image, i);
for (uint j=0; j < width; j++)
{
tensor_image[(i*width + j)*3 + 0] = (float)pPixel[j*3+0] / 255.0;
tensor_image[(i*width + j)*3 + 1] = (float)pPixel[j*3+1] / 255.0;
tensor_image[(i*width + j)*3 + 2] = (float)pPixel[j*3+2] / 255.0;
}
}
cudaSetDevice(gpu_id);
cudnnHandle_t cudnn;
cudnnCreate(&cudnn);
cudnnTensorDescriptor_t input_descriptor;
checkCUDNN(cudnnCreateTensorDescriptor(&input_descriptor));
checkCUDNN(cudnnSetTensor4dDescriptor(input_descriptor,
/*format=*/CUDNN_TENSOR_NHWC,
/*dataType=*/CUDNN_DATA_FLOAT,
/*batch_size=*/1,
/*channels=*/3,
/*image_height=*/height,
/*image_width=*/width));
cudnnFilterDescriptor_t kernel_descriptor;
checkCUDNN(cudnnCreateFilterDescriptor(&kernel_descriptor));
checkCUDNN(cudnnSetFilter4dDescriptor(kernel_descriptor,
/*dataType=*/CUDNN_DATA_FLOAT,
/*format=*/CUDNN_TENSOR_NCHW,
/*out_channels=*/3,
/*in_channels=*/3,
/*kernel_height=*/3,
/*kernel_width=*/3));
cudnnConvolutionDescriptor_t convolution_descriptor;
checkCUDNN(cudnnCreateConvolutionDescriptor(&convolution_descriptor));
#if CUDNN_MAJOR < 6
std::cout << "CUDNN < 6" << std::endl;
checkCUDNN(cudnnSetConvolution2dDescriptor(convolution_descriptor,
/*pad_height=*/1,
/*pad_width=*/1,
/*vertical_stride=*/1,
/*horizontal_stride=*/1,
/*dilation_height=*/1,
/*dilation_width=*/1,
/*mode=*//*CUDNN_CROSS_CORRELATION*/CUDNN_CONVOLUTION));
#else
checkCUDNN(cudnnSetConvolution2dDescriptor(convolution_descriptor,
/*pad_height=*/1,
/*pad_width=*/1,
/*vertical_stride=*/1,
/*horizontal_stride=*/1,
/*dilation_height=*/1,
/*dilation_width=*/1,
/*mode=*/CUDNN_CROSS_CORRELATION,
/*computeType=*/CUDNN_DATA_FLOAT));
#endif
int batch_size{0}, channels{0}; //, height{0}, width{0};
checkCUDNN(cudnnGetConvolution2dForwardOutputDim(convolution_descriptor,
input_descriptor,
kernel_descriptor,
&batch_size,
&channels,
&height,
&width));
std::cerr << "Output Image: " << height << " x " << width << " x " << channels << " batch: " << batch_size
<< std::endl;
cudnnTensorDescriptor_t output_descriptor;
checkCUDNN(cudnnCreateTensorDescriptor(&output_descriptor));
checkCUDNN(cudnnSetTensor4dDescriptor(output_descriptor,
/*format=*/CUDNN_TENSOR_NHWC,
/*dataType=*/CUDNN_DATA_FLOAT,
/*batch_size=*/1,
/*channels=*/3,
/*image_height=*/height,
/*image_width=*/width));
cudnnConvolutionFwdAlgo_t convolution_algorithm;
checkCUDNN(
cudnnGetConvolutionForwardAlgorithm(cudnn,
input_descriptor,
kernel_descriptor,
convolution_descriptor,
output_descriptor,
CUDNN_CONVOLUTION_FWD_PREFER_FASTEST,
/*memoryLimitInBytes=*/0,
&convolution_algorithm));
size_t workspace_bytes{0};
checkCUDNN(cudnnGetConvolutionForwardWorkspaceSize(cudnn,
input_descriptor,
kernel_descriptor,
convolution_descriptor,
output_descriptor,
convolution_algorithm,
&workspace_bytes));
std::cerr << "Workspace size: " << (workspace_bytes / 1048576.0) << "MB"
<< std::endl;
assert(workspace_bytes > 0);
std::cerr << "Convolution Algorithm: " << convolution_algorithm << " type" << std::endl;
void* d_workspace{/*nullptr*/0};
cudaMalloc(&d_workspace, workspace_bytes);
int image_bytes = batch_size * channels * height * width * sizeof(float);
float* d_input{/*nullptr*/0};
cudaMalloc(&d_input, image_bytes);
//cudaMemcpy(d_input, image.ptr<float>(0), image_bytes, cudaMemcpyHostToDevice);
cudaMemcpy(d_input, &(tensor_image[0]), image_bytes, cudaMemcpyHostToDevice);
float* d_output{/*nullptr*/0};
cudaMalloc(&d_output, image_bytes);
cudaMemset(d_output, 0, image_bytes);
// clang-format off
const float kernel_template[3][3] = {
{1, 1, 1},
{1, -8, 1},
{1, 1, 1}
};
// clang-format on
float h_kernel[3][3][3][3];
for (int kernel = 0; kernel < 3; ++kernel) {
for (int channel = 0; channel < 3; ++channel) {
for (int row = 0; row < 3; ++row) {
for (int column = 0; column < 3; ++column) {
h_kernel[kernel][channel][row][column] = kernel_template[row][column];
}
}
}
}
float* d_kernel{/*nullptr*/0};
cudaMalloc(&d_kernel, sizeof(h_kernel));
cudaMemcpy(d_kernel, h_kernel, sizeof(h_kernel), cudaMemcpyHostToDevice);
float alpha = 1.0f, beta = 0.0f;
checkCUDNN(cudnnConvolutionForward(cudnn,
&alpha,
input_descriptor,
d_input,
kernel_descriptor,
d_kernel,
convolution_descriptor,
convolution_algorithm,
d_workspace,
workspace_bytes,
&beta,
output_descriptor,
d_output));
if (with_sigmoid) {
cudnnActivationDescriptor_t activation_descriptor;
checkCUDNN(cudnnCreateActivationDescriptor(&activation_descriptor));
checkCUDNN(cudnnSetActivationDescriptor(activation_descriptor,
CUDNN_ACTIVATION_SIGMOID,
CUDNN_PROPAGATE_NAN,
/*relu_coef=*/0));
checkCUDNN(cudnnActivationForward(cudnn,
activation_descriptor,
&alpha,
output_descriptor,
d_output,
&beta,
output_descriptor,
d_output));
cudnnDestroyActivationDescriptor(activation_descriptor);
}
cudaMemcpy(&(tensor_image[0]), d_output, image_bytes, cudaMemcpyDeviceToHost);
save_tensor_image("cudnn-output.png", tensor_image, height, width);
// MAKE A SECOND CONVOLUTION
float* d_tmp{/*nullptr*/0};
cudaMalloc(&d_tmp, image_bytes);
// NEW FILTER TEMPLATE
const float new_kernel_template[3][3] = {
{1, 2, 1},
{2, 4, 2},
{1, 2, 1}
};
// BUILD NEW KERNEL FILTER
float new_h_kernel[3][3][3][3];
for (int kernel = 0; kernel < 3; ++kernel) {
for (int channel = 0; channel < 3; ++channel) {
for (int row = 0; row < 3; ++row) {
for (int column = 0; column < 3; ++column) {
new_h_kernel[kernel][channel][row][column] = new_kernel_template[row][column];
}
}
}
}
float* d_new_kernel{/*nullptr*/0};
cudaMalloc(&d_new_kernel, sizeof(new_h_kernel));
cudaMemcpy(d_new_kernel, new_h_kernel, sizeof(new_h_kernel), cudaMemcpyHostToDevice);
for (int i=0; i< 10; i++)
{
// SWAP VARIABLES
d_tmp = d_input;
d_input = d_output;
d_output = d_tmp;
// CODING EXERCISE STARTS HERE:
//////////////////////////////////////////////////////////////////////////////////
// UNCOMMENT AND FILL THIS MISSING PARAMETERS FOR THIS CUDNN API CALL:
//////////////////////////////////////////////////////////////////////////////////
checkCUDNN(cudnnConvolutionForward(cudnn,
&alpha,
input_descriptor,
d_input,
kernel_descriptor,
d_new_kernel,
convolution_descriptor,
convolution_algorithm,
d_workspace,
workspace_bytes,
&beta,
output_descriptor,
d_output));
// CODING EXERCISE SHOULD FINISH HERE.
}
cudaMemcpy(&(tensor_image[0]), d_output, image_bytes, cudaMemcpyDeviceToHost);
save_tensor_image("cudnn-your-output.png", tensor_image, height, width);
// Free resources
delete[] tensor_image;
FreeImage_Unload(image);
cudaFree(d_kernel);
cudaFree(d_new_kernel);
cudaFree(d_input);
cudaFree(d_output);
cudaFree(d_tmp);
cudaFree(d_workspace);
cudnnDestroyTensorDescriptor(input_descriptor);
cudnnDestroyTensorDescriptor(output_descriptor);
cudnnDestroyFilterDescriptor(kernel_descriptor);
cudnnDestroyConvolutionDescriptor(convolution_descriptor);
cudnnDestroy(cudnn);
}
|
bab848a7cf03faf336640ca35d8f6416f0d205fe.hip | // !!! This is a file automatically generated by hipify!!!
#include "net.cuh"
#include "opencv2/opencv.hpp"
#include "common/cuMatrix.h"
#include <hip/hip_runtime.h>
#include "common/util.h"
#include <time.h>
#include "dataAugmentation/cuTrasformation.cuh"
#include "common/Config.h"
#include "common/cuMatrixVector.h"
#include <helper_functions.h>
#include <helper_cuda.h>
#include "common/MemoryMonitor.h"
#include "layers/Pooling.h"
#include "common/cuBase.h"
#include "layers/ConvCFM.h"
#include "layers/FullConnect.h"
#include "layers/SoftMax.h"
#include "layers/LayerBase.h"
#include "layers/LocalConnect.h"
#include "layers/LRN.h"
#include "layers/One.h"
#include "layers/BranchLayer.h"
#include "layers/CombineLayer.h"
#include "layers/DataLayer.h"
#include "layers/NIN.h"
//#include <thread>
#include <queue>
#include <set>
int cuCurCorrect;
cuMatrix<int>*cuCorrect = NULL;
cuMatrix<int>*cuVote = NULL;
std::vector<ConfigBase*>que;
void cuSaveConvNet()
{
FILE *pOut = fopen("Result/checkPoint.txt", "w");
for(int i = 0; i < (int)que.size(); i++){
LayerBase* layer = Layers::instance()->get(que[i]->m_name);
layer->save(pOut);
}
fclose(pOut);
};
void cuFreeConvNet()
{
}
void cuReadConvNet(
int imgDim,
const char* path,
int nclasses)
{
FILE *pIn = fopen(path, "r");
for(int i = 0; i < (int)que.size(); i++){
LayerBase* layer = Layers::instance()->get(que[i]->m_name);
layer->initFromCheckpoint(pIn);
}
fclose(pIn);
};
void buildNetWork(int trainLen, int testLen)
{
/*BFS*/
std::queue<ConfigBase*>qqq;
std::set<ConfigBase*> inque;
for(int i = 0; i < (int)Config::instance()->getFirstLayers().size(); i++){
qqq.push(Config::instance()->getFirstLayers()[i]);
inque.insert(Config::instance()->getFirstLayers()[i]);
}
char logStr[1024];
sprintf(logStr, "\n\n******************layer nexts start********************\n");
LOG(logStr, "Result/log.txt");
std::set<ConfigBase*>finish;
while(!qqq.empty()){
ConfigBase* top = qqq.front();
qqq.pop();
finish.insert(top);
que.push_back(top);
if(top->m_type == std::string("CONV")){
ConfigConv * conv = (ConfigConv*) top;
new ConvCFM(conv->m_name);
}else if(top->m_type == std::string("LOCAL")){
new LocalConnect(top->m_name);
}else if(top->m_type == std::string("BRANCHLAYER")){
new BranchLayer(top->m_name);
}else if(top->m_type == std::string("COMBINELAYER")){
ConfigCombineLayer *bl = static_cast<ConfigCombineLayer*>(top);
bool flag = true;
for(int i = 0; i < (int)bl->m_inputs.size(); i++){
ConfigBase* cb = Config::instance()->getLayerByName(bl->m_inputs[i]);
if(finish.find(cb) == finish.end()){
qqq.push(top);
flag = false;
finish.erase(top);
break;
}
}
if(flag == false) continue;
else new CombineLayer(top->m_name);
}else if(top->m_type == std::string("POOLING")){
new Pooling(top->m_name);
}else if(top->m_type == std::string("FC")){
new FullConnect(top->m_name);
}else if(top->m_type == std::string("SOFTMAX")){
new SoftMax(top->m_name);
}else if(top->m_type == std::string("ONE")){
new One(top->m_name);
}else if(std::string("LRN") == top->m_type){
new LRN(top->m_name);
}else if(std::string("DATA") == top->m_type){
new DataLayer(top->m_name);
}else if(std::string("NIN") == top->m_type){
new NIN(top->m_name);
}
sprintf(logStr, "layer %15s:", top->m_name.c_str());
LOG(logStr, "Result/log.txt");
for(int n = 0; n < (int)top->m_next.size(); n++){
if(inque.find(top->m_next[n]) == inque.end()){
qqq.push(top->m_next[n]);
inque.insert(top->m_next[n]);
}
sprintf(logStr, "%s ", top->m_next[n]->m_name.c_str());
LOG(logStr, "Result/log.txt");
}sprintf(logStr, "\n");
LOG(logStr, "Result/log.txt");
}
sprintf(logStr, "\n\n******************layer nexts end********************\n");
LOG(logStr, "Result/log.txt");
/*correct and cuVote*/
if(cuCorrect == NULL)
{
cuCorrect = new cuMatrix<int>(1,1,1);
cuVote = new cuMatrix<int>(testLen, Config::instance()->getClasses(), 1);
}
}
void cuFreeCNNMemory(
int batch,
cuMatrixVector<float>&trainX,
cuMatrixVector<float>&testX)
{
}
void updataWB()
{
/*updateWb*/
for(int i = 0; i < (int)que.size(); i++){
LayerBase* layer = Layers::instance()->get(que[i]->m_name);
layer->updateWeight();
}
hipStreamSynchronize(0);
getLastCudaError("updateWB");
}
void getNetworkCost(int* y)
{
/*feedforward*/
for(int i = 0; i < (int)que.size(); i++){
if(que[i]->m_type == std::string("SOFTMAX")){
SoftMax* sm = (SoftMax*)Layers::instance()->get(que[i]->m_name);
sm->setPredict(y);
}
}
for(int i = 0; i < (int)que.size(); i++){
LayerBase* layer = Layers::instance()->get(que[i]->m_name);
layer->feedforward();
}
/*backpropagation*/
//std::vector<std::thread>threads;
for(int i = (int)que.size() - 1; i >=0; i--){
ConfigBase* top = que[i];
LayerBase* layer = Layers::instance()->get(top->m_name);
//for(size_t i = 0; i < threads.size(); i++){
// threads[i].join();
//}
//threads.clear();
layer->backpropagation();
//threads.push_back(
// std::thread([layer](){
// layer->getGrad();
// }
//));
layer->getGrad();
}
//for(size_t i = 0; i < threads.size(); i++){
// threads[i].join();
//}
}
/*
dim3(1),dim3(batch)
*/
__global__ void g_getCorrect(float* softMaxP, int cols, int start, int* vote)
{
int id = threadIdx.x;
if(id < start)return;
float* p = softMaxP + id * cols;
int* votep= vote + id * cols;
int r = 0;
float maxele = log(p[0]);
for(int i = 1; i < cols; i++)
{
float val = log(p[i]);
if(maxele < val)
{
maxele = val;
r = i;
}
}
votep[r]++;
}
void resultProdict(int* vote,int start)
{
/*feedforward*/
for(int i = 0; i < (int)que.size(); i++){
LayerBase* layer = Layers::instance()->get(que[i]->m_name);
layer->feedforward();
}
for(int i = 0; i < (int)que.size(); i++){
if(que[i]->m_type == std::string("SOFTMAX")){
hipLaunchKernelGGL(( g_getCorrect), dim3(dim3(1)), dim3(Config::instance()->getBatchSize()), 0, 0,
Layers::instance()->get(que[i]->m_name)->getOutputs()->getDev(),
Layers::instance()->get(que[i]->m_name)->getOutputs()->cols,
start,
vote);
hipStreamSynchronize(0);
}
}
}
void gradientChecking(float**x,
int*y, int batch, int ImgSize, int nclasses, hipblasHandle_t handle)
{
/*for(int hl = 0; hl < hLayers.size(); hl++)
{
dropDelta(hLayers[hl].dropW, Config::instance()->getFC()[hl]->m_dropoutRate);
}
std::cout<<"test network !!!!"<<std::endl;
float epsilon = 1e-4;
for(int a = 0; a < convNCFM.size(); a++)
{
for(int b = 0; b < CLayers[a].layer.size(); b++)
{
sprintf(logStr, "====%d %d\n",a, b);
getNetworkCost(x,
y,
CLayers, hLayers,
smr,
batch, ImgSize, nclasses, handle);
CLayers[a].layer[b].Wgrad->toCpu();
cuMatrix<float>* grad = new cuMatrix<float>(CLayers[a].layer[b].Wgrad->getHost(), CLayers[a].layer[b].Wgrad->rows,
CLayers[a].layer[b].Wgrad->cols, CLayers[a].layer[b].Wgrad->channels);
for(int c = 0; c < CLayers[a].layer[b].W->channels; c++){
for(int i = 0; i < CLayers[a].layer[b].W->rows; i++){
for(int j = 0; j < CLayers[a].layer[b].W->cols; j++){
float memo = CLayers[a].layer[b].W->get(i, j, c);
CLayers[a].layer[b].W->set(i, j, c, memo + epsilon);
CLayers[a].layer[b].W->toGpu();
getNetworkCost(x, y, CLayers, hLayers, smr, batch, ImgSize, nclasses, handle);
smr.cost->toCpu();
float value1 = smr.cost->get(0, 0 , 0);
CLayers[a].layer[b].W->set(i, j, c, memo - epsilon);
CLayers[a].layer[b].W->toGpu();
getNetworkCost(x, y, CLayers, hLayers, smr, batch, ImgSize, nclasses, handle);
smr.cost->toCpu();
float value2 = smr.cost->get(0, 0, 0);
float tp = (value1 - value2) / (2 * epsilon);
if(fabs(tp - grad->get(i, j, c)) > 0.00001)
std::cout<<i<<","<<j<<","<<c<<","<<tp<<", "<<grad->get(i,j,c)<<", "
<<tp - grad->get(i,j,c)<<std::endl;
CLayers[a].layer[b].W->set(i, j, c, memo);
CLayers[a].layer[b].W->toGpu();
}
}
}
delete grad;
}
}*/
}
/*
*/
void __global__ g_getVotingResult(int* voting, int* y, int* correct, int len, int nclasses)
{
for(int i = 0; i < len; i += blockDim.x * gridDim.x)
{
int idx = i + blockDim.x * blockIdx.x + threadIdx.x;
if(idx < len)
{
int* pvoting = voting + idx * nclasses;
int _max = pvoting[0];
int rid = 0;
for(int j = 1; j < nclasses; j++)
{
if(pvoting[j] > _max)
{
_max = pvoting[j];
rid = j;
}
}
if(rid == y[idx])
{
atomicAdd(correct, 1);
}
}
}
}
void predictTestDate(cuMatrixVector<float>&x,
cuMatrix<int>*y ,
cuMatrixVector<float>&testX,
cuMatrix<int>* testY,
int batch,
int ImgSize,
int nclasses,
bool vote,
hipblasHandle_t handle) {
Config::instance()->setTraining(false);
int cropr[] = {Config::instance()->getCrop() / 2, 0, 0, Config::instance()->getCrop(), Config::instance()->getCrop()};
int cropc[] = {Config::instance()->getCrop() / 2, 0, Config::instance()->getCrop(), 0, Config::instance()->getCrop()};
float scalex[] = {0, -Config::instance()->getScale(), Config::instance()->getScale()};
float scaley[] = {0, -Config::instance()->getScale(), Config::instance()->getScale()};
float rotate[] = {0, -Config::instance()->getRotation(), Config::instance()->getRotation()};
int hlen = Config::instance()->getHorizontal() == 1 ? 2 : 1;
int clen = Config::instance()->getCrop() == 0 ? 1 : sizeof(cropc) / sizeof(int);
int scaleLen = Config::instance()->getScale() == 0 ? 1 : sizeof(scalex) / sizeof(float);
int rotateLen = Config::instance()->getRotation() == 0 ? 1 : sizeof(rotate) / sizeof(float);
if(!vote) hlen = clen = scaleLen = rotateLen = 1;
DataLayer *dl = static_cast<DataLayer*>(Layers::instance()->get("data"));
dl->getBatchImageWithStreams(x, 0);
cuVote->gpuClear();
for(int sidx = 0; sidx < scaleLen; sidx++){
for(int sidy = 0; sidy < scaleLen; sidy++){
for(int rid = 0; rid < rotateLen; rid++){
for (int h = 0; h < hlen; h++) {
for (int c = 0; c < clen; c++) {
dl->getBatchImageWithStreams(testX, 0);
for (int p = 0; p < ((int)testX.size() + batch - 1) / batch; p++) {
dl->synchronize();
printf("test %2d%%", 100 * p / (((int)testX.size() + batch - 1) / batch));
int tstart = p * batch;
if(tstart + batch <= (int)testX.size() - batch)
dl->getBatchImageWithStreams(testX, tstart + batch);
else {
int start = testX.size() - batch;
dl->getBatchImageWithStreams(testX, start);
}
if(tstart + batch > (int)testX.size()){
tstart = (int)testX.size() - batch;
}
dl->testData(cropr[c], cropc[c], rotate[rid], scalex[sidx], scaley[sidy], h);
resultProdict(cuVote->getDev() + tstart * nclasses,
p * batch - tstart);
printf("\b\b\b\b\b\b\b\b\b");
}
}
}
}
}
}
cuCorrect->gpuClear();
hipLaunchKernelGGL(( g_getVotingResult), dim3(dim3((testX.size() + batch - 1) / batch)), dim3(dim3(batch)), 0, 0,
cuVote->getDev(),
testY->getDev(),
cuCorrect->getDev(),
testX.size(),
nclasses);
hipStreamSynchronize(0);
getLastCudaError("g_getVotingResult");
cuCorrect->toCpu();
if (cuCorrect->get(0, 0, 0) > cuCurCorrect) {
cuCurCorrect = cuCorrect->get(0, 0, 0);
cuSaveConvNet();
}
}
float getCost(){
float cost = 0.0;
for(int i = 0; i < (int)que.size(); i++){
LayerBase* layer = (LayerBase*)Layers::instance()->get(que[i]->m_name);
layer->calCost();
layer->printCost();
cost += layer->getCost();
}
return cost;
}
void cuTrainNetwork(cuMatrixVector<float>&x,
cuMatrix<int>*y,
cuMatrixVector<float>&testX,
cuMatrix<int>* testY,
int batch,
int ImgSize,
int nclasses,
std::vector<float>&nlrate,
std::vector<float>&nMomentum,
std::vector<int>&epoCount,
hipblasHandle_t handle)
{
char logStr[1024];
if(nlrate.size() != nMomentum.size() || nMomentum.size() != epoCount.size() || nlrate.size() != epoCount.size())
{
printf("nlrate, nMomentum, epoCount size not equal\n");
exit(0);
}
if(Config::instance()->getIsGradientChecking())
gradientChecking(x.m_devPoint, y->getDev(), batch, ImgSize, nclasses, handle);
predictTestDate(x, y, testX, testY, batch, ImgSize, nclasses, 0, handle);
sprintf(logStr, "correct is %d\n", cuCorrect->get(0,0,0));
LOG(logStr, "Result/log.txt");
int epochs = 10000;
float lrate = 0.05f;
float Momentum = 0.9f;
int id = 0;
for (int epo = 0; epo < epochs; epo++) {
if (id >= (int)nlrate.size())
break;
lrate = nlrate[id];
Momentum = nMomentum[id];
Config::instance()->setLrate(lrate);
Config::instance()->setMomentum(Momentum);
float start, end;
start = (float)clock();
cuApplyRandom(batch, clock() + epo, ImgSize);
Config::instance()->setTraining(true);
x.shuffle(5000, y);
DataLayer *dl = static_cast<DataLayer*>(Layers::instance()->get("data"));
dl->getBatchImageWithStreams(x, 0);
for (int k = 0; k < ((int)x.size() + batch - 1) / batch; k ++) {
dl->synchronize();
int start = k * batch;
printf("train %2d%%", 100 * start / (((int)x.size() + batch - 1)));
if(start + batch <= (int)x.size() - batch)
dl->getBatchImageWithStreams(x, start + batch);
else{
int tstart = x.size() - batch;
dl->getBatchImageWithStreams(x, tstart);
}
if(start + batch > (int)x.size()){
start = (int)x.size() - batch;
}
dl->trainData();
getNetworkCost(y->getDev() + start);
updataWB();
printf("\b\b\b\b\b\b\b\b\b");
}
float cost = getCost();
end = (float)clock();
sprintf(logStr, "epoch=%d time=%.03lfs cost=%f Momentum=%.06lf lrate=%.08lf\n",
epo, (float) (end - start) / CLOCKS_PER_SEC,
cost,
Config::instance()->getMomentum(), Config::instance()->getLrate());
LOG(logStr, "Result/log.txt");
if (epo && epo % epoCount[id] == 0) {
id++;
}
sprintf(logStr, "===================weight value================\n");
LOG(logStr, "Result/log.txt");
for(int i = 0; i < (int)que.size(); i++){
LayerBase* layer = Layers::instance()->get(que[i]->m_name);
layer->printParameter();
}
sprintf(logStr, "===================test Result================\n");
LOG(logStr, "Result/log.txt");
predictTestDate(x, y, testX, testY,
batch, ImgSize, nclasses, false, handle);
sprintf(logStr, "test %.2lf%%/%.2lf%%\n", 100.0 * cuCorrect->get(0, 0, 0) / testX.size(),
100.0 * cuCurCorrect / testX.size());
LOG(logStr, "Result/log.txt");
if(epo && epo % Config::instance()->getTestEpoch() == 0){
predictTestDate(x, y, testX, testY,
batch, ImgSize, nclasses, true, handle);
sprintf(logStr, "test voting correct %.2lf%%/%.2lf%%\n", 100.0 * cuCorrect->get(0, 0, 0) / testX.size(),
100.0 * cuCurCorrect / testX.size());
LOG(logStr, "Result/log.txt");
}
if(epo == 0){
MemoryMonitor::instance()->printCpuMemory();
MemoryMonitor::instance()->printGpuMemory();
}
}
}
/*
*/
void __global__ g_getVoteAdd(int* voting, int* predict, int* y, int* correct, int len, int nclasses)
{
for(int i = 0; i < len; i += blockDim.x * gridDim.x)
{
int idx = i + blockDim.x * blockIdx.x + threadIdx.x;
if(idx < len)
{
int* pvoting = voting + idx * nclasses;
int* ppredict= predict+ idx * nclasses;
int _max = pvoting[0] + ppredict[0];
int rid = 0;
for(int j = 0; j < nclasses; j++)
{
pvoting[j] += ppredict[j];
if(pvoting[j] > _max)
{
_max = pvoting[j];
rid = j;
}
}
if(rid == y[idx])
{
atomicAdd(correct, 1);
}
}
}
}
int cuVoteAdd(cuMatrix<int>*& voteSum,
cuMatrix<int>*& predict,
cuMatrix<int>*& testY,
cuMatrix<int>*& correct,
int nclasses)
{
hipLaunchKernelGGL(( g_getVoteAdd), dim3(dim3((testY->getLen() + 256 - 1) / 256)), dim3(dim3(256)), 0, 0,
voteSum->getDev(),
predict->getDev(),
testY->getDev(),
correct->getDev(),
testY->getLen(),
nclasses);
hipStreamSynchronize(0);
getLastCudaError("g_getVoteAdd");
correct->toCpu();
return correct->get(0, 0, 0);
}
| bab848a7cf03faf336640ca35d8f6416f0d205fe.cu | #include "net.cuh"
#include "opencv2/opencv.hpp"
#include "common/cuMatrix.h"
#include <cuda_runtime.h>
#include "common/util.h"
#include <time.h>
#include "dataAugmentation/cuTrasformation.cuh"
#include "common/Config.h"
#include "common/cuMatrixVector.h"
#include <helper_functions.h>
#include <helper_cuda.h>
#include "common/MemoryMonitor.h"
#include "layers/Pooling.h"
#include "common/cuBase.h"
#include "layers/ConvCFM.h"
#include "layers/FullConnect.h"
#include "layers/SoftMax.h"
#include "layers/LayerBase.h"
#include "layers/LocalConnect.h"
#include "layers/LRN.h"
#include "layers/One.h"
#include "layers/BranchLayer.h"
#include "layers/CombineLayer.h"
#include "layers/DataLayer.h"
#include "layers/NIN.h"
//#include <thread>
#include <queue>
#include <set>
int cuCurCorrect;
cuMatrix<int>*cuCorrect = NULL;
cuMatrix<int>*cuVote = NULL;
std::vector<ConfigBase*>que;
void cuSaveConvNet()
{
FILE *pOut = fopen("Result/checkPoint.txt", "w");
for(int i = 0; i < (int)que.size(); i++){
LayerBase* layer = Layers::instance()->get(que[i]->m_name);
layer->save(pOut);
}
fclose(pOut);
};
void cuFreeConvNet()
{
}
void cuReadConvNet(
int imgDim,
const char* path,
int nclasses)
{
FILE *pIn = fopen(path, "r");
for(int i = 0; i < (int)que.size(); i++){
LayerBase* layer = Layers::instance()->get(que[i]->m_name);
layer->initFromCheckpoint(pIn);
}
fclose(pIn);
};
// Build the layer graph with a BFS over the layer configs, instantiating the
// concrete layer object for each config and recording the visit order in the
// global `que` (used later for feedforward/backprop/save/update loops).
// A COMBINELAYER whose inputs are not all built yet is re-queued and retried
// later, so `que` ends up in a valid topological order.
// Also allocates the global result buffers: cuCorrect (1x1 counter) and
// cuVote (testLen x nclasses vote table). trainLen is currently unused.
void buildNetWork(int trainLen, int testLen)
{
    /* BFS over the config graph, starting from the declared first layers. */
    std::queue<ConfigBase*>qqq;
    std::set<ConfigBase*> inque;   /* ever enqueued (prevents duplicate pushes from m_next) */
    for(int i = 0; i < (int)Config::instance()->getFirstLayers().size(); i++){
        qqq.push(Config::instance()->getFirstLayers()[i]);
        inque.insert(Config::instance()->getFirstLayers()[i]);
    }
    char logStr[1024];
    sprintf(logStr, "\n\n******************layer nexts start********************\n");
    LOG(logStr, "Result/log.txt");
    std::set<ConfigBase*>finish;   /* configs whose layer object has been created */
    while(!qqq.empty()){
        ConfigBase* top = qqq.front();
        qqq.pop();
        finish.insert(top);
        que.push_back(top);
        if(top->m_type == std::string("CONV")){
            ConfigConv * conv = (ConfigConv*) top;
            new ConvCFM(conv->m_name);
        }else if(top->m_type == std::string("LOCAL")){
            new LocalConnect(top->m_name);
        }else if(top->m_type == std::string("BRANCHLAYER")){
            new BranchLayer(top->m_name);
        }else if(top->m_type == std::string("COMBINELAYER")){
            ConfigCombineLayer *bl = static_cast<ConfigCombineLayer*>(top);
            bool flag = true;
            for(int i = 0; i < (int)bl->m_inputs.size(); i++){
                ConfigBase* cb = Config::instance()->getLayerByName(bl->m_inputs[i]);
                if(finish.find(cb) == finish.end()){
                    /* An input layer is not built yet: defer this config. */
                    qqq.push(top);
                    flag = false;
                    finish.erase(top);
                    break;
                }
            }
            if(flag == false){
                /* BUG FIX: `top` was unconditionally appended to `que` above.
                   Without removing it here, a deferred combine layer ends up in
                   `que` once per deferral, so it would later be saved/updated/
                   fed forward multiple times. */
                que.pop_back();
                continue;
            }
            else new CombineLayer(top->m_name);
        }else if(top->m_type == std::string("POOLING")){
            new Pooling(top->m_name);
        }else if(top->m_type == std::string("FC")){
            new FullConnect(top->m_name);
        }else if(top->m_type == std::string("SOFTMAX")){
            new SoftMax(top->m_name);
        }else if(top->m_type == std::string("ONE")){
            new One(top->m_name);
        }else if(std::string("LRN") == top->m_type){
            new LRN(top->m_name);
        }else if(std::string("DATA") == top->m_type){
            new DataLayer(top->m_name);
        }else if(std::string("NIN") == top->m_type){
            new NIN(top->m_name);
        }
        /* Log this layer's successors and enqueue the ones not yet seen. */
        sprintf(logStr, "layer %15s:", top->m_name.c_str());
        LOG(logStr, "Result/log.txt");
        for(int n = 0; n < (int)top->m_next.size(); n++){
            if(inque.find(top->m_next[n]) == inque.end()){
                qqq.push(top->m_next[n]);
                inque.insert(top->m_next[n]);
            }
            sprintf(logStr, "%s ", top->m_next[n]->m_name.c_str());
            LOG(logStr, "Result/log.txt");
        }sprintf(logStr, "\n");
        LOG(logStr, "Result/log.txt");
    }
    sprintf(logStr, "\n\n******************layer nexts end********************\n");
    LOG(logStr, "Result/log.txt");
    /* Allocate the shared test-result buffers once. */
    if(cuCorrect == NULL)
    {
        cuCorrect = new cuMatrix<int>(1,1,1);
        cuVote = new cuMatrix<int>(testLen, Config::instance()->getClasses(), 1);
    }
}
// Intentionally a no-op: batch/train/test buffers are not released here.
// Parameters are accepted only to keep the historical call signature.
void cuFreeCNNMemory(
int batch,
cuMatrixVector<float>&trainX,
cuMatrixVector<float>&testX)
{
}
// Apply the gradients computed by getNetworkCost(): every layer, in build
// order, updates its own weights, then the default stream is synchronized so
// the updates are visible before the next batch. (Function name kept as-is,
// including the "updata" typo, since external callers use it.)
void updataWB()
{
/*updateWb*/
for(int i = 0; i < (int)que.size(); i++){
LayerBase* layer = Layers::instance()->get(que[i]->m_name);
layer->updateWeight();
}
cudaStreamSynchronize(0);
getLastCudaError("updateWB");
}
// One training step on the current batch: set the labels on the softmax
// layer(s), run a full forward pass in build order, then backprop and
// gradient computation in reverse order. `y` is a device pointer to the
// batch's labels. The commented-out std::thread code is an abandoned attempt
// to overlap getGrad() with the next layer's backpropagation; kept for
// reference.
void getNetworkCost(int* y)
{
/*feedforward*/
// Hand the label pointer to every softmax layer before the forward pass.
for(int i = 0; i < (int)que.size(); i++){
if(que[i]->m_type == std::string("SOFTMAX")){
SoftMax* sm = (SoftMax*)Layers::instance()->get(que[i]->m_name);
sm->setPredict(y);
}
}
for(int i = 0; i < (int)que.size(); i++){
LayerBase* layer = Layers::instance()->get(que[i]->m_name);
layer->feedforward();
}
/*backpropagation*/
//std::vector<std::thread>threads;
// Reverse build order: deltas flow from the output layers back to the input.
for(int i = (int)que.size() - 1; i >=0; i--){
ConfigBase* top = que[i];
LayerBase* layer = Layers::instance()->get(top->m_name);
//for(size_t i = 0; i < threads.size(); i++){
// threads[i].join();
//}
//threads.clear();
layer->backpropagation();
//threads.push_back(
// std::thread([layer](){
// layer->getGrad();
// }
//));
layer->getGrad();
}
//for(size_t i = 0; i < threads.size(); i++){
// threads[i].join();
//}
}
/*
dim3(1),dim3(batch)
*/
/*
Launch: dim3(1), dim3(batch) — one thread per sample in the batch.
Each thread finds the arg-max class of its row of the softmax output
(`cols` = nclasses) and increments that class in the per-sample vote table.
Threads with id < start are skipped (callers pass the overlap count when the
last batch is re-aligned to the tail of the data set; see resultProdict).
*/
__global__ void g_getCorrect(float* softMaxP, int cols, int start, int* vote)
{
int id = threadIdx.x;
if(id < start)return;
float* p = softMaxP + id * cols;
int* votep= vote + id * cols;
int r = 0;
/* log() is strictly increasing, so comparing the raw softmax outputs yields
   the same arg-max as the original log(p[i]) comparison while saving `cols`
   transcendental evaluations per thread. */
float maxele = p[0];
for(int i = 1; i < cols; i++)
{
float val = p[i];
if(maxele < val)
{
maxele = val;
r = i;
}
}
votep[r]++;
}
// Inference pass for one (already-loaded) batch: run feedforward over all
// layers, then, for each softmax layer, launch g_getCorrect with one block of
// batchSize threads to record each sample's arg-max class in `vote`
// (device pointer, one row of nclasses per sample). `start` is the number of
// leading threads to skip — the overlap when the final batch is re-aligned
// to the tail of the test set (see predictTestDate).
void resultProdict(int* vote,int start)
{
/*feedforward*/
for(int i = 0; i < (int)que.size(); i++){
LayerBase* layer = Layers::instance()->get(que[i]->m_name);
layer->feedforward();
}
for(int i = 0; i < (int)que.size(); i++){
if(que[i]->m_type == std::string("SOFTMAX")){
g_getCorrect<<<dim3(1), Config::instance()->getBatchSize()>>>(
Layers::instance()->get(que[i]->m_name)->getOutputs()->getDev(),
Layers::instance()->get(que[i]->m_name)->getOutputs()->cols,
start,
vote);
cudaStreamSynchronize(0);
}
}
}
// Numerical gradient check — currently DISABLED: the entire body is commented
// out (it predates the layer refactor and references the old CLayers/hLayers/
// smr globals). All parameters are therefore unused. Kept as reference for
// re-implementing finite-difference checking against layer->getGrad().
void gradientChecking(float**x,
int*y, int batch, int ImgSize, int nclasses, cublasHandle_t handle)
{
/*for(int hl = 0; hl < hLayers.size(); hl++)
{
dropDelta(hLayers[hl].dropW, Config::instance()->getFC()[hl]->m_dropoutRate);
}
std::cout<<"test network !!!!"<<std::endl;
float epsilon = 1e-4;
for(int a = 0; a < convNCFM.size(); a++)
{
for(int b = 0; b < CLayers[a].layer.size(); b++)
{
sprintf(logStr, "====%d %d\n",a, b);
getNetworkCost(x,
y,
CLayers, hLayers,
smr,
batch, ImgSize, nclasses, handle);
CLayers[a].layer[b].Wgrad->toCpu();
cuMatrix<float>* grad = new cuMatrix<float>(CLayers[a].layer[b].Wgrad->getHost(), CLayers[a].layer[b].Wgrad->rows,
CLayers[a].layer[b].Wgrad->cols, CLayers[a].layer[b].Wgrad->channels);
for(int c = 0; c < CLayers[a].layer[b].W->channels; c++){
for(int i = 0; i < CLayers[a].layer[b].W->rows; i++){
for(int j = 0; j < CLayers[a].layer[b].W->cols; j++){
float memo = CLayers[a].layer[b].W->get(i, j, c);
CLayers[a].layer[b].W->set(i, j, c, memo + epsilon);
CLayers[a].layer[b].W->toGpu();
getNetworkCost(x, y, CLayers, hLayers, smr, batch, ImgSize, nclasses, handle);
smr.cost->toCpu();
float value1 = smr.cost->get(0, 0 , 0);
CLayers[a].layer[b].W->set(i, j, c, memo - epsilon);
CLayers[a].layer[b].W->toGpu();
getNetworkCost(x, y, CLayers, hLayers, smr, batch, ImgSize, nclasses, handle);
smr.cost->toCpu();
float value2 = smr.cost->get(0, 0, 0);
float tp = (value1 - value2) / (2 * epsilon);
if(fabs(tp - grad->get(i, j, c)) > 0.00001)
std::cout<<i<<","<<j<<","<<c<<","<<tp<<", "<<grad->get(i,j,c)<<", "
<<tp - grad->get(i,j,c)<<std::endl;
CLayers[a].layer[b].W->set(i, j, c, memo);
CLayers[a].layer[b].W->toGpu();
}
}
}
delete grad;
}
}*/
}
/*
*/
/*
Grid-stride over all `len` test samples: each thread takes the arg-max of its
sample's `nclasses`-wide vote row and atomically bumps *correct when that
class matches the label y[idx].
*/
void __global__ g_getVotingResult(int* voting, int* y, int* correct, int len, int nclasses)
{
const int stride = blockDim.x * gridDim.x;
for(int idx = blockDim.x * blockIdx.x + threadIdx.x; idx < len; idx += stride)
{
const int* row = voting + idx * nclasses;
int best = row[0];
int bestClass = 0;
for(int j = 1; j < nclasses; j++)
{
if(row[j] > best)
{
best = row[j];
bestClass = j;
}
}
if(bestClass == y[idx])
{
atomicAdd(correct, 1);
}
}
}
// Evaluate the network on the test set. When `vote` is true, every
// combination of crop offset / horizontal flip / rotation / x-y scale is run
// and each pass adds one arg-max vote per sample into cuVote; the majority
// vote is then scored by g_getVotingResult. When `vote` is false all
// augmentation loops collapse to a single pass. Improves cuCurCorrect and
// writes a checkpoint when a new best is reached.
// NOTE(review): `y`, `ImgSize` and `handle` are unused here; `x` is only used
// for one prefetch call — presumably kept for call-site compatibility.
void predictTestDate(cuMatrixVector<float>&x,
cuMatrix<int>*y ,
cuMatrixVector<float>&testX,
cuMatrix<int>* testY,
int batch,
int ImgSize,
int nclasses,
bool vote,
cublasHandle_t handle) {
Config::instance()->setTraining(false);
// Augmentation tables: 5 crop positions (center + 4 corners), 3 scales,
// 3 rotations, optional horizontal flip.
int cropr[] = {Config::instance()->getCrop() / 2, 0, 0, Config::instance()->getCrop(), Config::instance()->getCrop()};
int cropc[] = {Config::instance()->getCrop() / 2, 0, Config::instance()->getCrop(), 0, Config::instance()->getCrop()};
float scalex[] = {0, -Config::instance()->getScale(), Config::instance()->getScale()};
float scaley[] = {0, -Config::instance()->getScale(), Config::instance()->getScale()};
float rotate[] = {0, -Config::instance()->getRotation(), Config::instance()->getRotation()};
int hlen = Config::instance()->getHorizontal() == 1 ? 2 : 1;
int clen = Config::instance()->getCrop() == 0 ? 1 : sizeof(cropc) / sizeof(int);
int scaleLen = Config::instance()->getScale() == 0 ? 1 : sizeof(scalex) / sizeof(float);
int rotateLen = Config::instance()->getRotation() == 0 ? 1 : sizeof(rotate) / sizeof(float);
if(!vote) hlen = clen = scaleLen = rotateLen = 1;
DataLayer *dl = static_cast<DataLayer*>(Layers::instance()->get("data"));
dl->getBatchImageWithStreams(x, 0);
cuVote->gpuClear();
for(int sidx = 0; sidx < scaleLen; sidx++){
for(int sidy = 0; sidy < scaleLen; sidy++){
for(int rid = 0; rid < rotateLen; rid++){
for (int h = 0; h < hlen; h++) {
for (int c = 0; c < clen; c++) {
dl->getBatchImageWithStreams(testX, 0);
for (int p = 0; p < ((int)testX.size() + batch - 1) / batch; p++) {
dl->synchronize();
printf("test %2d%%", 100 * p / (((int)testX.size() + batch - 1) / batch));
int tstart = p * batch;
// Prefetch the next batch while this one is being scored.
// NOTE(review): the condition `tstart + batch <= size - batch`
// skips prefetching the second-to-last full batch and falls into
// the tail-aligned branch early — looks off by one; confirm intent.
if(tstart + batch <= (int)testX.size() - batch)
dl->getBatchImageWithStreams(testX, tstart + batch);
else {
int start = testX.size() - batch;
dl->getBatchImageWithStreams(testX, start);
}
// Re-align the final partial batch to the tail of the test set;
// the overlap (p * batch - tstart) is skipped inside g_getCorrect.
if(tstart + batch > (int)testX.size()){
tstart = (int)testX.size() - batch;
}
dl->testData(cropr[c], cropc[c], rotate[rid], scalex[sidx], scaley[sidy], h);
resultProdict(cuVote->getDev() + tstart * nclasses,
p * batch - tstart);
printf("\b\b\b\b\b\b\b\b\b");
}
}
}
}
}
}
// Score the accumulated votes and keep the best checkpoint.
cuCorrect->gpuClear();
g_getVotingResult<<<dim3((testX.size() + batch - 1) / batch), dim3(batch)>>>(
cuVote->getDev(),
testY->getDev(),
cuCorrect->getDev(),
testX.size(),
nclasses);
cudaStreamSynchronize(0);
getLastCudaError("g_getVotingResult");
cuCorrect->toCpu();
if (cuCorrect->get(0, 0, 0) > cuCurCorrect) {
cuCurCorrect = cuCorrect->get(0, 0, 0);
cuSaveConvNet();
}
}
// Sum (and log via printCost) the cost contributed by every layer, in build order.
float getCost(){
float total = 0.0f;
const int layerCount = (int)que.size();
for(int idx = 0; idx < layerCount; ++idx){
LayerBase* cur = (LayerBase*)Layers::instance()->get(que[idx]->m_name);
cur->calCost();
cur->printCost();
total += cur->getCost();
}
return total;
}
// Main training driver. Runs up to `epochs` epochs; the learning-rate /
// momentum schedule is given by the parallel vectors nlrate / nMomentum /
// epoCount (advance to the next entry every epoCount[id] epochs; stop when
// the schedule is exhausted). Each epoch: shuffle, stream-prefetch batches,
// run getNetworkCost + updataWB per batch, log the cost and parameters, then
// evaluate on the test set (with full augmentation voting every
// getTestEpoch() epochs).
void cuTrainNetwork(cuMatrixVector<float>&x,
cuMatrix<int>*y,
cuMatrixVector<float>&testX,
cuMatrix<int>* testY,
int batch,
int ImgSize,
int nclasses,
std::vector<float>&nlrate,
std::vector<float>&nMomentum,
std::vector<int>&epoCount,
cublasHandle_t handle)
{
char logStr[1024];
if(nlrate.size() != nMomentum.size() || nMomentum.size() != epoCount.size() || nlrate.size() != epoCount.size())
{
printf("nlrate, nMomentum, epoCount size not equal\n");
exit(0);
}
if(Config::instance()->getIsGradientChecking())
gradientChecking(x.m_devPoint, y->getDev(), batch, ImgSize, nclasses, handle);
// Baseline accuracy before any training.
predictTestDate(x, y, testX, testY, batch, ImgSize, nclasses, 0, handle);
sprintf(logStr, "correct is %d\n", cuCorrect->get(0,0,0));
LOG(logStr, "Result/log.txt");
int epochs = 10000;
float lrate = 0.05f;
float Momentum = 0.9f;
int id = 0;
for (int epo = 0; epo < epochs; epo++) {
if (id >= (int)nlrate.size())
break;
lrate = nlrate[id];
Momentum = nMomentum[id];
Config::instance()->setLrate(lrate);
Config::instance()->setMomentum(Momentum);
float start, end;
start = (float)clock();
cuApplyRandom(batch, clock() + epo, ImgSize);
Config::instance()->setTraining(true);
x.shuffle(5000, y);
DataLayer *dl = static_cast<DataLayer*>(Layers::instance()->get("data"));
dl->getBatchImageWithStreams(x, 0);
for (int k = 0; k < ((int)x.size() + batch - 1) / batch; k ++) {
dl->synchronize();
// NOTE: this inner `int start` shadows the outer `float start` timer;
// the timer is untouched because the int goes out of scope at loop end.
int start = k * batch;
printf("train %2d%%", 100 * start / (((int)x.size() + batch - 1)));
// Prefetch the next batch while training on the current one.
if(start + batch <= (int)x.size() - batch)
dl->getBatchImageWithStreams(x, start + batch);
else{
int tstart = x.size() - batch;
dl->getBatchImageWithStreams(x, tstart);
}
// Re-align the final partial batch to the tail of the training set.
if(start + batch > (int)x.size()){
start = (int)x.size() - batch;
}
dl->trainData();
getNetworkCost(y->getDev() + start);
updataWB();
printf("\b\b\b\b\b\b\b\b\b");
}
float cost = getCost();
end = (float)clock();
sprintf(logStr, "epoch=%d time=%.03lfs cost=%f Momentum=%.06lf lrate=%.08lf\n",
epo, (float) (end - start) / CLOCKS_PER_SEC,
cost,
Config::instance()->getMomentum(), Config::instance()->getLrate());
LOG(logStr, "Result/log.txt");
// Advance the lrate/momentum schedule.
if (epo && epo % epoCount[id] == 0) {
id++;
}
sprintf(logStr, "===================weight value================\n");
LOG(logStr, "Result/log.txt");
for(int i = 0; i < (int)que.size(); i++){
LayerBase* layer = Layers::instance()->get(que[i]->m_name);
layer->printParameter();
}
sprintf(logStr, "===================test Result================\n");
LOG(logStr, "Result/log.txt");
predictTestDate(x, y, testX, testY,
batch, ImgSize, nclasses, false, handle);
sprintf(logStr, "test %.2lf%%/%.2lf%%\n", 100.0 * cuCorrect->get(0, 0, 0) / testX.size(),
100.0 * cuCurCorrect / testX.size());
LOG(logStr, "Result/log.txt");
// Periodic full augmentation-voting evaluation (more expensive).
if(epo && epo % Config::instance()->getTestEpoch() == 0){
predictTestDate(x, y, testX, testY,
batch, ImgSize, nclasses, true, handle);
sprintf(logStr, "test voting correct %.2lf%%/%.2lf%%\n", 100.0 * cuCorrect->get(0, 0, 0) / testX.size(),
100.0 * cuCurCorrect / testX.size());
LOG(logStr, "Result/log.txt");
}
if(epo == 0){
MemoryMonitor::instance()->printCpuMemory();
MemoryMonitor::instance()->printGpuMemory();
}
}
}
/*
*/
/*
Merge `predict` into the running vote table `voting` IN PLACE
(pvoting[j] += ppredict[j] for every class), then score the merged row:
each of the `len` samples bumps *correct when its merged arg-max class
matches the label y[idx]. Note _max is seeded with the j==0 sum before the
loop, so the j==0 iteration's compare is a no-op by design.
*/
void __global__ g_getVoteAdd(int* voting, int* predict, int* y, int* correct, int len, int nclasses)
{
for(int i = 0; i < len; i += blockDim.x * gridDim.x)
{
int idx = i + blockDim.x * blockIdx.x + threadIdx.x;
if(idx < len)
{
int* pvoting = voting + idx * nclasses;
int* ppredict= predict+ idx * nclasses;
int _max = pvoting[0] + ppredict[0];
int rid = 0;
for(int j = 0; j < nclasses; j++)
{
pvoting[j] += ppredict[j];
if(pvoting[j] > _max)
{
_max = pvoting[j];
rid = j;
}
}
if(rid == y[idx])
{
atomicAdd(correct, 1);
}
}
}
}
// Host wrapper around g_getVoteAdd: accumulate `predict` into `voteSum`
// (in place, on the device), count matches against testY into `correct`
// (caller is responsible for clearing it beforehand), and return the count.
// Launch: one thread per sample, 256 threads per block.
int cuVoteAdd(cuMatrix<int>*& voteSum,
cuMatrix<int>*& predict,
cuMatrix<int>*& testY,
cuMatrix<int>*& correct,
int nclasses)
{
g_getVoteAdd<<<dim3((testY->getLen() + 256 - 1) / 256), dim3(256)>>>(
voteSum->getDev(),
predict->getDev(),
testY->getDev(),
correct->getDev(),
testY->getLen(),
nclasses);
cudaStreamSynchronize(0);
getLastCudaError("g_getVoteAdd");
correct->toCpu();
return correct->get(0, 0, 0);
}
|
b5da437c699e9c5ca1d16e905802b593aec13a37.hip | // !!! This is a file automatically generated by hipify!!!
#include <hip/hip_runtime.h>
#include <rocblas.h>
// CUDA and CUBLAS functions
#include "device_launch_parameters.h"
#include "helper_string.h"
#include "helper_cuda.h"
#include "mex.h"
#include <stdio.h>
#include <hip/hip_runtime.h>
#ifndef min
#define min(a,b) ((a < b) ? a : b)
#endif
#ifndef max
#define max(a,b) ((a > b) ? a : b)
#endif
// Dimensions for C = A * B: uiW*/uiH* are width (columns) / height (rows)
// of A, B and the result C.
typedef struct _matrixSize // Optional Command-line multiplier for matrix sizes
{
unsigned int uiWA, uiHA, uiWB, uiHB, uiWC, uiHC;
} sMatrixSize;
// Query the current device, its properties, and the available device memory
// (written into `avail`). Returns false only when the device / property
// queries fail. `needed` is only used in the diagnostic print.
// NOTE(review): the final mexPrintf runs only when hipMemGetInfo FAILS, yet
// it prints an informational line (and `avail` would be stale) — this looks
// like missing braces or an inverted condition; confirm intended behavior.
bool initCuda(hipDeviceProp_t &deviceProp, size_t &avail, size_t needed)
{
// Initialize CUDA
int devID;
hipError_t error;
error = hipGetDevice(&devID);
if (error != hipSuccess)
{
mexPrintf("hipGetDevice returned error code %d, line(%d)\n", error, __LINE__);
return false;
}
error = hipGetDeviceProperties(&deviceProp, devID);
if (error != hipSuccess)
{
mexPrintf("hipGetDeviceProperties returned error code %d, line(%d)\n", error, __LINE__);
return false;
}
size_t total;
error = hipMemGetInfo(&avail, &total);
if (error != hipSuccess)
mexPrintf("GPU Device %d: \"%s\" with compute capability %d.%d, %.0f/%.0f MB, need %.0f MB \n", devID, deviceProp.name, deviceProp.major, deviceProp.minor, (double)(avail) / 1e6, (double)(total) / 1e6, (double)needed / 1e6);
return true;
}
// MATLAB MEX entry point: [C] = CudaFastMult(A, B), single precision only.
// Computes C = A * B with hipblasSgemm. A stays resident on the device; B and
// C are processed in column chunks sized to fit ~40% of free device memory,
// so B can be larger than GPU memory. MATLAB arrays are column-major, which
// matches cuBLAS, so no transposes are needed.
// NOTE(review): `cudaStatus` is unused, and `phaseBasis` / `K` duplicate
// h_A / h_B — presumably leftovers; confirm before removing.
void mexFunction(int nlhs, mxArray *plhs[],
int nrhs, const mxArray *prhs[]) {
hipError_t cudaStatus;
sMatrixSize matrix_size;
hipDeviceProp_t deviceProp;
if (nrhs < 2 || nlhs != 1)
{
mexPrintf("Use: [C] = CudaFastMult(A,B);");
return;
}
float *phaseBasis = (float*)mxGetData(prhs[0]);
float *K = (float*)mxGetData(prhs[1]);
const size_t *dimA = mxGetDimensions(prhs[0]);
const size_t *dimB = mxGetDimensions(prhs[1]);
if (!mxIsSingle(prhs[0]) || !mxIsSingle(prhs[1]))
{
mexPrintf("Currently supporting only single class variables\n");
return;
}
// dim[0] = rows (height), dim[1] = columns (width); C is HA x WB.
matrix_size.uiWA = dimA[1];
matrix_size.uiHA = dimA[0];
matrix_size.uiWB = dimB[1];
matrix_size.uiHB = dimB[0];
matrix_size.uiWC = matrix_size.uiWB;
matrix_size.uiHC = matrix_size.uiHA;
plhs[0] = mxCreateNumericMatrix(dimA[0], dimB[1], mxSINGLE_CLASS, mxREAL);
float *h_A = (float *)mxGetData(prhs[0]);
float *h_B = (float *)mxGetData(prhs[1]);
float *h_C = (float *)mxGetData(plhs[0]);
size_t size_A = matrix_size.uiWA * matrix_size.uiHA;
size_t mem_size_A = sizeof(float) * size_A;
size_t size_B = matrix_size.uiWB * matrix_size.uiHB;
size_t mem_size_B = sizeof(float) * size_B;
size_t size_C = matrix_size.uiWC * matrix_size.uiHC;
size_t mem_size_C = sizeof(float) * size_C;
size_t availMemory;
if (!initCuda(deviceProp, availMemory, mem_size_A + mem_size_B + mem_size_C))
return;
// matrix B is too big. Splitting it column wise.
// Budget ~40% of remaining memory across one B-chunk plus one C-chunk.
int maximumColumnsInMemory = floor(0.4 * (availMemory - mem_size_A) / (matrix_size.uiHC + matrix_size.uiHB) / sizeof(float));
//floor(0.4*(availMemory - mem_size_A) / 2.0 / sizeof(float) / matrix_size.uiHC);
size_t memorychunk_B = min(maximumColumnsInMemory * matrix_size.uiHB * sizeof(float), mem_size_B);
size_t memorychunk_C = min(maximumColumnsInMemory * matrix_size.uiHC * sizeof(float), mem_size_C);
int numIterations = ceil((float)matrix_size.uiWC / maximumColumnsInMemory);
float *d_A, *d_B, *d_C;
checkCudaErrors(hipMalloc((void **)&d_A, mem_size_A));
checkCudaErrors(hipMemcpy(d_A, h_A, mem_size_A, hipMemcpyHostToDevice));
// A Always stays in memory
checkCudaErrors(hipMalloc((void **)&d_C, memorychunk_C));
checkCudaErrors(hipMalloc((void **)&d_B, memorychunk_B));
hipblasHandle_t handle;
checkCudaErrors(hipblasCreate(&handle));
for (int iteration = 0; iteration < numIterations; iteration++)
{
int StartColumn = iteration*maximumColumnsInMemory;
int EndColumn = min((iteration + 1)*maximumColumnsInMemory - 1, matrix_size.uiWC - 1);
int numColumnstoCompute = EndColumn - StartColumn + 1;
//mexPrintf("Iteration %d/%d, Columns [%d-%d] of %d\n", 1+iteration, numIterations, StartColumn, EndColumn, matrix_size.uiWC - 1);
size_t mem_cropped_B = numColumnstoCompute * matrix_size.uiHB * sizeof(float);
size_t mem_offset_B = matrix_size.uiHB *StartColumn;
// Load sub-matrix of B into gpu memory
checkCudaErrors(hipMemcpy(d_B, h_B + mem_offset_B, mem_cropped_B, hipMemcpyHostToDevice));
const float alpha = 1.0f;
const float beta = 0.0f;
// C_chunk = 1.0 * A * B_chunk + 0.0 * C_chunk (no transposes; column-major).
checkCudaErrors(hipblasSgemm(handle, HIPBLAS_OP_N, HIPBLAS_OP_N, matrix_size.uiHA, numColumnstoCompute,
matrix_size.uiWA, &alpha, d_A, matrix_size.uiHA, d_B, matrix_size.uiHB, &beta, d_C, matrix_size.uiHC));
size_t mem_cropped_C = numColumnstoCompute * matrix_size.uiHC * sizeof(float);
size_t mem_offset_C = matrix_size.uiHC *StartColumn;
checkCudaErrors(hipDeviceSynchronize());
// copy result from device to host
checkCudaErrors(hipMemcpy(h_C + mem_offset_C, d_C, mem_cropped_C, hipMemcpyDeviceToHost));
}
checkCudaErrors(hipFree(d_A));
checkCudaErrors(hipFree(d_B));
checkCudaErrors(hipFree(d_C));
// Destroy the handle
checkCudaErrors(hipblasDestroy(handle));
hipDeviceReset();
return;
}
| b5da437c699e9c5ca1d16e905802b593aec13a37.cu | #include <cuda_runtime.h>
#include <cublas_v2.h>
// CUDA and CUBLAS functions
#include "device_launch_parameters.h"
#include "helper_string.h"
#include "helper_cuda.h"
#include "mex.h"
#include <stdio.h>
#include <cuda.h>
#ifndef min
#define min(a,b) ((a < b) ? a : b)
#endif
#ifndef max
#define max(a,b) ((a > b) ? a : b)
#endif
// Dimensions for C = A * B: uiW*/uiH* are width (columns) / height (rows)
// of A, B and the result C.
typedef struct _matrixSize // Optional Command-line multiplier for matrix sizes
{
unsigned int uiWA, uiHA, uiWB, uiHB, uiWC, uiHC;
} sMatrixSize;
// Query the current device, its properties, and the available device memory
// (written into `avail`). Returns false only when the device / property
// queries fail. `needed` is only used in the diagnostic print.
// NOTE(review): the final mexPrintf runs only when cudaMemGetInfo FAILS, yet
// it prints an informational line (and `avail` would be stale) — this looks
// like missing braces or an inverted condition; confirm intended behavior.
bool initCuda(cudaDeviceProp &deviceProp, size_t &avail, size_t needed)
{
// Initialize CUDA
int devID;
cudaError_t error;
error = cudaGetDevice(&devID);
if (error != cudaSuccess)
{
mexPrintf("cudaGetDevice returned error code %d, line(%d)\n", error, __LINE__);
return false;
}
error = cudaGetDeviceProperties(&deviceProp, devID);
if (error != cudaSuccess)
{
mexPrintf("cudaGetDeviceProperties returned error code %d, line(%d)\n", error, __LINE__);
return false;
}
size_t total;
error = cudaMemGetInfo(&avail, &total);
if (error != cudaSuccess)
mexPrintf("GPU Device %d: \"%s\" with compute capability %d.%d, %.0f/%.0f MB, need %.0f MB \n", devID, deviceProp.name, deviceProp.major, deviceProp.minor, (double)(avail) / 1e6, (double)(total) / 1e6, (double)needed / 1e6);
return true;
}
// MATLAB MEX entry point: [C] = CudaFastMult(A, B), single precision only.
// Computes C = A * B with cublasSgemm. A stays resident on the device; B and
// C are processed in column chunks sized to fit ~40% of free device memory,
// so B can be larger than GPU memory. MATLAB arrays are column-major, which
// matches cuBLAS, so no transposes are needed.
// NOTE(review): `cudaStatus` is unused, and `phaseBasis` / `K` duplicate
// h_A / h_B — presumably leftovers; confirm before removing.
void mexFunction(int nlhs, mxArray *plhs[],
int nrhs, const mxArray *prhs[]) {
cudaError_t cudaStatus;
sMatrixSize matrix_size;
cudaDeviceProp deviceProp;
if (nrhs < 2 || nlhs != 1)
{
mexPrintf("Use: [C] = CudaFastMult(A,B);");
return;
}
float *phaseBasis = (float*)mxGetData(prhs[0]);
float *K = (float*)mxGetData(prhs[1]);
const size_t *dimA = mxGetDimensions(prhs[0]);
const size_t *dimB = mxGetDimensions(prhs[1]);
if (!mxIsSingle(prhs[0]) || !mxIsSingle(prhs[1]))
{
mexPrintf("Currently supporting only single class variables\n");
return;
}
// dim[0] = rows (height), dim[1] = columns (width); C is HA x WB.
matrix_size.uiWA = dimA[1];
matrix_size.uiHA = dimA[0];
matrix_size.uiWB = dimB[1];
matrix_size.uiHB = dimB[0];
matrix_size.uiWC = matrix_size.uiWB;
matrix_size.uiHC = matrix_size.uiHA;
plhs[0] = mxCreateNumericMatrix(dimA[0], dimB[1], mxSINGLE_CLASS, mxREAL);
float *h_A = (float *)mxGetData(prhs[0]);
float *h_B = (float *)mxGetData(prhs[1]);
float *h_C = (float *)mxGetData(plhs[0]);
size_t size_A = matrix_size.uiWA * matrix_size.uiHA;
size_t mem_size_A = sizeof(float) * size_A;
size_t size_B = matrix_size.uiWB * matrix_size.uiHB;
size_t mem_size_B = sizeof(float) * size_B;
size_t size_C = matrix_size.uiWC * matrix_size.uiHC;
size_t mem_size_C = sizeof(float) * size_C;
size_t availMemory;
if (!initCuda(deviceProp, availMemory, mem_size_A + mem_size_B + mem_size_C))
return;
// matrix B is too big. Splitting it column wise.
// Budget ~40% of remaining memory across one B-chunk plus one C-chunk.
int maximumColumnsInMemory = floor(0.4 * (availMemory - mem_size_A) / (matrix_size.uiHC + matrix_size.uiHB) / sizeof(float));
//floor(0.4*(availMemory - mem_size_A) / 2.0 / sizeof(float) / matrix_size.uiHC);
size_t memorychunk_B = min(maximumColumnsInMemory * matrix_size.uiHB * sizeof(float), mem_size_B);
size_t memorychunk_C = min(maximumColumnsInMemory * matrix_size.uiHC * sizeof(float), mem_size_C);
int numIterations = ceil((float)matrix_size.uiWC / maximumColumnsInMemory);
float *d_A, *d_B, *d_C;
checkCudaErrors(cudaMalloc((void **)&d_A, mem_size_A));
checkCudaErrors(cudaMemcpy(d_A, h_A, mem_size_A, cudaMemcpyHostToDevice));
// A Always stays in memory
checkCudaErrors(cudaMalloc((void **)&d_C, memorychunk_C));
checkCudaErrors(cudaMalloc((void **)&d_B, memorychunk_B));
cublasHandle_t handle;
checkCudaErrors(cublasCreate(&handle));
for (int iteration = 0; iteration < numIterations; iteration++)
{
int StartColumn = iteration*maximumColumnsInMemory;
int EndColumn = min((iteration + 1)*maximumColumnsInMemory - 1, matrix_size.uiWC - 1);
int numColumnstoCompute = EndColumn - StartColumn + 1;
//mexPrintf("Iteration %d/%d, Columns [%d-%d] of %d\n", 1+iteration, numIterations, StartColumn, EndColumn, matrix_size.uiWC - 1);
size_t mem_cropped_B = numColumnstoCompute * matrix_size.uiHB * sizeof(float);
size_t mem_offset_B = matrix_size.uiHB *StartColumn;
// Load sub-matrix of B into gpu memory
checkCudaErrors(cudaMemcpy(d_B, h_B + mem_offset_B, mem_cropped_B, cudaMemcpyHostToDevice));
const float alpha = 1.0f;
const float beta = 0.0f;
// C_chunk = 1.0 * A * B_chunk + 0.0 * C_chunk (no transposes; column-major).
checkCudaErrors(cublasSgemm(handle, CUBLAS_OP_N, CUBLAS_OP_N, matrix_size.uiHA, numColumnstoCompute,
matrix_size.uiWA, &alpha, d_A, matrix_size.uiHA, d_B, matrix_size.uiHB, &beta, d_C, matrix_size.uiHC));
size_t mem_cropped_C = numColumnstoCompute * matrix_size.uiHC * sizeof(float);
size_t mem_offset_C = matrix_size.uiHC *StartColumn;
checkCudaErrors(cudaDeviceSynchronize());
// copy result from device to host
checkCudaErrors(cudaMemcpy(h_C + mem_offset_C, d_C, mem_cropped_C, cudaMemcpyDeviceToHost));
}
checkCudaErrors(cudaFree(d_A));
checkCudaErrors(cudaFree(d_B));
checkCudaErrors(cudaFree(d_C));
// Destroy the handle
checkCudaErrors(cublasDestroy(handle));
cudaDeviceReset();
return;
}
|
28886c56a2d3bf2227b6ae6dd533556ec857b843.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
#include <vector>
#include "caffe/layers/flip_layer.hpp"
namespace caffe {
// Flip an NCHW blob along H and/or W: each output element (n,c,h,w) reads
// the mirrored source element. One thread per element via CUDA_KERNEL_LOOP.
template <typename Dtype>
__global__ void FlipKernel(const int num, const int channels, const int height, const int width,
const Dtype* src_data, Dtype* target_data, bool flip_height, bool flip_width) {
CUDA_KERNEL_LOOP(index, num * channels * height * width) {
// Decompose the flat index into (n, c, h, w).
int n = index / (channels * height * width);
int cs = index % (channels * height * width);
int c = cs / (height * width);
int s = cs % (height * width);
int h = s / width;
int w = s % width;
target_data[(((n * channels + c) * height + h) * width) + w] =
src_data[(((n * channels + c) * height + (flip_height ? (height - 1 - h) : h)) * width) + (flip_width ? (width - 1 - w) : w)];
}
}
// Forward pass: top = bottom mirrored along the configured axes
// (flip_height_ / flip_width_). One kernel thread per blob element.
template <typename Dtype>
void FlipLayer<Dtype>::Forward_gpu(const vector<Blob<Dtype>*>& bottom,
const vector<Blob<Dtype>*>& top) {
const Dtype* bottom_data = bottom[0]->gpu_data();
Dtype* top_data = top[0]->mutable_gpu_data();
int num = bottom[0]->num();
int channels = bottom[0]->channels();
int width = bottom[0]->width();
int height = bottom[0]->height();
FlipKernel<Dtype> << <CAFFE_GET_BLOCKS(num * channels * height * width),
CAFFE_CUDA_NUM_THREADS >> >(num, channels, height, width,
bottom_data, top_data, flip_height_, flip_width_);
}
// Backward pass: flipping is its own inverse, so the gradient is the top
// diff mirrored with the same FlipKernel.
template <typename Dtype>
void FlipLayer<Dtype>::Backward_gpu(const vector<Blob<Dtype>*>& top,
const vector<bool>& propagate_down, const vector<Blob<Dtype>*>& bottom) {
const Dtype* top_diff = top[0]->gpu_diff();
Dtype* bottom_diff = bottom[0]->mutable_gpu_diff();
int num = bottom[0]->num();
int channels = bottom[0]->channels();
int width = bottom[0]->width();
int height = bottom[0]->height();
if (propagate_down[0]) {
FlipKernel<Dtype> << <CAFFE_GET_BLOCKS(num * channels * height * width),
CAFFE_CUDA_NUM_THREADS >> >(num, channels, height, width,
top_diff, bottom_diff, flip_height_, flip_width_);
}
}
INSTANTIATE_LAYER_GPU_FUNCS(FlipLayer);
} // namespace caffe | 28886c56a2d3bf2227b6ae6dd533556ec857b843.cu | #include <vector>
#include "caffe/layers/flip_layer.hpp"
namespace caffe {
// Flip an NCHW blob along H and/or W: each output element (n,c,h,w) reads
// the mirrored source element. One thread per element via CUDA_KERNEL_LOOP.
template <typename Dtype>
__global__ void FlipKernel(const int num, const int channels, const int height, const int width,
    const Dtype* src_data, Dtype* target_data, bool flip_height, bool flip_width) {
  CUDA_KERNEL_LOOP(index, num * channels * height * width) {
    // Decompose the flat index into (n, c, h, w), innermost first.
    const int w = index % width;
    const int h = (index / width) % height;
    const int c = (index / (width * height)) % channels;
    const int n = index / (width * height * channels);
    // Mirrored source coordinates.
    const int src_h = flip_height ? (height - 1 - h) : h;
    const int src_w = flip_width ? (width - 1 - w) : w;
    target_data[(((n * channels + c) * height + h) * width) + w] =
        src_data[(((n * channels + c) * height + src_h) * width) + src_w];
  }
}
// Forward pass: top = bottom mirrored along the configured axes
// (flip_height_ / flip_width_). One kernel thread per blob element.
template <typename Dtype>
void FlipLayer<Dtype>::Forward_gpu(const vector<Blob<Dtype>*>& bottom,
const vector<Blob<Dtype>*>& top) {
const Dtype* bottom_data = bottom[0]->gpu_data();
Dtype* top_data = top[0]->mutable_gpu_data();
int num = bottom[0]->num();
int channels = bottom[0]->channels();
int width = bottom[0]->width();
int height = bottom[0]->height();
FlipKernel<Dtype> << <CAFFE_GET_BLOCKS(num * channels * height * width),
CAFFE_CUDA_NUM_THREADS >> >(num, channels, height, width,
bottom_data, top_data, flip_height_, flip_width_);
}
// Backward pass: flipping is its own inverse, so the gradient is the top
// diff mirrored with the same FlipKernel.
template <typename Dtype>
void FlipLayer<Dtype>::Backward_gpu(const vector<Blob<Dtype>*>& top,
const vector<bool>& propagate_down, const vector<Blob<Dtype>*>& bottom) {
const Dtype* top_diff = top[0]->gpu_diff();
Dtype* bottom_diff = bottom[0]->mutable_gpu_diff();
int num = bottom[0]->num();
int channels = bottom[0]->channels();
int width = bottom[0]->width();
int height = bottom[0]->height();
if (propagate_down[0]) {
FlipKernel<Dtype> << <CAFFE_GET_BLOCKS(num * channels * height * width),
CAFFE_CUDA_NUM_THREADS >> >(num, channels, height, width,
top_diff, bottom_diff, flip_height_, flip_width_);
}
}
INSTANTIATE_LAYER_GPU_FUNCS(FlipLayer);
} // namespace caffe |
18a0e3e7693e65d59c0f873215961ae41e04fbf1.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
#include "./common/book.h"
#include "./common/cpu_bitmap.h"
#define DIM 1000
// https://devtalk.nvidia.com/default/topic/836926/how-to-compile-codes-on-cuda-opengl-interop-from-the-book-cuda-by-example-by-jason-sanders-amp-edw/
// Minimal device-side complex number: real part r, imaginary part i.
struct hipComplex {
float r;
float i;
__device__ hipComplex( float a, float b ) : r(a), i(b) {}
// Squared magnitude |z|^2 (avoids a sqrt; caller compares against a threshold).
__device__ float magnitude2( void ) {
return r * r + i * i;
}
// Complex product: (r + i·j)(a.r + a.i·j).
__device__ hipComplex operator*(const hipComplex& a) {
return hipComplex(r*a.r - i*a.i, i*a.r + r*a.i);
}
__device__ hipComplex operator+(const hipComplex& a) {
return hipComplex(r+a.r, i+a.i);
}
};
// Escape-time membership test for the Julia set at pixel (x, y):
// map the pixel to the complex plane (centered, scaled by 1.2), iterate
// z = z^2 + c up to 200 times, and return 1 if |z|^2 never exceeds 1000
// (in the set), else 0 (diverged).
__device__ int julia( int x, int y ) {
const float scale = 1.2;
float jx = scale * (float)(DIM/2 - x)/(DIM/2);
float jy = scale * (float)(DIM/2 - y)/(DIM/2);
hipComplex c(-0.8, 0.156);
hipComplex a(jx, jy);
int i = 0;
for (i=0; i<200; i++) {
a = a * a + c;
if (a.magnitude2() > 1000)
return 0;
}
return 1;
}
// One block per pixel (launched with a DIM x DIM grid, 1 thread per block);
// blockIdx encodes (x, y). Writes RGBA: 0 (black) for pixels in the Julia
// set, 255 (white) otherwise; alpha is always 255.
__global__ void kernel( unsigned char *ptr ) {
// map from blockIdx to pixel position
int x = blockIdx.x;
int y = blockIdx.y;
int offset = x + y * gridDim.x;
// now calculate the value at that position
int juliaValue = julia( x, y );
ptr[offset*4 + 0] = 255 * (1-juliaValue);
ptr[offset*4 + 1] = 255 * (1-juliaValue);
ptr[offset*4 + 2] = 255 * (1-juliaValue);
ptr[offset*4 + 3] = 255;
}
// globals needed by the update routine
// Holds the device bitmap pointer passed to CPUBitmap as user data.
struct DataBlock {
unsigned char *dev_bitmap;
};
// Allocate a DIM x DIM RGBA bitmap on the device, render the Julia set with
// one block per pixel, copy it back to the host, and display it.
int main( void ) {
DataBlock data;
CPUBitmap bitmap( DIM, DIM, &data );
unsigned char *dev_bitmap;
HANDLE_ERROR( hipMalloc( (void**)&dev_bitmap, bitmap.image_size() ) );
data.dev_bitmap = dev_bitmap;
dim3 grid(DIM,DIM);
hipLaunchKernelGGL(( kernel), dim3(grid),dim3(1), 0, 0, dev_bitmap);
hipMemcpy(bitmap.get_ptr(), dev_bitmap, bitmap.image_size(), hipMemcpyDeviceToHost);
hipFree(dev_bitmap);
bitmap.display_and_exit();
} | 18a0e3e7693e65d59c0f873215961ae41e04fbf1.cu | #include "./common/book.h"
#include "./common/cpu_bitmap.h"
#define DIM 1000
// https://devtalk.nvidia.com/default/topic/836926/how-to-compile-codes-on-cuda-opengl-interop-from-the-book-cuda-by-example-by-jason-sanders-amp-edw/
// Minimal device-side complex number: real part r, imaginary part i.
struct cuComplex {
    float r;
    float i;
    __device__ cuComplex(float re, float im) : r(re), i(im) {}
    // Squared magnitude |z|^2 (avoids a sqrt; caller compares to a threshold).
    __device__ float magnitude2(void) {
        return r * r + i * i;
    }
    // Complex product: (r·a.r − i·a.i) + (i·a.r + r·a.i)·j.
    __device__ cuComplex operator*(const cuComplex& a) {
        float re = r * a.r - i * a.i;
        float im = i * a.r + r * a.i;
        return cuComplex(re, im);
    }
    __device__ cuComplex operator+(const cuComplex& a) {
        return cuComplex(r + a.r, i + a.i);
    }
};
// Escape-time membership test for the Julia set at pixel (x, y):
// map the pixel to the complex plane (centered, scaled by 1.2), iterate
// z = z^2 + c up to 200 times. Returns 1 if |z|^2 stays <= 1000 (in the
// set), 0 on divergence.
__device__ int julia( int x, int y ) {
    const float scale = 1.2;
    float jx = scale * (float)(DIM/2 - x)/(DIM/2);
    float jy = scale * (float)(DIM/2 - y)/(DIM/2);
    cuComplex c(-0.8, 0.156);
    cuComplex a(jx, jy);
    for (int iter = 0; iter < 200; iter++) {
        a = a * a + c;
        if (a.magnitude2() > 1000)
            return 0;   // diverged
    }
    return 1;           // stayed bounded
}
// One block per pixel (DIM x DIM grid, 1 thread per block); blockIdx encodes
// (x, y). Writes RGBA: 0 (black) inside the Julia set, 255 (white) outside;
// alpha is always 255.
__global__ void kernel( unsigned char *ptr ) {
    int x = blockIdx.x;
    int y = blockIdx.y;
    int offset = x + y * gridDim.x;
    int juliaValue = julia( x, y );
    unsigned char shade = 255 * (1 - juliaValue);  // 0 in-set, 255 outside
    ptr[offset * 4 + 0] = shade;
    ptr[offset * 4 + 1] = shade;
    ptr[offset * 4 + 2] = shade;
    ptr[offset * 4 + 3] = 255;
}
// globals needed by the update routine
// Holds the device bitmap pointer passed to CPUBitmap as user data.
struct DataBlock {
unsigned char *dev_bitmap;
};
// Allocate a DIM x DIM RGBA bitmap on the device, render the Julia set with
// one block per pixel, copy it back to the host, and display it.
int main( void ) {
DataBlock data;
CPUBitmap bitmap( DIM, DIM, &data );
unsigned char *dev_bitmap;
HANDLE_ERROR( cudaMalloc( (void**)&dev_bitmap, bitmap.image_size() ) );
data.dev_bitmap = dev_bitmap;
dim3 grid(DIM,DIM);
kernel<<<grid,1>>>(dev_bitmap);
cudaMemcpy(bitmap.get_ptr(), dev_bitmap, bitmap.image_size(), cudaMemcpyDeviceToHost);
cudaFree(dev_bitmap);
bitmap.display_and_exit();
} |
37fa15af4479dee0b155000429b070c1aee965b3.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
#ifdef _MSC_VER
#define _USE_MATH_DEFINES // For M_PI
#endif // _MSC_VER
#include <cmath>
#include "caffe2/operators/roi_align_rotated_op.h"
#include <stdio.h>
#include <cfloat>
#include "caffe2/core/context_gpu.h"
#include "caffe2/utils/math.h"
namespace caffe2 {
namespace {
// Bilinearly interpolate bottom_data (a single height x width channel plane)
// at continuous coordinates (y, x). Coordinates more than one pixel outside
// the plane return 0; coordinates within [-1, 0] are clamped to 0, and the
// high index collapses onto the edge at the far boundary.
template <typename T>
__device__ T bilinear_interpolate(
const T* bottom_data,
const int height,
const int width,
T y,
T x,
const int index /* index for debug only*/) {
// deal with cases that inverse elements are out of feature map boundary
if (y < -1.0 || y > height || x < -1.0 || x > width) {
// empty
return 0;
}
if (y <= 0) {
y = 0;
}
if (x <= 0) {
x = 0;
}
// Integer corners of the 2x2 neighborhood around (y, x).
int y_low = (int)y;
int x_low = (int)x;
int y_high;
int x_high;
if (y_low >= height - 1) {
y_high = y_low = height - 1;
y = (T)y_low;
} else {
y_high = y_low + 1;
}
if (x_low >= width - 1) {
x_high = x_low = width - 1;
x = (T)x_low;
} else {
x_high = x_low + 1;
}
// Fractional offsets (l*) and their complements (h*) form the 4 weights.
T ly = y - y_low;
T lx = x - x_low;
T hy = 1. - ly, hx = 1. - lx;
// do bilinear interpolation
T v1 = bottom_data[y_low * width + x_low];
T v2 = bottom_data[y_low * width + x_high];
T v3 = bottom_data[y_high * width + x_low];
T v4 = bottom_data[y_high * width + x_high];
T w1 = hy * hx, w2 = hy * lx, w3 = ly * hx, w4 = ly * lx;
T val = (w1 * v1 + w2 * v2 + w3 * v3 + w4 * v4);
return val;
}
// Forward pass of rotated RoIAlign. Each output element (n, c, ph, pw) is the
// average of bilinear samples taken inside pooling bin (ph, pw) of RoI n,
// after rotating the sampling grid by the RoI's angle.
// bottom_rois rows hold 6 values: [batch_idx, center_w, center_h, width,
// height, angle_in_degrees]; coordinates are scaled by spatial_scale.
template <typename T>
__global__ void RoIAlignRotatedForward(
const int nthreads,
const T* bottom_data,
const T spatial_scale,
const int channels,
const int height,
const int width,
const int pooled_height,
const int pooled_width,
const int sampling_ratio,
const T* bottom_rois,
T* top_data) {
CUDA_1D_KERNEL_LOOP(index, nthreads) {
// (n, c, ph, pw) is an element in the pooled output
int pw = index % pooled_width;
int ph = (index / pooled_width) % pooled_height;
int c = (index / pooled_width / pooled_height) % channels;
int n = index / pooled_width / pooled_height / channels;
const T* offset_bottom_rois = bottom_rois + n * 6;
int roi_batch_ind = offset_bottom_rois[0];
// Do not round
T roi_center_w = offset_bottom_rois[1] * spatial_scale;
T roi_center_h = offset_bottom_rois[2] * spatial_scale;
T roi_width = offset_bottom_rois[3] * spatial_scale;
T roi_height = offset_bottom_rois[4] * spatial_scale;
T theta = offset_bottom_rois[5] * M_PI / 180.0;
// Force malformed ROIs to be 1x1
roi_width = c10::hip::compat::max(roi_width, (T)1.);
roi_height = c10::hip::compat::max(roi_height, (T)1.);
T bin_size_h = static_cast<T>(roi_height) / static_cast<T>(pooled_height);
T bin_size_w = static_cast<T>(roi_width) / static_cast<T>(pooled_width);
// Start of this RoI's (batch, channel) feature plane.
const T* offset_bottom_data =
bottom_data + (roi_batch_ind * channels + c) * height * width;
// We use roi_bin_grid to sample the grid and mimic integral
int roi_bin_grid_h = (sampling_ratio > 0)
? sampling_ratio
: ceil(roi_height / pooled_height); // e.g., = 2
int roi_bin_grid_w =
(sampling_ratio > 0) ? sampling_ratio : ceil(roi_width / pooled_width);
// roi_start_h and roi_start_w are computed wrt the center of RoI (x, y).
// Appropriate translation needs to be applied after.
T roi_start_h = -roi_height / 2.0;
T roi_start_w = -roi_width / 2.0;
T cosTheta = cos(theta);
T sinTheta = sin(theta);
// We do average (integral) pooling inside a bin
const T count = roi_bin_grid_h * roi_bin_grid_w; // e.g. = 4
T output_val = 0.;
for (int iy = 0; iy < roi_bin_grid_h; iy++) // e.g., iy = 0, 1
{
const T yy = roi_start_h + ph * bin_size_h +
static_cast<T>(iy + .5f) * bin_size_h /
static_cast<T>(roi_bin_grid_h); // e.g., 0.5, 1.5
for (int ix = 0; ix < roi_bin_grid_w; ix++) {
const T xx = roi_start_w + pw * bin_size_w +
static_cast<T>(ix + .5f) * bin_size_w /
static_cast<T>(roi_bin_grid_w);
// Rotate by theta around the center and translate
T x = xx * cosTheta + yy * sinTheta + roi_center_w;
T y = yy * cosTheta - xx * sinTheta + roi_center_h;
T val = bilinear_interpolate(
offset_bottom_data, height, width, y, x, index);
output_val += val;
}
}
output_val /= count;
top_data[index] = output_val;
}
}
} // namespace
// GPU (HIP) implementation of the RoIAlignRotated forward operator for float
// NCHW tensors. Input(0): feature map X (N, C, H, W); Input(1): RoIs R, one
// row of 6 values per RoI. Output(0): (num_rois, C, pooled_h, pooled_w).
template <>
bool RoIAlignRotatedOp<float, CUDAContext>::RunOnDevice() {
auto& X = Input(0); // Input data to pool
auto& R = Input(1); // RoIs
CAFFE_ENFORCE_EQ(order_, StorageOrder::NCHW, "RoIAlign CUDA impl needs NCHW");
if (R.numel() == 0) {
// Handle empty rois
Output(0, {0, X.dim32(1), pooled_height_, pooled_width_}, at::dtype<float>()); // RoI pooled data
return true;
}
CAFFE_ENFORCE_EQ(R.dim(), 2);
CAFFE_ENFORCE_EQ(R.dim32(1), 6);
assert(sampling_ratio_ >= 0);
auto* Y = Output(0, {R.dim32(0), X.dim32(1), pooled_height_, pooled_width_}, at::dtype<float>()); // RoI pooled data
// One thread per output element.
// NOTE(review): no error check (hipGetLastError) after the launch below.
int output_size = Y->numel();
hipLaunchKernelGGL(( RoIAlignRotatedForward<float>)
, dim3(CAFFE_GET_BLOCKS(output_size)),
dim3(CAFFE_CUDA_NUM_THREADS),
0,
context_.cuda_stream(),
output_size,
X.data<float>(),
spatial_scale_,
X.dim32(1),
X.dim32(2),
X.dim32(3),
pooled_height_,
pooled_width_,
sampling_ratio_,
R.data<float>(),
Y->mutable_data<float>());
return true;
}
REGISTER_CUDA_OPERATOR(RoIAlignRotated, RoIAlignRotatedOp<float, CUDAContext>);
} // namespace caffe2
| 37fa15af4479dee0b155000429b070c1aee965b3.cu | #ifdef _MSC_VER
#define _USE_MATH_DEFINES // For M_PI
#endif // _MSC_VER
#include <cmath>
#include "caffe2/operators/roi_align_rotated_op.h"
#include <stdio.h>
#include <cfloat>
#include "caffe2/core/context_gpu.h"
#include "caffe2/utils/math.h"
namespace caffe2 {
namespace {
// Samples the (height x width) plane bottom_data at the fractional location
// (x, y) via bilinear interpolation. Coordinates more than one pixel outside
// the map yield 0; coordinates in [-1, 0] are clamped onto the border, and
// samples past the far edges collapse onto the last row/column.
template <typename T>
__device__ T bilinear_interpolate(
    const T* bottom_data,
    const int height,
    const int width,
    T y,
    T x,
    const int index /* index for debug only*/) {
  // Completely outside the feature map (beyond the -1 tolerance): no value.
  if (y < -1.0 || y > height || x < -1.0 || x > width) {
    return 0;
  }
  // Clamp slightly-negative coordinates onto the map.
  if (y <= 0) {
    y = 0;
  }
  if (x <= 0) {
    x = 0;
  }
  // Integer cell corners surrounding (x, y).
  int y_low = (int)y;
  int x_low = (int)x;
  int y_high;
  int x_high;
  if (y_low >= height - 1) {
    // On (or past) the bottom edge: collapse onto the last row.
    y_high = y_low = height - 1;
    y = (T)y_low;
  } else {
    y_high = y_low + 1;
  }
  if (x_low >= width - 1) {
    // On (or past) the right edge: collapse onto the last column.
    x_high = x_low = width - 1;
    x = (T)x_low;
  } else {
    x_high = x_low + 1;
  }
  // Fractional offsets inside the cell, and their complements.
  T ly = y - y_low;
  T lx = x - x_low;
  T hy = 1. - ly;
  T hx = 1. - lx;
  // Corner samples: top-left, top-right, bottom-left, bottom-right.
  T tl = bottom_data[y_low * width + x_low];
  T tr = bottom_data[y_low * width + x_high];
  T bl = bottom_data[y_high * width + x_low];
  T br = bottom_data[y_high * width + x_high];
  // Blend the four corners by their area weights.
  T w1 = hy * hx;
  T w2 = hy * lx;
  T w3 = ly * hx;
  T w4 = ly * lx;
  return (w1 * tl + w2 * tr + w3 * bl + w4 * br);
}
// Forward pass of rotated RoIAlign. Each output element (n, c, ph, pw) is the
// average of bilinear samples taken inside pooling bin (ph, pw) of RoI n,
// after rotating the sampling grid by the RoI's angle.
// bottom_rois rows hold 6 values: [batch_idx, center_w, center_h, width,
// height, angle_in_degrees]; coordinates are scaled by spatial_scale.
template <typename T>
__global__ void RoIAlignRotatedForward(
const int nthreads,
const T* bottom_data,
const T spatial_scale,
const int channels,
const int height,
const int width,
const int pooled_height,
const int pooled_width,
const int sampling_ratio,
const T* bottom_rois,
T* top_data) {
CUDA_1D_KERNEL_LOOP(index, nthreads) {
// (n, c, ph, pw) is an element in the pooled output
int pw = index % pooled_width;
int ph = (index / pooled_width) % pooled_height;
int c = (index / pooled_width / pooled_height) % channels;
int n = index / pooled_width / pooled_height / channels;
const T* offset_bottom_rois = bottom_rois + n * 6;
int roi_batch_ind = offset_bottom_rois[0];
// Do not round
T roi_center_w = offset_bottom_rois[1] * spatial_scale;
T roi_center_h = offset_bottom_rois[2] * spatial_scale;
T roi_width = offset_bottom_rois[3] * spatial_scale;
T roi_height = offset_bottom_rois[4] * spatial_scale;
T theta = offset_bottom_rois[5] * M_PI / 180.0;
// Force malformed ROIs to be 1x1
roi_width = c10::cuda::compat::max(roi_width, (T)1.);
roi_height = c10::cuda::compat::max(roi_height, (T)1.);
T bin_size_h = static_cast<T>(roi_height) / static_cast<T>(pooled_height);
T bin_size_w = static_cast<T>(roi_width) / static_cast<T>(pooled_width);
// Start of this RoI's (batch, channel) feature plane.
const T* offset_bottom_data =
bottom_data + (roi_batch_ind * channels + c) * height * width;
// We use roi_bin_grid to sample the grid and mimic integral
int roi_bin_grid_h = (sampling_ratio > 0)
? sampling_ratio
: ceil(roi_height / pooled_height); // e.g., = 2
int roi_bin_grid_w =
(sampling_ratio > 0) ? sampling_ratio : ceil(roi_width / pooled_width);
// roi_start_h and roi_start_w are computed wrt the center of RoI (x, y).
// Appropriate translation needs to be applied after.
T roi_start_h = -roi_height / 2.0;
T roi_start_w = -roi_width / 2.0;
T cosTheta = cos(theta);
T sinTheta = sin(theta);
// We do average (integral) pooling inside a bin
const T count = roi_bin_grid_h * roi_bin_grid_w; // e.g. = 4
T output_val = 0.;
for (int iy = 0; iy < roi_bin_grid_h; iy++) // e.g., iy = 0, 1
{
const T yy = roi_start_h + ph * bin_size_h +
static_cast<T>(iy + .5f) * bin_size_h /
static_cast<T>(roi_bin_grid_h); // e.g., 0.5, 1.5
for (int ix = 0; ix < roi_bin_grid_w; ix++) {
const T xx = roi_start_w + pw * bin_size_w +
static_cast<T>(ix + .5f) * bin_size_w /
static_cast<T>(roi_bin_grid_w);
// Rotate by theta around the center and translate
T x = xx * cosTheta + yy * sinTheta + roi_center_w;
T y = yy * cosTheta - xx * sinTheta + roi_center_h;
T val = bilinear_interpolate(
offset_bottom_data, height, width, y, x, index);
output_val += val;
}
}
output_val /= count;
top_data[index] = output_val;
}
}
} // namespace
// CUDA implementation of the RoIAlignRotated forward operator for float NCHW
// tensors. Input(0): feature map X (N, C, H, W); Input(1): RoIs R, one row of
// 6 values per RoI. Output(0): (num_rois, C, pooled_h, pooled_w).
template <>
bool RoIAlignRotatedOp<float, CUDAContext>::RunOnDevice() {
auto& X = Input(0); // Input data to pool
auto& R = Input(1); // RoIs
CAFFE_ENFORCE_EQ(order_, StorageOrder::NCHW, "RoIAlign CUDA impl needs NCHW");
if (R.numel() == 0) {
// Handle empty rois
Output(0, {0, X.dim32(1), pooled_height_, pooled_width_}, at::dtype<float>()); // RoI pooled data
return true;
}
CAFFE_ENFORCE_EQ(R.dim(), 2);
CAFFE_ENFORCE_EQ(R.dim32(1), 6);
assert(sampling_ratio_ >= 0);
auto* Y = Output(0, {R.dim32(0), X.dim32(1), pooled_height_, pooled_width_}, at::dtype<float>()); // RoI pooled data
// One thread per output element.
// NOTE(review): no error check (cudaGetLastError) after the launch below.
int output_size = Y->numel();
RoIAlignRotatedForward<float>
<<<CAFFE_GET_BLOCKS(output_size),
CAFFE_CUDA_NUM_THREADS,
0,
context_.cuda_stream()>>>(
output_size,
X.data<float>(),
spatial_scale_,
X.dim32(1),
X.dim32(2),
X.dim32(3),
pooled_height_,
pooled_width_,
sampling_ratio_,
R.data<float>(),
Y->mutable_data<float>());
return true;
}
REGISTER_CUDA_OPERATOR(RoIAlignRotated, RoIAlignRotatedOp<float, CUDAContext>);
} // namespace caffe2
|
864950e27a0fa4dde3add6aa6bc9ff457d2a89d5.hip | // !!! This is a file automatically generated by hipify!!!
#include <hip/hip_runtime.h>
#include <stdio.h>
#include "datadef.h"
#include "wfloat3.h"
#include "warp_device.cuh"
#include "check_cuda.h"
// Samples the integer fission neutron yield for each of the N active
// histories in the current fission block of the remap vector. nu(E) is
// linearly interpolated between the two bracketing distribution grid points,
// scaled by the particle weight, and stochastically rounded to an integer by
// its fractional part. The reaction code is then bumped by 100 (into the 900
// block) so the history terminates on the next sort.
// NOTE(review): codes 818/819/820/821/838 appear to be WARP-internal fission
// channel numbers — grounded only in the check below; confirm against datadef.h.
__global__ void fission_kernel(unsigned N, unsigned starting_index, cross_section_data* d_xsdata, particle_data* d_particles, unsigned* d_remap){
// declare shared variables
//__shared__ unsigned n_isotopes;
//__shared__ unsigned energy_grid_len;
//__shared__ unsigned total_reaction_channels;
//__shared__ float* energy_grid;
__shared__ dist_container* dist_scatter;
__shared__ unsigned* rxn;
__shared__ unsigned* rn_bank;
__shared__ unsigned* yield;
__shared__ float* weight;
__shared__ float* E;
__shared__ unsigned* index;
// have thread 0 of block copy all pointers and static info into shared memory
if (threadIdx.x == 0){
//n_isotopes = d_xsdata[0].n_isotopes;
//energy_grid_len = d_xsdata[0].energy_grid_len;
//total_reaction_channels = d_xsdata[0].total_reaction_channels;
//energy_grid = d_xsdata[0].energy_grid;
dist_scatter = d_xsdata[0].dist_scatter;
rxn = d_particles[0].rxn;
rn_bank = d_particles[0].rn_bank;
yield = d_particles[0].yield;
weight = d_particles[0].weight;
index = d_particles[0].index;
E = d_particles[0].E;
}
// make sure shared loads happen before anything else
__syncthreads();
// return immediately if out of bounds
int tid_in = threadIdx.x+blockIdx.x*blockDim.x;
if (tid_in >= N){return;}
//remap to active
int tid = d_remap[starting_index + tid_in];
// print and return if wrong
// NOTE(review): bitwise '&' on boolean comparisons — result is correct for
// bools, but '&&' would be the conventional spelling.
unsigned this_rxn = rxn[ starting_index + tid_in];
if ( this_rxn != 818 & this_rxn != 819 & this_rxn != 820 & this_rxn != 821 & this_rxn != 838 ){printf("fission kernel accessing wrong reaction @ dex %u dex_in %u rxn %u\n",tid, tid_in,this_rxn);return;}
// load history data
unsigned this_dex = index[ tid];
unsigned rn = rn_bank[tid];
float this_weight = weight[ tid];
float this_E = E[ tid];
// local variables, load nu from scattering dist variables
if (dist_scatter[this_dex].lower==0x0){
printf("scatter pointer for rxn %d is null!\n",this_rxn);
}
dist_data sdist_lower = dist_scatter[this_dex].lower[0];
dist_data sdist_upper = dist_scatter[this_dex].upper[0];
float nu_t0 = 0.0;
float nu_t1 = 0.0;
float e0 = 0.0;
float e1 = 0.0;
float nu = 0.0;
unsigned inu = 0;
unsigned this_yield = 0;
//unsigned n_columns = n_isotopes + total_reaction_channels;
// copy nu values, energy points from dist, t is len, d is law
// (memcpy reinterprets the stored bits of the len/erg fields as floats)
memcpy(&nu_t0 , &sdist_lower.len, 1*sizeof(float));
memcpy(&nu_t1 , &sdist_upper.len, 1*sizeof(float));
memcpy(&e0 , &sdist_lower.erg, 1*sizeof(float));
memcpy(&e1 , &sdist_upper.erg, 1*sizeof(float));
// interpolate nu total, energy is done in pop
nu = interpolate_linear_energy( this_E, e0, e1, nu_t0, nu_t1 );
if( (this_E > e1 | this_E < e0) & (e0 != e1) ){printf("OUTSIDE bounds in fission! this_E %6.4E e0 %6.4E e1 %6.4E\n",this_E,e0,e1);}
//printf("(this_E,nu) % 6.4E % 6.4E \n",this_E,nu);
// check nu
// Fallback: a zero nu means the yield data is broken; guess a typical value.
if (nu==0.0){
nu=2.8;
printf("something is wrong with fission yields, nu = %6.4E, guessing %4.2f, rxn %u\n",0.0,nu,this_rxn);
}
// multiply nu by weight
nu = this_weight * nu;
// NOTE(review): message says "weight = 0!" but the condition is weight < 1.0.
if (this_weight<1.0){
printf("weight = 0!\n");
}
// get integer part
inu = (unsigned) nu;
// sample floor or ceil based on fractional part
if((float)inu+get_rand(&rn) <= nu){
this_yield = inu+1;
}
else{
this_yield = inu;
}
// put in 900 block to terminate on next sort
this_rxn += 100;
//printf("tid %d rxn %u wgt %6.4E yield %u\n", tid, this_rxn, this_weight, this_yield);
// write
yield[ tid] = this_yield;
rn_bank[tid] = rn;
rxn[starting_index + tid_in] = this_rxn;
}
/**
* \brief a
* \details b
*
* @param[in] stream - CUDA stream to launch the kernel on
* @param[in] NUM_THREADS - the number of threads to run per thread block
* @param[in] N - the total number of threads to launch on the grid for fission
* @param[in] starting_index - starting index of the fission block in the remap vector
* @param[in] d_xsdata - device pointer to cross section data pointer array
* @param[in] d_particles - device pointer to particle data pointer array
* @param[in] d_remap - device pointer to data remapping vector
*/
void fission( hipStream_t stream, unsigned NUM_THREADS, unsigned N, unsigned starting_index, cross_section_data* d_xsdata, particle_data* d_particles, unsigned* d_remap){
  // Empty fission block: nothing to launch.
  if (N == 0) {
    return;
  }
  // Ceiling division so every one of the N histories gets a thread.
  const unsigned blks = (N + NUM_THREADS - 1) / NUM_THREADS;
  hipLaunchKernelGGL((fission_kernel), dim3(blks), dim3(NUM_THREADS), 0, stream,
                     N, starting_index, d_xsdata, d_particles, d_remap);
  // Synchronize so any kernel error surfaces here through check_cuda.
  check_cuda(hipDeviceSynchronize());
}
| 864950e27a0fa4dde3add6aa6bc9ff457d2a89d5.cu | #include <cuda.h>
#include <stdio.h>
#include "datadef.h"
#include "wfloat3.h"
#include "warp_device.cuh"
#include "check_cuda.h"
// Samples the integer fission neutron yield for each of the N active
// histories in the current fission block of the remap vector. nu(E) is
// linearly interpolated between the two bracketing distribution grid points,
// scaled by the particle weight, and stochastically rounded to an integer by
// its fractional part. The reaction code is then bumped by 100 (into the 900
// block) so the history terminates on the next sort.
// NOTE(review): codes 818/819/820/821/838 appear to be WARP-internal fission
// channel numbers — grounded only in the check below; confirm against datadef.h.
__global__ void fission_kernel(unsigned N, unsigned starting_index, cross_section_data* d_xsdata, particle_data* d_particles, unsigned* d_remap){
// declare shared variables
//__shared__ unsigned n_isotopes;
//__shared__ unsigned energy_grid_len;
//__shared__ unsigned total_reaction_channels;
//__shared__ float* energy_grid;
__shared__ dist_container* dist_scatter;
__shared__ unsigned* rxn;
__shared__ unsigned* rn_bank;
__shared__ unsigned* yield;
__shared__ float* weight;
__shared__ float* E;
__shared__ unsigned* index;
// have thread 0 of block copy all pointers and static info into shared memory
if (threadIdx.x == 0){
//n_isotopes = d_xsdata[0].n_isotopes;
//energy_grid_len = d_xsdata[0].energy_grid_len;
//total_reaction_channels = d_xsdata[0].total_reaction_channels;
//energy_grid = d_xsdata[0].energy_grid;
dist_scatter = d_xsdata[0].dist_scatter;
rxn = d_particles[0].rxn;
rn_bank = d_particles[0].rn_bank;
yield = d_particles[0].yield;
weight = d_particles[0].weight;
index = d_particles[0].index;
E = d_particles[0].E;
}
// make sure shared loads happen before anything else
__syncthreads();
// return immediately if out of bounds
int tid_in = threadIdx.x+blockIdx.x*blockDim.x;
if (tid_in >= N){return;}
//remap to active
int tid = d_remap[starting_index + tid_in];
// print and return if wrong
// NOTE(review): bitwise '&' on boolean comparisons — result is correct for
// bools, but '&&' would be the conventional spelling.
unsigned this_rxn = rxn[ starting_index + tid_in];
if ( this_rxn != 818 & this_rxn != 819 & this_rxn != 820 & this_rxn != 821 & this_rxn != 838 ){printf("fission kernel accessing wrong reaction @ dex %u dex_in %u rxn %u\n",tid, tid_in,this_rxn);return;}
// load history data
unsigned this_dex = index[ tid];
unsigned rn = rn_bank[tid];
float this_weight = weight[ tid];
float this_E = E[ tid];
// local variables, load nu from scattering dist variables
if (dist_scatter[this_dex].lower==0x0){
printf("scatter pointer for rxn %d is null!\n",this_rxn);
}
dist_data sdist_lower = dist_scatter[this_dex].lower[0];
dist_data sdist_upper = dist_scatter[this_dex].upper[0];
float nu_t0 = 0.0;
float nu_t1 = 0.0;
float e0 = 0.0;
float e1 = 0.0;
float nu = 0.0;
unsigned inu = 0;
unsigned this_yield = 0;
//unsigned n_columns = n_isotopes + total_reaction_channels;
// copy nu values, energy points from dist, t is len, d is law
// (memcpy reinterprets the stored bits of the len/erg fields as floats)
memcpy(&nu_t0 , &sdist_lower.len, 1*sizeof(float));
memcpy(&nu_t1 , &sdist_upper.len, 1*sizeof(float));
memcpy(&e0 , &sdist_lower.erg, 1*sizeof(float));
memcpy(&e1 , &sdist_upper.erg, 1*sizeof(float));
// interpolate nu total, energy is done in pop
nu = interpolate_linear_energy( this_E, e0, e1, nu_t0, nu_t1 );
if( (this_E > e1 | this_E < e0) & (e0 != e1) ){printf("OUTSIDE bounds in fission! this_E %6.4E e0 %6.4E e1 %6.4E\n",this_E,e0,e1);}
//printf("(this_E,nu) % 6.4E % 6.4E \n",this_E,nu);
// check nu
// Fallback: a zero nu means the yield data is broken; guess a typical value.
if (nu==0.0){
nu=2.8;
printf("something is wrong with fission yields, nu = %6.4E, guessing %4.2f, rxn %u\n",0.0,nu,this_rxn);
}
// multiply nu by weight
nu = this_weight * nu;
// NOTE(review): message says "weight = 0!" but the condition is weight < 1.0.
if (this_weight<1.0){
printf("weight = 0!\n");
}
// get integer part
inu = (unsigned) nu;
// sample floor or ceil based on fractional part
if((float)inu+get_rand(&rn) <= nu){
this_yield = inu+1;
}
else{
this_yield = inu;
}
// put in 900 block to terminate on next sort
this_rxn += 100;
//printf("tid %d rxn %u wgt %6.4E yield %u\n", tid, this_rxn, this_weight, this_yield);
// write
yield[ tid] = this_yield;
rn_bank[tid] = rn;
rxn[starting_index + tid_in] = this_rxn;
}
/**
* \brief a
* \details b
*
* @param[in] stream - CUDA stream to launch the kernel on
* @param[in] NUM_THREADS - the number of threads to run per thread block
* @param[in] N - the total number of threads to launch on the grid for fission
* @param[in] starting_index - starting index of the fission block in the remap vector
* @param[in] d_xsdata - device pointer to cross section data pointer array
* @param[in] d_particles - device pointer to particle data pointer array
* @param[in] d_remap - device pointer to data remapping vector
*/
void fission( cudaStream_t stream, unsigned NUM_THREADS, unsigned N, unsigned starting_index, cross_section_data* d_xsdata, particle_data* d_particles, unsigned* d_remap){
  // Empty fission block: nothing to launch.
  if (N < 1) { return; }
  // Ceiling division so every one of the N histories gets a thread.
  unsigned blks = ( N + NUM_THREADS - 1 ) / NUM_THREADS;
  fission_kernel <<< blks, NUM_THREADS , 0 , stream >>> ( N, starting_index, d_xsdata, d_particles, d_remap );
  // Fix: cudaThreadSynchronize() is deprecated (since CUDA 4.0);
  // cudaDeviceSynchronize() is its documented, behaviorally identical
  // replacement. Synchronizing here lets check_cuda report kernel errors.
  check_cuda(cudaDeviceSynchronize());
}
|
13267276e0fe2d02a766c6f8be3e0d60083692d0.hip | // !!! This is a file automatically generated by hipify!!!
#include<curd_lib_host.h>
#include<curd_lib_host.h>
#include<curd_lib_host.h>
#include<curd_lib_host.h>
#ifndef _BACKPROP_CUDA_KERNEL_H_
#define _BACKPROP_CUDA_KERNEL_H_
#include <stdio.h>
#include "backprop.h"
#include "math.h"
#include "hip/hip_runtime.h"
// Forward-propagation tile: each block handles a HEIGHT-row slice of the
// input layer against WIDTH hidden-unit columns. Products
// input[row] * weight[row][col] are reduced down each shared-memory column;
// after the loop, row 0 holds the per-column partial sums, which the tx==0
// threads write to hidden_partial_sum for the host to finish summing.
__global__ void
bpnn_layerforward_CUDA(float *input_cuda,
float *output_hidden_cuda,
float *input_hidden_cuda,
float *hidden_partial_sum,
int in,
int hid)
{
int by = blockIdx.y;
int tx = threadIdx.x;
int ty = threadIdx.y;
// Flattened weight index, skipping the bias row/column (hence the +1 terms).
int index = ( hid + 1 ) * HEIGHT * by + ( hid + 1 ) * ty + tx + 1 + ( hid + 1 ) ;
int index_in = HEIGHT * by + ty + 1;
__shared__ float input_node[HEIGHT];
__shared__ float weight_matrix[HEIGHT][WIDTH];
// One thread per row stages the input value into shared memory.
if ( tx == 0 )
input_node[ty] = input_cuda[index_in] ;
__syncthreads();
weight_matrix[ty][tx] = input_hidden_cuda[index];
__syncthreads();
weight_matrix[ty][tx] = weight_matrix[ty][tx] * input_node[ty];
__syncthreads();
// Tree reduction down each column; log2(HEIGHT) rounds.
// NOTE(review): float intrinsics __log2f/__powf drive integer loop bounds —
// works for power-of-two HEIGHT but integer arithmetic would be safer.
for ( int i = 1 ; i <= __log2f(HEIGHT) ; i++){
int power_two = __powf(2, i);
if( ty % power_two == 0 )
weight_matrix[ty][tx] = weight_matrix[ty][tx] + weight_matrix[ty + power_two/2][tx];
__syncthreads();
}
//__syncthreads();
// NOTE(review): this overwrites the weights with the (partially reduced)
// products — presumably consumed or restored by the caller; confirm.
input_hidden_cuda[index] = weight_matrix[ty][tx];
/*
for ( unsigned int i = 2 ; i <= HEIGHT ; i *= 2){
unsigned int power_two = i - 1;
if( (ty & power_two) == 0 ) {
weight_matrix[ty][tx] = weight_matrix[ty][tx] + weight_matrix[ty + power_two/2][tx];
}
}
*/
__syncthreads();
// weight_matrix[0][ty] now holds the sum of column ty for this block.
if ( tx == 0 ) {
hidden_partial_sum[by * hid + ty] = weight_matrix[tx][ty];
}
}
// Backpropagation weight update with momentum:
//   w += ETA * delta * ly + MOMENTUM * oldw, with oldw recording the step.
// One thread per weight; threads with ty==0 in block row 0 additionally
// update the bias row (where the layer activation is implicitly 1).
__global__ void bpnn_adjust_weights_cuda(float * delta,
int hid,
float * ly,
int in,
float * w,
float * oldw)
{
  int by = blockIdx.y;
  int tx = threadIdx.x;
  int ty = threadIdx.y;
  // Flattened weight position, skipping the bias row/column.
  int index = ( hid + 1 ) * HEIGHT * by + ( hid + 1 ) * ty + tx + 1 + ( hid + 1 ) ;
  int index_y = HEIGHT * by + ty + 1;
  int index_x = tx + 1;
  // Momentum-smoothed step for this weight (ETA/MOMENTUM are macros).
  float adjust = (ETA * delta[index_x] * ly[index_y]) + (MOMENTUM * oldw[index]);
  w[index] += adjust;
  oldw[index] = adjust;
  __syncthreads();
  // One thread per column applies the corresponding bias update.
  if (ty == 0 && by == 0) {
    float bias_adjust = (ETA * delta[index_x]) + (MOMENTUM * oldw[index_x]);
    w[index_x] += bias_adjust;
    oldw[index_x] = bias_adjust;
  }
}
#endif
| 13267276e0fe2d02a766c6f8be3e0d60083692d0.cu | #include<curd_lib_host.h>
#include<curd_lib_host.h>
#include<curd_lib_host.h>
#include<curd_lib_host.h>
#ifndef _BACKPROP_CUDA_KERNEL_H_
#define _BACKPROP_CUDA_KERNEL_H_
#include <stdio.h>
#include "backprop.h"
#include "math.h"
#include "cuda.h"
// Forward-propagation tile: each block handles a HEIGHT-row slice of the
// input layer against WIDTH hidden-unit columns. Products
// input[row] * weight[row][col] are reduced down each shared-memory column;
// after the loop, row 0 holds the per-column partial sums, which the tx==0
// threads write to hidden_partial_sum for the host to finish summing.
__global__ void
bpnn_layerforward_CUDA(float *input_cuda,
float *output_hidden_cuda,
float *input_hidden_cuda,
float *hidden_partial_sum,
int in,
int hid)
{
int by = blockIdx.y;
int tx = threadIdx.x;
int ty = threadIdx.y;
// Flattened weight index, skipping the bias row/column (hence the +1 terms).
int index = ( hid + 1 ) * HEIGHT * by + ( hid + 1 ) * ty + tx + 1 + ( hid + 1 ) ;
int index_in = HEIGHT * by + ty + 1;
__shared__ float input_node[HEIGHT];
__shared__ float weight_matrix[HEIGHT][WIDTH];
// One thread per row stages the input value into shared memory.
if ( tx == 0 )
input_node[ty] = input_cuda[index_in] ;
__syncthreads();
weight_matrix[ty][tx] = input_hidden_cuda[index];
__syncthreads();
weight_matrix[ty][tx] = weight_matrix[ty][tx] * input_node[ty];
__syncthreads();
// Tree reduction down each column; log2(HEIGHT) rounds.
// NOTE(review): float intrinsics __log2f/__powf drive integer loop bounds —
// works for power-of-two HEIGHT but integer arithmetic would be safer.
for ( int i = 1 ; i <= __log2f(HEIGHT) ; i++){
int power_two = __powf(2, i);
if( ty % power_two == 0 )
weight_matrix[ty][tx] = weight_matrix[ty][tx] + weight_matrix[ty + power_two/2][tx];
__syncthreads();
}
//__syncthreads();
// NOTE(review): this overwrites the weights with the (partially reduced)
// products — presumably consumed or restored by the caller; confirm.
input_hidden_cuda[index] = weight_matrix[ty][tx];
/*
for ( unsigned int i = 2 ; i <= HEIGHT ; i *= 2){
unsigned int power_two = i - 1;
if( (ty & power_two) == 0 ) {
weight_matrix[ty][tx] = weight_matrix[ty][tx] + weight_matrix[ty + power_two/2][tx];
}
}
*/
__syncthreads();
// weight_matrix[0][ty] now holds the sum of column ty for this block.
if ( tx == 0 ) {
hidden_partial_sum[by * hid + ty] = weight_matrix[tx][ty];
}
}
// Backpropagation weight update with momentum:
//   w += ETA * delta * ly + MOMENTUM * oldw, with oldw recording the step.
// One thread per weight; threads with ty==0 in block row 0 additionally
// update the bias row (where the layer activation is implicitly 1).
__global__ void bpnn_adjust_weights_cuda(float * delta,
int hid,
float * ly,
int in,
float * w,
float * oldw)
{
int by = blockIdx.y;
int tx = threadIdx.x;
int ty = threadIdx.y;
// Flattened weight position, skipping the bias row/column.
int index = ( hid + 1 ) * HEIGHT * by + ( hid + 1 ) * ty + tx + 1 + ( hid + 1 ) ;
int index_y = HEIGHT * by + ty + 1;
int index_x = tx + 1;
//eta = 0.3;
//momentum = 0.3;
w[index] += ((ETA * delta[index_x] * ly[index_y]) + (MOMENTUM * oldw[index]));
oldw[index] = ((ETA * delta[index_x] * ly[index_y]) + (MOMENTUM * oldw[index]));
__syncthreads();
// One thread per column applies the corresponding bias update.
if (ty == 0 && by ==0){
w[index_x] += ((ETA * delta[index_x]) + (MOMENTUM * oldw[index_x]));
oldw[index_x] = ((ETA * delta[index_x]) + (MOMENTUM * oldw[index_x]));
}
}
#endif
|
1ddd5883f81db4d3bf88facf1a441abf27dadf94.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
/*Author:
Pedro Silva
*/
// Analise o seguinte kernel, colorToGreyScaleConversion() , que produz a converso de cor
// para escala de cinzentos de uma imagem com mapeamento de dados em threads 2D
// Device code
// We have 3 channels corresponding to RGB
// The input image is encoded as unsigned characters [0, 255]
// Converts an interleaved RGB image (CHANNELS bytes per pixel, values 0-255,
// row-major) to a single-channel grayscale image; one thread per pixel.
// NOTE(review): CHANNELS is assumed to be a macro >= 3 defined elsewhere.
__global__ void colorToGreyScaleConvertion(unsigned char * grayImage, unsigned char
*rgbImage, int width, int height)
{
    // 2D pixel coordinate covered by this thread.
    int Col = threadIdx.x + (blockIdx.x * blockDim.x);
    int Row = threadIdx.y + (blockIdx.y * blockDim.y);
    // Guard: the launch grid may overhang the image.
    if (Col < width && Row < height) {
        // 1D offset into the grayscale output.
        int greyOffset = Row * width + Col;
        // The RGB input has CHANNELS values per grayscale pixel.
        int rgbOffset = greyOffset * CHANNELS;
        unsigned char r = rgbImage[rgbOffset];     // red value for pixel
        unsigned char g = rgbImage[rgbOffset + 1]; // green value for pixel
        unsigned char b = rgbImage[rgbOffset + 2]; // blue value for pixel
        // Weighted luminance. Fix: the original wrote to the undeclared
        // identifier 'grayOffset' (typo for 'greyOffset'), which cannot compile.
        grayImage[greyOffset] = 0.21f*r + 0.71f*g + 0.07f*b;
    }
} | 1ddd5883f81db4d3bf88facf1a441abf27dadf94.cu | /*Author:
Pedro Silva
*/
// Analise o seguinte kernel, colorToGreyScaleConversion() , que produz a conversão de cor
// para escala de cinzentos de uma imagem com mapeamento de dados em threads 2D
// Device code
// We have 3 channels corresponding to RGB
// The input image is encoded as unsigned characters [0, 255]
// Converts an interleaved RGB image (CHANNELS bytes per pixel, values 0-255,
// row-major) to a single-channel grayscale image; one thread per pixel.
// NOTE(review): CHANNELS is assumed to be a macro >= 3 defined elsewhere.
__global__ void colorToGreyScaleConvertion(unsigned char * grayImage, unsigned char
*rgbImage, int width, int height)
{
    // 2D pixel coordinate covered by this thread.
    int Col = threadIdx.x + (blockIdx.x * blockDim.x);
    int Row = threadIdx.y + (blockIdx.y * blockDim.y);
    // Guard: the launch grid may overhang the image.
    if (Col < width && Row < height) {
        // 1D offset into the grayscale output.
        int greyOffset = Row * width + Col;
        // The RGB input has CHANNELS values per grayscale pixel.
        int rgbOffset = greyOffset * CHANNELS;
        unsigned char r = rgbImage[rgbOffset];     // red value for pixel
        unsigned char g = rgbImage[rgbOffset + 1]; // green value for pixel
        unsigned char b = rgbImage[rgbOffset + 2]; // blue value for pixel
        // Weighted luminance. Fix: the original wrote to the undeclared
        // identifier 'grayOffset' (typo for 'greyOffset'), which cannot compile.
        grayImage[greyOffset] = 0.21f*r + 0.71f*g + 0.07f*b;
    }
} |
54c52c4746e6ba32427a01e7a33057d34537b754.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
#include "cuOpticalFlowSolver.h"
#include "vector_td_utilities.h"
#include "check_CUDA.h"
#include <stdexcept>
namespace Gadgetron{
//
// Kernel prototype declarations
//
template<class REAL, unsigned int D> __global__
void spatial_grad_kernel(const REAL*, const REAL*,REAL*,typename uint64d<D>::Type,unsigned int,unsigned int);
template<class REAL, unsigned int D> __global__
void temporal_grad_kernel(const REAL*, const REAL*,REAL*,typename uint64d<D>::Type,unsigned int,unsigned int);
// There is some issue about Cuda defining min/max incompatibly...
//
// Largest of two values (b wins ties); local replacement for std::max, which
// CUDA's headers define incompatibly (see the comment above this block).
template <class T> __host__ __device__ const T& _cuOF_max (const T& a, const T& b) {
  if (a < b)
    return b;
  return a;
}
// Smallest of two values (b wins ties); local replacement for std::min.
template <class T> __host__ __device__ const T& _cuOF_min (const T& a, const T& b) {
  if (a > b)
    return b;
  return a;
}
// Chooses launch dimensions covering number_of_elements threads in x and
// num_batches blocks in grid y. 2D blocks pack num_unknowns rows of a
// warp-multiple width (~256 threads); if the 1D grid exceeds the device's
// grid-x limit, the block is first widened to the maximum, then the index
// space is re-folded into grid y. Throws std::runtime_error if the device
// cannot be queried or the limits are still exceeded.
template<class T, unsigned int D> void
cuOpticalFlowSolver<T,D>::setup_grid( dim3 *blockDim, dim3* gridDim,
unsigned int number_of_elements,
unsigned int num_batches,
bool use_2d_blocks,
unsigned int num_unknowns )
{
int device;
hipDeviceProp_t deviceProp;
if( hipGetDevice( &device ) != hipSuccess) {
throw std::runtime_error("cuOpticalFlowSolver::setup_grid(): unable to determine current device");
}
if( hipGetDeviceProperties( &deviceProp, device ) != hipSuccess) {
throw std::runtime_error("cuOpticalFlowSolver::setup_grid(): unable to query current device");
}
// Device limits used to validate/extend the configuration below.
// NOTE(review): these are int, later compared against unsigned grid dims —
// fine while positive, but a signed/unsigned comparison nonetheless.
int max_blockdim = deviceProp.maxThreadsDim[0];
int max_griddim = deviceProp.maxGridSize[0];
int warp_size = deviceProp.warpSize;
// For small arrays we keep the block dimension fairly small
if( use_2d_blocks )
*blockDim = dim3(((256/num_unknowns)/warp_size)*warp_size, num_unknowns);
else
*blockDim = dim3(256);
// Ceiling division: enough blocks to cover all elements; batches go in y.
*gridDim = dim3((number_of_elements+(blockDim->x*blockDim->y)-1)/(blockDim->x*blockDim->y), num_batches);
// Extend block/grid dimensions for large arrays
if( gridDim->x > max_griddim ){
if( use_2d_blocks )
blockDim->x = ((max_blockdim/num_unknowns)/warp_size)*warp_size;
else
blockDim->x = max_blockdim;
gridDim->x = (number_of_elements+(blockDim->x*blockDim->y)-1)/(blockDim->x*blockDim->y);
}
// Still too wide: split the 1D index space across grid x and y. This
// matches the kernels' idx = blockIdx.y*gridDim.x*blockDim.x + ... layout.
if( gridDim->x > max_griddim ){
gridDim->x = ((unsigned int)std::sqrt((T)number_of_elements)+(blockDim->x*blockDim->y)-1)/(blockDim->x*blockDim->y);
gridDim->y *= ((number_of_elements+(blockDim->x*blockDim->y)*gridDim->x-1)/((blockDim->x*blockDim->y)*gridDim->x));
}
if( gridDim->x > max_griddim || gridDim->y > max_griddim ){
throw std::runtime_error("cuOpticalFlowSolver::setup_grid(): maximum grid dimensions exceeded");
}
}
// Launches spatial_grad_kernel to compute the D spatial partial derivatives
// (central differences of the fixed/moving image average) into
// gradient_image. The grid covers all D derivatives across the larger of the
// two batch counts.
template<class T, unsigned int D> void
cuOpticalFlowSolver<T,D>::core_grad_spatial( T *fixed_image, T *moving_image, T *gradient_image,
typename uint64d<D>::Type matrix_size_moving,
size_t number_of_batches_fixed,
size_t number_of_batches_moving )
{
unsigned int number_of_elements = prod(matrix_size_moving);
dim3 blockDim; dim3 gridDim;
setup_grid( &blockDim, &gridDim, number_of_elements, _cuOF_max(number_of_batches_moving, number_of_batches_fixed)*D );
// Invoke kernel (spatial partial derivatives)
hipLaunchKernelGGL(( spatial_grad_kernel<T,D>), dim3(gridDim), dim3(blockDim) , 0, 0,
fixed_image, moving_image, gradient_image, matrix_size_moving, number_of_batches_fixed, number_of_batches_moving );
CHECK_FOR_CUDA_ERROR();
}
// Launches temporal_grad_kernel to compute the temporal partial derivative
// (moving minus fixed image, per the kernel) into gradient_image. The grid
// covers the larger of the two batch counts.
template<class T, unsigned int D> void
cuOpticalFlowSolver<T,D>::core_grad_temporal( T *fixed_image, T *moving_image, T *gradient_image,
typename uint64d<D>::Type matrix_size_moving,
size_t number_of_batches_fixed,
size_t number_of_batches_moving )
{
unsigned int number_of_elements = prod(matrix_size_moving);
dim3 blockDim; dim3 gridDim;
setup_grid( &blockDim, &gridDim, number_of_elements, _cuOF_max(number_of_batches_moving, number_of_batches_fixed) );
// Invoke kernel (temporal partial derivative)
hipLaunchKernelGGL(( temporal_grad_kernel<T,D>), dim3(gridDim), dim3(blockDim) , 0, 0,
fixed_image, moving_image, gradient_image,
matrix_size_moving, number_of_batches_fixed, number_of_batches_moving );
CHECK_FOR_CUDA_ERROR();
}
// Helpers
//
// Builds the D-dimensional unit vector along axis 'dim': that component is 1,
// every other component is 0.
template<unsigned int D> __device__
typename uint64d<D>::Type compute_stride( unsigned int dim )
{
  typename uint64d<D>::Type unit;
  for (unsigned int axis = 0; axis < D; axis++) {
    if (axis == dim)
      unit.vec[axis] = 1;
    else
      unit.vec[axis] = 0;
  }
  return unit;
}
// True when coordinate 'co' sits on the lower border along axis 'dim'
// (i.e. it has no "minus stride" neighbor).
template<unsigned int D> __device__
bool is_border_pixel_in_stride_dim_before( unsigned int dim, typename uint64d<D>::Type co, typename uint64d<D>::Type dims )
{
  return co.vec[dim] == 0;
}
// True when coordinate 'co' sits on the upper border along axis 'dim'
// (i.e. it has no "plus stride" neighbor).
template<unsigned int D> __device__
bool is_border_pixel_in_stride_dim_after( unsigned int dim, typename uint64d<D>::Type co, typename uint64d<D>::Type dims )
{
  return co.vec[dim] == (dims.vec[dim]-1);
}
// Spatial partial derivatives
//
// Computes, for every output element, one spatial partial derivative of the
// per-pixel mean of the fixed and moving images using central differences
// (falling back to a one-sided difference at the image borders, where only
// one neighbor exists). The slowest-varying output dimension selects which of
// the D derivatives a thread computes.
// NOTE(review): the min() over the fixed/moving stride dims appears to let
// the image with fewer batches be reused across the larger batch dimension —
// confirm against the solver's batching convention.
template<class REAL, unsigned int D> __global__ void
spatial_grad_kernel( const REAL * __restrict__ fixed_image, const REAL * __restrict__ moving_image, REAL * __restrict__ gradient_image,
typename uint64d<D>::Type matrix_size,
unsigned int num_batches_fixed, unsigned int num_batches_moving )
{
// Global 1D index (grid y folds in extra blocks; see setup_grid()).
const unsigned int idx = blockIdx.y*gridDim.x*blockDim.x + blockIdx.x*blockDim.x+threadIdx.x;
// Number of elements per partial derivate
const unsigned int num_elements_per_batch = prod(matrix_size);
const unsigned int num_elements_per_pdev_fixed = num_elements_per_batch*num_batches_fixed;
const unsigned int num_elements_per_pdev_moving = num_elements_per_batch*num_batches_moving;
// Total number of elements for all partial derivatives
const unsigned int num_elements_total = _cuOF_max(num_elements_per_pdev_fixed, num_elements_per_pdev_moving)*D;
if( idx < num_elements_total ){
// The (minimum) index in the slowest varying output dimension determines which partial derivative to compute
const unsigned int stride_dim_fixed = idx/(num_elements_per_pdev_fixed);
const unsigned int stride_dim_moving = idx/(num_elements_per_pdev_moving);
const unsigned int stride_dim = _cuOF_min(stride_dim_fixed, stride_dim_moving);
// Local index to the partial derivative
const unsigned int idx_in_pdev_fixed = idx-stride_dim_fixed*num_elements_per_pdev_fixed;
const unsigned int idx_in_pdev_moving = idx-stride_dim_moving*num_elements_per_pdev_moving;
// Batch idx (second slowest varying dimension)
const unsigned int batch_idx_fixed = idx_in_pdev_fixed/num_elements_per_batch;
const unsigned int batch_idx_moving = idx_in_pdev_moving/num_elements_per_batch;
// Local index to the batch (should be identical for the fixed/moving image)
const unsigned int idx_in_batch = idx_in_pdev_moving-batch_idx_moving*num_elements_per_batch;
// Local co to the image
const typename uint64d<D>::Type co = idx_to_co<D>( idx_in_batch, matrix_size );
REAL res;
unsigned int count = 0;
//
// Find partial derivatives using central differences
//
// Unit step along the derivative's axis.
typename uint64d<D>::Type stride = compute_stride<D>(stride_dim);
const unsigned int base_idx_moving = batch_idx_moving*num_elements_per_batch;
const unsigned int base_idx_fixed = batch_idx_fixed*num_elements_per_batch;
unsigned int stride_base_idx, fixed_idx, moving_idx;
// Neighbor "plus stride" side (falls back to the center pixel at the border)
if( !is_border_pixel_in_stride_dim_after<D>( stride_dim, co, matrix_size )){
stride_base_idx = co_to_idx<D>(co+stride, matrix_size);
count++;
}
else{
stride_base_idx = idx_in_batch;
}
fixed_idx = stride_base_idx+base_idx_fixed;
moving_idx = stride_base_idx+base_idx_moving;
// Derivative of the average image: 0.5*(fixed + moving) at each sample.
res = (fixed_image[fixed_idx]+moving_image[moving_idx])*REAL(0.5);
// Neighbor "minus stride" side
if( !is_border_pixel_in_stride_dim_before<D>( stride_dim, co, matrix_size )){
stride_base_idx = co_to_idx<D>(co-stride, matrix_size);
count++;
}
else{
stride_base_idx = co_to_idx<D>(co, matrix_size);
}
fixed_idx = stride_base_idx+base_idx_fixed;
moving_idx = stride_base_idx+base_idx_moving;
res -= (fixed_image[fixed_idx]+moving_image[moving_idx])*REAL(0.5);
// Central difference spans two pixels; one-sided differences span one.
if( count == 2 ) // Both neighbors exist
res /= REAL(2);
// Output result
//
gradient_image[idx] = res;
}
}
// Temporal partial derivatives
//
template<class REAL, unsigned int D> __global__ void
temporal_grad_kernel( const REAL * __restrict__ fixed_image, const REAL * __restrict__ moving_image, REAL * __restrict__ gradient_image,
typename uint64d<D>::Type matrix_size,
unsigned int num_batches_fixed, unsigned int num_batches_moving )
{
const unsigned int idx = blockIdx.y*gridDim.x*blockDim.x + blockIdx.x*blockDim.x+threadIdx.x;
// Number of elements per partial derivate
const unsigned int num_elements_per_batch = prod(matrix_size);
const unsigned int num_elements_per_pdev_fixed = num_elements_per_batch*num_batches_fixed;
const unsigned int num_elements_per_pdev_moving = num_elements_per_batch*num_batches_moving;
// Total number of elements for all partial derivatives
const unsigned int num_elements_total = _cuOF_max(num_elements_per_pdev_fixed, num_elements_per_pdev_moving);
if( idx < num_elements_total ){
const unsigned int stride_dim_fixed = idx/(num_elements_per_pdev_fixed);
const unsigned int stride_dim_moving = idx/(num_elements_per_pdev_moving);
// Local index to the partial derivative
const unsigned int idx_in_pdev_fixed = idx-stride_dim_fixed*num_elements_per_pdev_fixed;
const unsigned int idx_in_pdev_moving = idx-stride_dim_moving*num_elements_per_pdev_moving;
// Batch idx (second slowest varying dimension)
const unsigned int batch_idx_fixed = idx_in_pdev_fixed/num_elements_per_batch;
const unsigned int batch_idx_moving = idx_in_pdev_moving/num_elements_per_batch;
// Local index to the batch (should be identical for the fixed/moving image)
const unsigned int idx_in_batch = idx_in_pdev_moving-batch_idx_moving*num_elements_per_batch;
const unsigned int base_idx_fixed = batch_idx_fixed*num_elements_per_batch;
const unsigned int base_idx_moving = batch_idx_moving*num_elements_per_batch;
// Ctr pixel
const unsigned int fixed_idx = idx_in_batch+base_idx_fixed;
const unsigned int moving_idx = idx_in_batch+base_idx_moving;
const REAL res = moving_image[moving_idx]-fixed_image[fixed_idx];
// Output result
//
gradient_image[idx] = res;
}
}
//
// Template instantiation
//
template class EXPORTGPUREG cuOpticalFlowSolver<float,1>;
template class EXPORTGPUREG cuOpticalFlowSolver<float,2>;
template class EXPORTGPUREG cuOpticalFlowSolver<float,3>;
template class EXPORTGPUREG cuOpticalFlowSolver<float,4>;
template class EXPORTGPUREG cuOpticalFlowSolver<double,1>;
template class EXPORTGPUREG cuOpticalFlowSolver<double,2>;
template class EXPORTGPUREG cuOpticalFlowSolver<double,3>;
template class EXPORTGPUREG cuOpticalFlowSolver<double,4>;
}
| 54c52c4746e6ba32427a01e7a33057d34537b754.cu | #include "cuOpticalFlowSolver.h"
#include "vector_td_utilities.h"
#include "check_CUDA.h"
#include <stdexcept>
namespace Gadgetron{
//
// Kernel prototype declarations
//
template<class REAL, unsigned int D> __global__
void spatial_grad_kernel(const REAL*, const REAL*,REAL*,typename uint64d<D>::Type,unsigned int,unsigned int);
template<class REAL, unsigned int D> __global__
void temporal_grad_kernel(const REAL*, const REAL*,REAL*,typename uint64d<D>::Type,unsigned int,unsigned int);
// There is some issue about Cuda defining min/max incompatibly...
//
template <class T> __host__ __device__ const T& _cuOF_max (const T& a, const T& b) {
return (a<b)?b:a;
}
template <class T> __host__ __device__ const T& _cuOF_min (const T& a, const T& b) {
return (a>b)?b:a;
}
template<class T, unsigned int D> void
cuOpticalFlowSolver<T,D>::setup_grid( dim3 *blockDim, dim3* gridDim,
unsigned int number_of_elements,
unsigned int num_batches,
bool use_2d_blocks,
unsigned int num_unknowns )
{
int device;
cudaDeviceProp deviceProp;
if( cudaGetDevice( &device ) != cudaSuccess) {
throw std::runtime_error("cuOpticalFlowSolver::setup_grid(): unable to determine current device");
}
if( cudaGetDeviceProperties( &deviceProp, device ) != cudaSuccess) {
throw std::runtime_error("cuOpticalFlowSolver::setup_grid(): unable to query current device");
}
int max_blockdim = deviceProp.maxThreadsDim[0];
int max_griddim = deviceProp.maxGridSize[0];
int warp_size = deviceProp.warpSize;
// For small arrays we keep the block dimension fairly small
if( use_2d_blocks )
*blockDim = dim3(((256/num_unknowns)/warp_size)*warp_size, num_unknowns);
else
*blockDim = dim3(256);
*gridDim = dim3((number_of_elements+(blockDim->x*blockDim->y)-1)/(blockDim->x*blockDim->y), num_batches);
// Extend block/grid dimensions for large arrays
if( gridDim->x > max_griddim ){
if( use_2d_blocks )
blockDim->x = ((max_blockdim/num_unknowns)/warp_size)*warp_size;
else
blockDim->x = max_blockdim;
gridDim->x = (number_of_elements+(blockDim->x*blockDim->y)-1)/(blockDim->x*blockDim->y);
}
if( gridDim->x > max_griddim ){
gridDim->x = ((unsigned int)std::sqrt((T)number_of_elements)+(blockDim->x*blockDim->y)-1)/(blockDim->x*blockDim->y);
gridDim->y *= ((number_of_elements+(blockDim->x*blockDim->y)*gridDim->x-1)/((blockDim->x*blockDim->y)*gridDim->x));
}
if( gridDim->x > max_griddim || gridDim->y > max_griddim ){
throw std::runtime_error("cuOpticalFlowSolver::setup_grid(): maximum grid dimensions exceeded");
}
}
template<class T, unsigned int D> void
cuOpticalFlowSolver<T,D>::core_grad_spatial( T *fixed_image, T *moving_image, T *gradient_image,
typename uint64d<D>::Type matrix_size_moving,
size_t number_of_batches_fixed,
size_t number_of_batches_moving )
{
unsigned int number_of_elements = prod(matrix_size_moving);
dim3 blockDim; dim3 gridDim;
setup_grid( &blockDim, &gridDim, number_of_elements, _cuOF_max(number_of_batches_moving, number_of_batches_fixed)*D );
// Invoke kernel (spatial partial derivatives)
spatial_grad_kernel<T,D><<< gridDim, blockDim >>>
( fixed_image, moving_image, gradient_image, matrix_size_moving, number_of_batches_fixed, number_of_batches_moving );
CHECK_FOR_CUDA_ERROR();
}
template<class T, unsigned int D> void
cuOpticalFlowSolver<T,D>::core_grad_temporal( T *fixed_image, T *moving_image, T *gradient_image,
typename uint64d<D>::Type matrix_size_moving,
size_t number_of_batches_fixed,
size_t number_of_batches_moving )
{
unsigned int number_of_elements = prod(matrix_size_moving);
dim3 blockDim; dim3 gridDim;
setup_grid( &blockDim, &gridDim, number_of_elements, _cuOF_max(number_of_batches_moving, number_of_batches_fixed) );
// Invoke kernel (temporal partial derivative)
temporal_grad_kernel<T,D><<< gridDim, blockDim >>>
( fixed_image, moving_image, gradient_image,
matrix_size_moving, number_of_batches_fixed, number_of_batches_moving );
CHECK_FOR_CUDA_ERROR();
}
// Helpers
//
template<unsigned int D> __device__
typename uint64d<D>::Type compute_stride( unsigned int dim )
{
typename uint64d<D>::Type res;
for( unsigned int d=0; d<D; d++ ){
res.vec[d] = (d==dim) ? 1 : 0;
}
return res;
}
template<unsigned int D> __device__
bool is_border_pixel_in_stride_dim_before( unsigned int dim, typename uint64d<D>::Type co, typename uint64d<D>::Type dims )
{
if( co.vec[dim] == 0 )
return true;
else
return false;
}
template<unsigned int D> __device__
bool is_border_pixel_in_stride_dim_after( unsigned int dim, typename uint64d<D>::Type co, typename uint64d<D>::Type dims )
{
if( co.vec[dim] == (dims.vec[dim]-1) )
return true;
else
return false;
}
// Spatial partial derivatives
//
template<class REAL, unsigned int D> __global__ void
spatial_grad_kernel( const REAL * __restrict__ fixed_image, const REAL * __restrict__ moving_image, REAL * __restrict__ gradient_image,
typename uint64d<D>::Type matrix_size,
unsigned int num_batches_fixed, unsigned int num_batches_moving )
{
const unsigned int idx = blockIdx.y*gridDim.x*blockDim.x + blockIdx.x*blockDim.x+threadIdx.x;
// Number of elements per partial derivate
const unsigned int num_elements_per_batch = prod(matrix_size);
const unsigned int num_elements_per_pdev_fixed = num_elements_per_batch*num_batches_fixed;
const unsigned int num_elements_per_pdev_moving = num_elements_per_batch*num_batches_moving;
// Total number of elements for all partial derivatives
const unsigned int num_elements_total = _cuOF_max(num_elements_per_pdev_fixed, num_elements_per_pdev_moving)*D;
if( idx < num_elements_total ){
// The (minimum) index in the slowest varying output dimension determines which partial derivative to compute
const unsigned int stride_dim_fixed = idx/(num_elements_per_pdev_fixed);
const unsigned int stride_dim_moving = idx/(num_elements_per_pdev_moving);
const unsigned int stride_dim = _cuOF_min(stride_dim_fixed, stride_dim_moving);
// Local index to the partial derivative
const unsigned int idx_in_pdev_fixed = idx-stride_dim_fixed*num_elements_per_pdev_fixed;
const unsigned int idx_in_pdev_moving = idx-stride_dim_moving*num_elements_per_pdev_moving;
// Batch idx (second slowest varying dimension)
const unsigned int batch_idx_fixed = idx_in_pdev_fixed/num_elements_per_batch;
const unsigned int batch_idx_moving = idx_in_pdev_moving/num_elements_per_batch;
// Local index to the batch (should be identical for the fixed/moving image)
const unsigned int idx_in_batch = idx_in_pdev_moving-batch_idx_moving*num_elements_per_batch;
// Local co to the image
const typename uint64d<D>::Type co = idx_to_co<D>( idx_in_batch, matrix_size );
REAL res;
unsigned int count = 0;
//
// Find partial derivatives using central differences
//
typename uint64d<D>::Type stride = compute_stride<D>(stride_dim);
const unsigned int base_idx_moving = batch_idx_moving*num_elements_per_batch;
const unsigned int base_idx_fixed = batch_idx_fixed*num_elements_per_batch;
unsigned int stride_base_idx, fixed_idx, moving_idx;
// Neighbor "plus stride" side
if( !is_border_pixel_in_stride_dim_after<D>( stride_dim, co, matrix_size )){
stride_base_idx = co_to_idx<D>(co+stride, matrix_size);
count++;
}
else{
stride_base_idx = idx_in_batch;
}
fixed_idx = stride_base_idx+base_idx_fixed;
moving_idx = stride_base_idx+base_idx_moving;
res = (fixed_image[fixed_idx]+moving_image[moving_idx])*REAL(0.5);
// Neighbor "minus stride" side
if( !is_border_pixel_in_stride_dim_before<D>( stride_dim, co, matrix_size )){
stride_base_idx = co_to_idx<D>(co-stride, matrix_size);
count++;
}
else{
stride_base_idx = co_to_idx<D>(co, matrix_size);
}
fixed_idx = stride_base_idx+base_idx_fixed;
moving_idx = stride_base_idx+base_idx_moving;
res -= (fixed_image[fixed_idx]+moving_image[moving_idx])*REAL(0.5);
if( count == 2 ) // Both neighbors exist
res /= REAL(2);
// Output result
//
gradient_image[idx] = res;
}
}
// Temporal partial derivatives
//
template<class REAL, unsigned int D> __global__ void
temporal_grad_kernel( const REAL * __restrict__ fixed_image, const REAL * __restrict__ moving_image, REAL * __restrict__ gradient_image,
typename uint64d<D>::Type matrix_size,
unsigned int num_batches_fixed, unsigned int num_batches_moving )
{
const unsigned int idx = blockIdx.y*gridDim.x*blockDim.x + blockIdx.x*blockDim.x+threadIdx.x;
// Number of elements per partial derivate
const unsigned int num_elements_per_batch = prod(matrix_size);
const unsigned int num_elements_per_pdev_fixed = num_elements_per_batch*num_batches_fixed;
const unsigned int num_elements_per_pdev_moving = num_elements_per_batch*num_batches_moving;
// Total number of elements for all partial derivatives
const unsigned int num_elements_total = _cuOF_max(num_elements_per_pdev_fixed, num_elements_per_pdev_moving);
if( idx < num_elements_total ){
const unsigned int stride_dim_fixed = idx/(num_elements_per_pdev_fixed);
const unsigned int stride_dim_moving = idx/(num_elements_per_pdev_moving);
// Local index to the partial derivative
const unsigned int idx_in_pdev_fixed = idx-stride_dim_fixed*num_elements_per_pdev_fixed;
const unsigned int idx_in_pdev_moving = idx-stride_dim_moving*num_elements_per_pdev_moving;
// Batch idx (second slowest varying dimension)
const unsigned int batch_idx_fixed = idx_in_pdev_fixed/num_elements_per_batch;
const unsigned int batch_idx_moving = idx_in_pdev_moving/num_elements_per_batch;
// Local index to the batch (should be identical for the fixed/moving image)
const unsigned int idx_in_batch = idx_in_pdev_moving-batch_idx_moving*num_elements_per_batch;
const unsigned int base_idx_fixed = batch_idx_fixed*num_elements_per_batch;
const unsigned int base_idx_moving = batch_idx_moving*num_elements_per_batch;
// Ctr pixel
const unsigned int fixed_idx = idx_in_batch+base_idx_fixed;
const unsigned int moving_idx = idx_in_batch+base_idx_moving;
const REAL res = moving_image[moving_idx]-fixed_image[fixed_idx];
// Output result
//
gradient_image[idx] = res;
}
}
//
// Template instantiation
//
template class EXPORTGPUREG cuOpticalFlowSolver<float,1>;
template class EXPORTGPUREG cuOpticalFlowSolver<float,2>;
template class EXPORTGPUREG cuOpticalFlowSolver<float,3>;
template class EXPORTGPUREG cuOpticalFlowSolver<float,4>;
template class EXPORTGPUREG cuOpticalFlowSolver<double,1>;
template class EXPORTGPUREG cuOpticalFlowSolver<double,2>;
template class EXPORTGPUREG cuOpticalFlowSolver<double,3>;
template class EXPORTGPUREG cuOpticalFlowSolver<double,4>;
}
|
b45e473ab074690d0254810f2ff35541f1782434.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
#include <stdio.h>
__global__ void cube(float * d_out, float * d_in){
int idx=threadIdx.x;
float f =d_in[idx];
d_out[idx]=f*f*f ;
// Todo: Fill in this function
}
int main(int argc, char ** argv) {
const int ARRAY_SIZE = 96;
const int ARRAY_BYTES = ARRAY_SIZE * sizeof(float);
// generate the input array on the host
float h_in[ARRAY_SIZE];
for (int i = 0; i < ARRAY_SIZE; i++) { h_in[i] = float(i);
}
float h_out[ARRAY_SIZE];
// declare GPU memory pointers
float * d_in;
float * d_out;
// allocate GPU memory
hipMalloc((void**) &d_in, ARRAY_BYTES);
hipMalloc((void**) &d_out, ARRAY_BYTES);
// transfer the array to the GPU
hipMemcpy(d_in, h_in, ARRAY_BYTES, hipMemcpyHostToDevice);
// launch the kernel
hipLaunchKernelGGL(( cube), dim3(1), dim3(ARRAY_SIZE), 0, 0, d_out, d_in);
// copy back the result array to the CPU
hipMemcpy(h_out, d_out, ARRAY_BYTES, hipMemcpyDeviceToHost);
// print out the resulting array
for (int i =0; i < ARRAY_SIZE; i++) {
printf("%f", h_out[i]);
printf(((i % 4) != 3) ? "\t" : "\n");
}
hipFree(d_in);
hipFree(d_out);
return 0;
}
| b45e473ab074690d0254810f2ff35541f1782434.cu | #include <stdio.h>
__global__ void cube(float * d_out, float * d_in){
int idx=threadIdx.x;
float f =d_in[idx];
d_out[idx]=f*f*f ;
// Todo: Fill in this function
}
int main(int argc, char ** argv) {
const int ARRAY_SIZE = 96;
const int ARRAY_BYTES = ARRAY_SIZE * sizeof(float);
// generate the input array on the host
float h_in[ARRAY_SIZE];
for (int i = 0; i < ARRAY_SIZE; i++) { h_in[i] = float(i);
}
float h_out[ARRAY_SIZE];
// declare GPU memory pointers
float * d_in;
float * d_out;
// allocate GPU memory
cudaMalloc((void**) &d_in, ARRAY_BYTES);
cudaMalloc((void**) &d_out, ARRAY_BYTES);
// transfer the array to the GPU
cudaMemcpy(d_in, h_in, ARRAY_BYTES, cudaMemcpyHostToDevice);
// launch the kernel
cube<<<1, ARRAY_SIZE>>>(d_out, d_in);
// copy back the result array to the CPU
cudaMemcpy(h_out, d_out, ARRAY_BYTES, cudaMemcpyDeviceToHost);
// print out the resulting array
for (int i =0; i < ARRAY_SIZE; i++) {
printf("%f", h_out[i]);
printf(((i % 4) != 3) ? "\t" : "\n");
}
cudaFree(d_in);
cudaFree(d_out);
return 0;
}
|
96f50f00e1b98ffdc49a30d3160dafc9b81f04c2.hip | // !!! This is a file automatically generated by hipify!!!
#include "../common/common.h"
#include <stdio.h>
#include <hip/hip_runtime.h>
#include <stdlib.h>
/*
* This example demonstrates submitting work to a CUDA stream in depth-first
* order. Work submission in depth-first order may introduce false-dependencies
* between unrelated tasks in different CUDA streams, limiting the parallelism
* of a CUDA application. kernel_1, kernel_2, kernel_3, and kernel_4 simply
* implement identical, dummy computation. Separate kernels are used to make the
* scheduling of these kernels simpler to visualize in the Visual Profiler.
*/
#define N 300000
#define NSTREAM 4
__global__ void kernel_1()
{
double sum = 0.0;
for(int i = 0; i < N; i++)
{
sum = sum + tan(0.1) * tan(0.1);
}
}
__global__ void kernel_2()
{
double sum = 0.0;
for(int i = 0; i < N; i++)
{
sum = sum + tan(0.1) * tan(0.1);
}
}
__global__ void kernel_3()
{
double sum = 0.0;
for(int i = 0; i < N; i++)
{
sum = sum + tan(0.1) * tan(0.1);
}
}
__global__ void kernel_4()
{
double sum = 0.0;
for(int i = 0; i < N; i++)
{
sum = sum + tan(0.1) * tan(0.1);
}
}
int main(int argc, char **argv)
{
int n_streams = NSTREAM;
int isize = 1;
int iblock = 1;
int bigcase = 0;
// get argument from command line
if (argc > 1) n_streams = atoi(argv[1]);
if (argc > 2) bigcase = atoi(argv[2]);
float elapsed_time;
// set up max connectioin
char *iname = "CUDA_DEVICE_MAX_CONNECTIONS";
SET_ENV(iname, "32", 1);
char *ivalue = GET_ENV(iname);
//printf("%s = %s\n", iname, ivalue);
int dev = 0;
hipDeviceProp_t deviceProp;
CHECK(hipGetDeviceProperties(&deviceProp, dev));
printf("> Using Device %d: %s with num_streams=%d\n", dev, deviceProp.name, n_streams);
CHECK(hipSetDevice(dev));
// check if device support hyper-q
if (deviceProp.major < 3 || (deviceProp.major == 3 && deviceProp.minor < 5))
{
if (deviceProp.concurrentKernels == 0)
{
printf("> GPU does not support concurrent kernel execution (SM 3.5 or higher required)\n");
printf("> CUDA kernel runs will be serialized\n");
}
else
{
printf("> GPU does not support HyperQ\n");
printf("> CUDA kernel runs will have limited concurrency\n");
}
}
printf("> Compute Capability %d.%d hardware with %d multi-processors\n",
deviceProp.major, deviceProp.minor, deviceProp.multiProcessorCount);
// Allocate and initialize an array of stream handles
hipStream_t *streams = (hipStream_t *) malloc(n_streams * sizeof(hipStream_t));
for (int i = 0; i < n_streams; i++)
{
CHECK(hipStreamCreate(&(streams[i])));
}
// run kernel with more threads
if (bigcase == 1)
{
iblock = 512;
isize = 1 << 12;
}
// set up execution configuration
dim3 block(iblock);
dim3 grid(isize / iblock);
//printf("> grid %d block %d\n", grid.x, block.x);
// creat events
hipEvent_t start, stop;
CHECK(hipEventCreate(&start));
CHECK(hipEventCreate(&stop));
// record start event
CHECK(hipEventRecord(start, 0));
// dispatch job with depth first ordering
for (int i = 0; i < n_streams; i++)
{
hipLaunchKernelGGL(( kernel_1), dim3(grid), dim3(block), 0, streams[i], );
hipLaunchKernelGGL(( kernel_2), dim3(grid), dim3(block), 0, streams[i], );
hipLaunchKernelGGL(( kernel_3), dim3(grid), dim3(block), 0, streams[i], );
hipLaunchKernelGGL(( kernel_4), dim3(grid), dim3(block), 0, streams[i], );
}
// record stop event
CHECK(hipEventRecord(stop, 0));
CHECK(hipEventSynchronize(stop));
// calculate elapsed time
CHECK(hipEventElapsedTime(&elapsed_time, start, stop));
printf("Measured time for parallel execution = %.3fs\n", elapsed_time / 1000.0f);
// release all stream
for (int i = 0; i < n_streams; i++)
{
CHECK(hipStreamDestroy(streams[i]));
}
free(streams);
// destroy events
CHECK(hipEventDestroy(start));
CHECK(hipEventDestroy(stop));
// reset device
CHECK(hipDeviceReset());
return 0;
}
| 96f50f00e1b98ffdc49a30d3160dafc9b81f04c2.cu | #include "../common/common.h"
#include <stdio.h>
#include <cuda_runtime.h>
#include <stdlib.h>
/*
* This example demonstrates submitting work to a CUDA stream in depth-first
* order. Work submission in depth-first order may introduce false-dependencies
* between unrelated tasks in different CUDA streams, limiting the parallelism
* of a CUDA application. kernel_1, kernel_2, kernel_3, and kernel_4 simply
* implement identical, dummy computation. Separate kernels are used to make the
* scheduling of these kernels simpler to visualize in the Visual Profiler.
*/
#define N 300000
#define NSTREAM 4
__global__ void kernel_1()
{
double sum = 0.0;
for(int i = 0; i < N; i++)
{
sum = sum + tan(0.1) * tan(0.1);
}
}
__global__ void kernel_2()
{
double sum = 0.0;
for(int i = 0; i < N; i++)
{
sum = sum + tan(0.1) * tan(0.1);
}
}
__global__ void kernel_3()
{
double sum = 0.0;
for(int i = 0; i < N; i++)
{
sum = sum + tan(0.1) * tan(0.1);
}
}
__global__ void kernel_4()
{
double sum = 0.0;
for(int i = 0; i < N; i++)
{
sum = sum + tan(0.1) * tan(0.1);
}
}
int main(int argc, char **argv)
{
int n_streams = NSTREAM;
int isize = 1;
int iblock = 1;
int bigcase = 0;
// get argument from command line
if (argc > 1) n_streams = atoi(argv[1]);
if (argc > 2) bigcase = atoi(argv[2]);
float elapsed_time;
// set up max connectioin
char *iname = "CUDA_DEVICE_MAX_CONNECTIONS";
SET_ENV(iname, "32", 1);
char *ivalue = GET_ENV(iname);
//printf("%s = %s\n", iname, ivalue);
int dev = 0;
cudaDeviceProp deviceProp;
CHECK(cudaGetDeviceProperties(&deviceProp, dev));
printf("> Using Device %d: %s with num_streams=%d\n", dev, deviceProp.name, n_streams);
CHECK(cudaSetDevice(dev));
// check if device support hyper-q
if (deviceProp.major < 3 || (deviceProp.major == 3 && deviceProp.minor < 5))
{
if (deviceProp.concurrentKernels == 0)
{
printf("> GPU does not support concurrent kernel execution (SM 3.5 or higher required)\n");
printf("> CUDA kernel runs will be serialized\n");
}
else
{
printf("> GPU does not support HyperQ\n");
printf("> CUDA kernel runs will have limited concurrency\n");
}
}
printf("> Compute Capability %d.%d hardware with %d multi-processors\n",
deviceProp.major, deviceProp.minor, deviceProp.multiProcessorCount);
// Allocate and initialize an array of stream handles
cudaStream_t *streams = (cudaStream_t *) malloc(n_streams * sizeof(cudaStream_t));
for (int i = 0; i < n_streams; i++)
{
CHECK(cudaStreamCreate(&(streams[i])));
}
// run kernel with more threads
if (bigcase == 1)
{
iblock = 512;
isize = 1 << 12;
}
// set up execution configuration
dim3 block(iblock);
dim3 grid(isize / iblock);
//printf("> grid %d block %d\n", grid.x, block.x);
// creat events
cudaEvent_t start, stop;
CHECK(cudaEventCreate(&start));
CHECK(cudaEventCreate(&stop));
// record start event
CHECK(cudaEventRecord(start, 0));
// dispatch job with depth first ordering
for (int i = 0; i < n_streams; i++)
{
kernel_1<<<grid, block, 0, streams[i]>>>();
kernel_2<<<grid, block, 0, streams[i]>>>();
kernel_3<<<grid, block, 0, streams[i]>>>();
kernel_4<<<grid, block, 0, streams[i]>>>();
}
// record stop event
CHECK(cudaEventRecord(stop, 0));
CHECK(cudaEventSynchronize(stop));
// calculate elapsed time
CHECK(cudaEventElapsedTime(&elapsed_time, start, stop));
printf("Measured time for parallel execution = %.3fs\n", elapsed_time / 1000.0f);
// release all stream
for (int i = 0; i < n_streams; i++)
{
CHECK(cudaStreamDestroy(streams[i]));
}
free(streams);
// destroy events
CHECK(cudaEventDestroy(start));
CHECK(cudaEventDestroy(stop));
// reset device
CHECK(cudaDeviceReset());
return 0;
}
|
79d478ce29739a90592f8b83a9f62d33db7a852d.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
#include <iostream>
#include <assert.h>
#include "RayTracing.h"
#include "Device.h"
#include "MathTools.h"
#include "SphereFactory.h"
#include "ConstantMemoryLink.h"
using cpu::IntervalI;
/* on fait des define car conditions ne bougent plus l'excution et c'est plus performant que les IF qui doivent tre valus chaque itration
* Ces valeurs doivent tre cohrente avec celles prsentes dans RayTracingDevice.cu
*/
//TODO PROF: jouer avec ces valeurs. Attention: Il doit y avoir une seule et unique valeur a true.
#define USE_GM false
#define USE_CM false
#define USE_SM true
#define LENGTH 1000
__constant__ Sphere TAB_DATA_CM[LENGTH];
/*----------------------------------------------------------------------*\
|* Declaration *|
\*---------------------------------------------------------------------*/
/*--------------------------------------*\
|* Imported *|
\*-------------------------------------*/
extern __global__ void rayTracing(uchar4* ptrDevPixels, int w, int h, Sphere* ptrDevSpheres, int n, float t);
/*--------------------------------------*\
|* Public *|
\*-------------------------------------*/
/*--------------------------------------*\
|* Private *|
\*-------------------------------------*/
/*----------------------------------------------------------------------*\
|* Implementation *|
\*---------------------------------------------------------------------*/
/*--------------------------------------*\
|* Public *|
\*-------------------------------------*/
/*-------------------------*\
|* Constructeur *|
\*-------------------------*/
RayTracing::RayTracing(int w, int h, int nSphere, float dt)
{
// Inputs
this->w = w;
this->h = h;
this->nSphere = nSphere;
this->dt = dt;
// Tools
this->dg = dim3(16, 2, 1); // disons a optimiser
this->db = dim3(32, 4, 1); // disons a optimiser
int margin = 50;
this->ptrSpheres = SphereFactory::createSpheres(nSphere, w, h, margin);
this->t = 0.0f;
//Outputs
this->title = "RayTracing_CUDA";
// Check:
//print(dg, db);
Device::assertDim(dg, db);
#if USE_CM
copySpheresToConstantMemory();
#endif
sizeSpheres = sizeof(Sphere) * LENGTH;
#if USE_GM == true || USE_SM == true
HANDLE_ERROR(hipMalloc(&ptrDevSpheres, sizeSpheres));
HANDLE_ERROR(hipMemcpy(ptrDevSpheres, ptrSpheres, sizeSpheres, hipMemcpyHostToDevice));
#endif
}
RayTracing::~RayTracing()
{
delete[] this->ptrSpheres;
HANDLE_ERROR(hipFree(ptrDevSpheres));
}
/*-------------------------*\
|* Methode *|
\*-------------------------*/
ConstantMemoryLink constantMemoryLink(void)
{
Sphere* ptrDevTabData;
size_t sizeAll = LENGTH * sizeof(Sphere);
HANDLE_ERROR(hipGetSymbolAddress((void ** )&ptrDevTabData, TAB_DATA_CM));
ConstantMemoryLink cmLink =
{
(void**) ptrDevTabData, LENGTH, sizeAll
};
return cmLink;
}
void RayTracing::copySpheresToConstantMemory()
{
ConstantMemoryLink cmLink = constantMemoryLink();
this->ptrDevSpheres = (Sphere*) cmLink.ptrDevTab;
size_t sizeALL = cmLink.sizeAll;
HANDLE_ERROR(hipMemcpy(ptrDevSpheres, ptrSpheres, sizeALL, hipMemcpyHostToDevice));
}
/**
* Override
* Call periodicly by the API
*/
void RayTracing::process(uchar4* ptrDevPixels, int w, int h)
{
// rayTracing<<<dg,db>>>(ptrDevPixels, w, h, ptrDevSpheres, this->nSphere, t);
#if USE_GM == true || USE_CM == true
hipLaunchKernelGGL(( rayTracing), dim3(dg), dim3(db), 0, 0, ptrDevPixels, w, h, ptrDevSpheres, this->nSphere, t);
#endif
#if USE_SM
hipLaunchKernelGGL(( rayTracing), dim3(dg), dim3(db), sizeSpheres, 0, ptrDevPixels, w, h, ptrDevSpheres, this->nSphere, t);
#endif
}
/**
* Override
* Call periodicly by the API
*/
void RayTracing::animationStep()
{
t += dt;
}
/*--------------*\
|* get *|
\*--------------*/
/**
* Override
*/
float RayTracing::getAnimationPara(void)
{
return t;
}
/**
* Override
*/
int RayTracing::getW(void)
{
return w;
}
/**
* Override
*/
int RayTracing::getH(void)
{
return h;
}
/**
* Override
*/
string RayTracing::getTitle(void)
{
return title;
}
/*--------------------------------------*\
|* Private *|
\*-------------------------------------*/
/*----------------------------------------------------------------------*\
|* End *|
\*---------------------------------------------------------------------*/
| 79d478ce29739a90592f8b83a9f62d33db7a852d.cu | #include <iostream>
#include <assert.h>
#include "RayTracing.h"
#include "Device.h"
#include "MathTools.h"
#include "SphereFactory.h"
#include "ConstantMemoryLink.h"
using cpu::IntervalI;
/* on fait des define car conditions ne bougent plus à l'exécution et c'est plus performant que les IF qui doivent être évalués à chaque itération
* Ces valeurs doivent être cohérente avec celles présentes dans RayTracingDevice.cu
*/
//TODO PROF: jouer avec ces valeurs. Attention: Il doit y avoir une seule et unique valeur a true.
#define USE_GM false
#define USE_CM false
#define USE_SM true
#define LENGTH 1000
__constant__ Sphere TAB_DATA_CM[LENGTH];
/*----------------------------------------------------------------------*\
|* Declaration *|
\*---------------------------------------------------------------------*/
/*--------------------------------------*\
|* Imported *|
\*-------------------------------------*/
extern __global__ void rayTracing(uchar4* ptrDevPixels, int w, int h, Sphere* ptrDevSpheres, int n, float t);
/*--------------------------------------*\
|* Public *|
\*-------------------------------------*/
/*--------------------------------------*\
|* Private *|
\*-------------------------------------*/
/*----------------------------------------------------------------------*\
|* Implementation *|
\*---------------------------------------------------------------------*/
/*--------------------------------------*\
|* Public *|
\*-------------------------------------*/
/*-------------------------*\
|* Constructeur *|
\*-------------------------*/
RayTracing::RayTracing(int w, int h, int nSphere, float dt)
{
// Inputs
this->w = w;
this->h = h;
this->nSphere = nSphere;
this->dt = dt;
// Tools
this->dg = dim3(16, 2, 1); // disons a optimiser
this->db = dim3(32, 4, 1); // disons a optimiser
int margin = 50;
this->ptrSpheres = SphereFactory::createSpheres(nSphere, w, h, margin);
this->t = 0.0f;
//Outputs
this->title = "RayTracing_CUDA";
// Check:
//print(dg, db);
Device::assertDim(dg, db);
#if USE_CM
copySpheresToConstantMemory();
#endif
sizeSpheres = sizeof(Sphere) * LENGTH;
#if USE_GM == true || USE_SM == true
HANDLE_ERROR(cudaMalloc(&ptrDevSpheres, sizeSpheres));
HANDLE_ERROR(cudaMemcpy(ptrDevSpheres, ptrSpheres, sizeSpheres, cudaMemcpyHostToDevice));
#endif
}
RayTracing::~RayTracing()
{
delete[] this->ptrSpheres;
HANDLE_ERROR(cudaFree(ptrDevSpheres));
}
/*-------------------------*\
|* Methode *|
\*-------------------------*/
ConstantMemoryLink constantMemoryLink(void)
{
Sphere* ptrDevTabData;
size_t sizeAll = LENGTH * sizeof(Sphere);
HANDLE_ERROR(cudaGetSymbolAddress((void ** )&ptrDevTabData, TAB_DATA_CM));
ConstantMemoryLink cmLink =
{
(void**) ptrDevTabData, LENGTH, sizeAll
};
return cmLink;
}
void RayTracing::copySpheresToConstantMemory()
{
ConstantMemoryLink cmLink = constantMemoryLink();
this->ptrDevSpheres = (Sphere*) cmLink.ptrDevTab;
size_t sizeALL = cmLink.sizeAll;
HANDLE_ERROR(cudaMemcpy(ptrDevSpheres, ptrSpheres, sizeALL, cudaMemcpyHostToDevice));
}
/**
* Override
* Call periodicly by the API
*/
void RayTracing::process(uchar4* ptrDevPixels, int w, int h)
{
// rayTracing<<<dg,db>>>(ptrDevPixels, w, h, ptrDevSpheres, this->nSphere, t);
#if USE_GM == true || USE_CM == true
rayTracing<<<dg, db>>>(ptrDevPixels, w, h, ptrDevSpheres, this->nSphere, t);
#endif
#if USE_SM
rayTracing<<<dg, db, sizeSpheres>>>(ptrDevPixels, w, h, ptrDevSpheres, this->nSphere, t);
#endif
}
/**
* Override
* Call periodicly by the API
*/
void RayTracing::animationStep()
{
t += dt;
}
/*--------------*\
|* get *|
\*--------------*/
/**
* Override
*/
float RayTracing::getAnimationPara(void)
{
return t;
}
/**
* Override
*/
int RayTracing::getW(void)
{
return w;
}
/**
* Override
*/
int RayTracing::getH(void)
{
return h;
}
/**
* Override
*/
string RayTracing::getTitle(void)
{
return title;
}
/*--------------------------------------*\
|* Private *|
\*-------------------------------------*/
/*----------------------------------------------------------------------*\
|* End *|
\*---------------------------------------------------------------------*/
|
0a9bf5dae76a6d1e3f68c6579425fee62a48eeac.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
/*
* Copyright 2015 NVIDIA Corporation
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
#include <stdio.h>
#include "../debug.h"
__global__ void add(int n, int *a, int *b, int *c)
{
int index = threadIdx.x + blockIdx.x * blockDim.x;
if (index < n)
c[index] = a[index] + b[index];
}
#define N (2048*2048)
#define THREADS_PER_BLOCK 512
/*
 * Host driver: fills two N-element int vectors on the host, adds them on
 * the GPU, and verifies c[i] == a[i] + b[i] back on the host.
 * Prints PASS/FAIL; uses checkCUDA/checkKERNEL from ../debug.h for error
 * checking of every HIP API call and the kernel launch.
 */
int main()
{
    int *a, *b, *c;        /* host copies */
    int *d_a, *d_b, *d_c;  /* device copies */
    int size = N * sizeof( int );
    /* get GPU device number and name */
    int dev;
    hipDeviceProp_t deviceProp;
    checkCUDA( hipGetDevice( &dev ) );
    checkCUDA( hipGetDeviceProperties( &deviceProp, dev ) );
    printf("Using GPU %d: %s\n", dev, deviceProp.name );
    /* allocate space for device copies of a, b, c */
    checkCUDA( hipMalloc( (void **) &d_a, size ) );
    checkCUDA( hipMalloc( (void **) &d_b, size ) );
    checkCUDA( hipMalloc( (void **) &d_c, size ) );
    /* allocate space for host copies of a, b, c and setup input values */
    a = (int *)malloc( size );
    b = (int *)malloc( size );
    c = (int *)malloc( size );
    for( int i = 0; i < N; i++ )
    {
        a[i] = b[i] = i;
        c[i] = 0;
    }
    /* copy inputs to device */
    checkCUDA( hipMemcpy( d_a, a, size, hipMemcpyHostToDevice ) );
    checkCUDA( hipMemcpy( d_b, b, size, hipMemcpyHostToDevice ) );
    /* launch the kernel on the GPU */
    /* FIX: ceiling division. The original N / THREADS_PER_BLOCK + 1 always
       launched one superfluous (fully idle) block, because N here is an
       exact multiple of THREADS_PER_BLOCK. */
    int numBlocks = ( N + THREADS_PER_BLOCK - 1 ) / THREADS_PER_BLOCK;
    hipLaunchKernelGGL(( add), dim3(numBlocks), dim3(THREADS_PER_BLOCK) , 0, 0, N, d_a, d_b, d_c );
    checkKERNEL()
    /* copy result back to host */
    checkCUDA( hipMemcpy( c, d_c, size, hipMemcpyDeviceToHost ) );
    int success = 1;
    for( int i = 0; i < N; i++ )
    {
        if( c[i] != a[i] + b[i] )
        {
            printf("c[%d] = %d\n",i,c[i] );
            success = 0;
            break;
        } /* end if */
    }
    if( success == 1 ) printf("PASS\n");
    else printf("FAIL\n");
    /* clean up */
    free(a);
    free(b);
    free(c);
    checkCUDA( hipFree( d_a ) );
    checkCUDA( hipFree( d_b ) );
    checkCUDA( hipFree( d_c ) );
    checkCUDA( hipDeviceReset() );
    return 0;
} /* end main */
| 0a9bf5dae76a6d1e3f68c6579425fee62a48eeac.cu | /*
* Copyright 2015 NVIDIA Corporation
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
#include <stdio.h>
#include "../debug.h"
/* Element-wise vector addition: c[i] = a[i] + b[i] for i in [0, n). */
__global__ void add(int n, int *a, int *b, int *c)
{
    const int idx = blockIdx.x * blockDim.x + threadIdx.x;
    if (idx >= n)
        return;                 /* tail block: lanes past n do nothing */
    c[idx] = a[idx] + b[idx];
}
#define N (2048*2048)
#define THREADS_PER_BLOCK 512
/*
 * Host driver: fills two N-element int vectors on the host, adds them on
 * the GPU, and verifies c[i] == a[i] + b[i] back on the host.
 * Prints PASS/FAIL; uses checkCUDA/checkKERNEL from ../debug.h for error
 * checking of every CUDA API call and the kernel launch.
 */
int main()
{
    int *a, *b, *c;        /* host copies */
    int *d_a, *d_b, *d_c;  /* device copies */
    int size = N * sizeof( int );
    /* get GPU device number and name */
    int dev;
    cudaDeviceProp deviceProp;
    checkCUDA( cudaGetDevice( &dev ) );
    checkCUDA( cudaGetDeviceProperties( &deviceProp, dev ) );
    printf("Using GPU %d: %s\n", dev, deviceProp.name );
    /* allocate space for device copies of a, b, c */
    checkCUDA( cudaMalloc( (void **) &d_a, size ) );
    checkCUDA( cudaMalloc( (void **) &d_b, size ) );
    checkCUDA( cudaMalloc( (void **) &d_c, size ) );
    /* allocate space for host copies of a, b, c and setup input values */
    a = (int *)malloc( size );
    b = (int *)malloc( size );
    c = (int *)malloc( size );
    for( int i = 0; i < N; i++ )
    {
        a[i] = b[i] = i;
        c[i] = 0;
    }
    /* copy inputs to device */
    checkCUDA( cudaMemcpy( d_a, a, size, cudaMemcpyHostToDevice ) );
    checkCUDA( cudaMemcpy( d_b, b, size, cudaMemcpyHostToDevice ) );
    /* launch the kernel on the GPU */
    /* FIX: ceiling division. The original N / THREADS_PER_BLOCK + 1 always
       launched one superfluous (fully idle) block, because N here is an
       exact multiple of THREADS_PER_BLOCK. */
    int numBlocks = ( N + THREADS_PER_BLOCK - 1 ) / THREADS_PER_BLOCK;
    add<<< numBlocks, THREADS_PER_BLOCK >>>(N, d_a, d_b, d_c );
    checkKERNEL()
    /* copy result back to host */
    checkCUDA( cudaMemcpy( c, d_c, size, cudaMemcpyDeviceToHost ) );
    int success = 1;
    for( int i = 0; i < N; i++ )
    {
        if( c[i] != a[i] + b[i] )
        {
            printf("c[%d] = %d\n",i,c[i] );
            success = 0;
            break;
        } /* end if */
    }
    if( success == 1 ) printf("PASS\n");
    else printf("FAIL\n");
    /* clean up */
    free(a);
    free(b);
    free(c);
    checkCUDA( cudaFree( d_a ) );
    checkCUDA( cudaFree( d_b ) );
    checkCUDA( cudaFree( d_c ) );
    checkCUDA( cudaDeviceReset() );
    return 0;
} /* end main */
|
75c7d98519b9b0fbfa5c6191a440195195053e28.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
#include <stdio.h>
#define CHECK(call) \
{ \
const hipError_t error = call; \
if (error != hipSuccess) { \
fprintf(stderr, "Error: %s:%d, ", __FILE__, __LINE__); \
fprintf(stderr, "code: %d, reason: %s\n", error, \
hipGetErrorString(error)); \
exit(1); \
} \
}
struct GpuTimer {
hipEvent_t start;
hipEvent_t stop;
GpuTimer() {
hipEventCreate(&start);
hipEventCreate(&stop);
}
~GpuTimer() {
hipEventDestroy(start);
hipEventDestroy(stop);
}
void Start() {
hipEventRecord(start, 0);
hipEventSynchronize(start);
}
void Stop() { hipEventRecord(stop, 0); }
float Elapsed() {
float elapsed;
hipEventSynchronize(stop);
hipEventElapsedTime(&elapsed, start, stop);
return elapsed;
}
};
void printArr(int *arr, int n) {
for (int i = 0; i < n; i += 1) {
printf("%d ", arr[i]);
}
printf("\n");
}
/*
Scan within each block's data (work-inefficient), write results to "out", and
write each block's sum to "blkSums" if "blkSums" is not NULL.
*/
/*
 * Exclusive scan (Kogge-Stone style, work-inefficient) of one block's slice
 * of "in" into "out". If "blkSums" is not NULL, blkSums[blockIdx.x] also
 * receives the total sum of this block's input elements.
 * Requires blockDim.x * sizeof(int) bytes of dynamic shared memory.
 */
__global__ void scanBlkKernel(int *in, int n, int *out, int *blkSums) {
  extern __shared__ int section[];
  int i = blockIdx.x * blockDim.x + threadIdx.x;
  // Exclusive scan: each thread loads its left neighbour; thread 0 and
  // out-of-range threads load the identity 0.
  if (i < n && threadIdx.x != 0) {
    section[threadIdx.x] = in[i - 1];
  } else {
    section[threadIdx.x] = 0;
  }
  __syncthreads();
  for (int stride = 1; stride < blockDim.x; stride *= 2) {
    // Copy the partner value before anyone overwrites it, then accumulate.
    int temp = 0;
    if (stride <= threadIdx.x)
      temp = section[threadIdx.x - stride];
    __syncthreads();
    section[threadIdx.x] += temp;
    __syncthreads();
  }
  __syncthreads();
  if (i < n) {
    out[i] = section[threadIdx.x];
  }
  // Publish the block total: exclusive prefix of the last slot ...
  __syncthreads();
  if (blkSums != NULL && threadIdx.x == 0) {
    blkSums[blockIdx.x] = section[blockDim.x - 1];
  }
  // ... plus the block's last in-range input element itself.
  __syncthreads();
  // FIX: this write must also be guarded by blkSums != NULL; the original
  // dereferenced a NULL blkSums here even though the documented contract
  // allows passing NULL.
  if (blkSums != NULL && i < n && threadIdx.x == blockDim.x - 1) {
    blkSums[blockIdx.x] += in[i];
  }
}
// TODO: You can define necessary functions here
// add one of the blkSums elements to all in elements
/* Adds the inclusive-scanned sum of all preceding blocks to every element
   of blocks 1..gridDim.x-1; block 0 is already correct. */
__global__ void addScannedBlkSums(int *out, int n, int *blkSums) {
  const int idx = threadIdx.x + blockIdx.x * blockDim.x;
  if (blockIdx.x == 0 || idx >= n)
    return;  // block 0 has no predecessors; tail lanes do nothing
  out[idx] += blkSums[blockIdx.x - 1];
}
/*
 * Exclusive prefix sum of in[0..n) into out[0..n).
 * Host path: sequential reference scan. Device path: per-block exclusive
 * scan kernel, host-side inclusive scan of the per-block totals, then a
 * second kernel adding each block's preceding total back in.
 * Prints the elapsed time in ms.
 */
void scan(int *in, int n, int *out, bool useDevice = false,
          dim3 blkSize = dim3(1)) {
  GpuTimer timer;
  timer.Start();
  if (useDevice == false) {
    printf("\nScan by host\n");
    // exclusive
    out[0] = 0;
    for (int i = 1; i < n; i++) {
      out[i] = out[i - 1] + in[i - 1];
    }
  } else // Use device
  {
    printf("\nScan by device\n");
    // TODO
    // Ceiling division: one block per blkSize.x input elements.
    dim3 gridSize((n - 1) / blkSize.x + 1);
    printf("GridSize.x: %d\n", gridSize.x);
    int *d_in, *d_out, *d_blkSums;
    CHECK(hipMalloc(&d_in, n * sizeof(int)));
    CHECK(hipMalloc(&d_out, n * sizeof(int)));
    CHECK(hipMalloc(&d_blkSums, gridSize.x * sizeof(int)));
    CHECK(hipMemcpy(d_in, in, n * sizeof(int), hipMemcpyHostToDevice));
    // scan on in and write to out and blkSums
    // (blkSize.x * sizeof(int) bytes of dynamic shared memory per block)
    hipLaunchKernelGGL(( scanBlkKernel), dim3(gridSize), dim3(blkSize), blkSize.x * sizeof(int), 0,
        d_in, n, d_out, d_blkSums);
    CHECK(hipDeviceSynchronize());
    // temp blkSums
    int *blkSums = (int *)malloc(gridSize.x * sizeof(int));
    CHECK(hipMemcpy(blkSums, d_blkSums, gridSize.x * sizeof(int),
                    hipMemcpyDeviceToHost));
    // Inclusive scan blkSums (done on the host; block count is small)
    // printArr(blkSums, gridSize.x);
    for (int i = 1; i < gridSize.x; i += 1)
      blkSums[i] += blkSums[i - 1];
    // printArr(blkSums, gridSize.x);
    CHECK(hipMemcpy(d_blkSums, blkSums, gridSize.x * sizeof(int),
                    hipMemcpyHostToDevice));
    free(blkSums);
    // add blkSums to out
    hipLaunchKernelGGL(( addScannedBlkSums), dim3(gridSize), dim3(blkSize), 0, 0, d_out, n, d_blkSums);
    CHECK(hipDeviceSynchronize());
    CHECK(hipMemcpy(out, d_out, n * sizeof(int), hipMemcpyDeviceToHost));
    CHECK(hipFree(d_in));
    CHECK(hipFree(d_out));
    CHECK(hipFree(d_blkSums));
  }
  timer.Stop();
  printf("Processing time: %.3f ms\n", timer.Elapsed());
}
/* Prints basic hardware properties of device 0: name, compute capability,
   SM count, thread/warp limits, and memory sizes. */
void printDeviceInfo() {
  hipDeviceProp_t devProv;
  CHECK(hipGetDeviceProperties(&devProv, 0));
  printf("**********GPU info**********\n");
  printf("Name: %s\n", devProv.name);
  printf("Compute capability: %d.%d\n", devProv.major, devProv.minor);
  printf("Num SMs: %d\n", devProv.multiProcessorCount);
  printf("Max num threads per SM: %d\n", devProv.maxThreadsPerMultiProcessor);
  printf("Max num warps per SM: %d\n",
         devProv.maxThreadsPerMultiProcessor / devProv.warpSize);
  printf("GMEM: %zu byte\n", devProv.totalGlobalMem);
  printf("SMEM per SM: %zu byte\n", devProv.sharedMemPerMultiprocessor);
  printf("SMEM per block: %zu byte\n", devProv.sharedMemPerBlock);
  printf("****************************\n");
}
/* Compares out[0..n) to correctOut[0..n); prints INCORRECT on the first
   mismatch, CORRECT when all elements agree. */
void checkCorrectness(int *out, int *correctOut, int n) {
  int idx = 0;
  while (idx < n) {
    if (out[idx] != correctOut[idx]) {
      printf("INCORRECT :(\n");
      return;
    }
    ++idx;
  }
  printf("CORRECT :)\n");
}
/*
 * Entry point: prints device info, builds 2^24 + 1 random ints in
 * [-127, 128], scans on host (reference) and device, then compares the two
 * results. Optional argv[1] overrides the block size (default 512).
 */
int main(int argc, char **argv) {
  // PRINT OUT DEVICE INFO
  printDeviceInfo();
  // SET UP INPUT SIZE (deliberately not a multiple of the block size)
  int n = (1 << 24) + 1;
  // n = 512;
  printf("\nInput size: %d\n", n);
  // ALLOCATE MEMORIES
  size_t bytes = n * sizeof(int);
  int *in = (int *)malloc(bytes);
  int *out = (int *)malloc(bytes);        // Device result
  int *correctOut = (int *)malloc(bytes); // Host result
  // SET UP INPUT DATA
  for (int i = 0; i < n; i++)
    in[i] = (int)(rand() & 0xFF) - 127; // random int in [-127, 128]
  // DETERMINE BLOCK SIZE
  dim3 blockSize(512);
  if (argc == 2) {
    blockSize.x = atoi(argv[1]);
  }
  // TEST
  // printArr(in, n);
  // SCAN BY HOST
  scan(in, n, correctOut);
  // TEST
  // printArr(correctOut, n);
  // SCAN BY DEVICE
  scan(in, n, out, true, blockSize);
  checkCorrectness(out, correctOut, n);
  // TEST
  // printArr(out, n);
  // FREE MEMORIES
  free(in);
  free(out);
  free(correctOut);
  return EXIT_SUCCESS;
}
| 75c7d98519b9b0fbfa5c6191a440195195053e28.cu | #include <stdio.h>
#define CHECK(call) \
{ \
const cudaError_t error = call; \
if (error != cudaSuccess) { \
fprintf(stderr, "Error: %s:%d, ", __FILE__, __LINE__); \
fprintf(stderr, "code: %d, reason: %s\n", error, \
cudaGetErrorString(error)); \
exit(1); \
} \
}
struct GpuTimer {
cudaEvent_t start;
cudaEvent_t stop;
GpuTimer() {
cudaEventCreate(&start);
cudaEventCreate(&stop);
}
~GpuTimer() {
cudaEventDestroy(start);
cudaEventDestroy(stop);
}
void Start() {
cudaEventRecord(start, 0);
cudaEventSynchronize(start);
}
void Stop() { cudaEventRecord(stop, 0); }
float Elapsed() {
float elapsed;
cudaEventSynchronize(stop);
cudaEventElapsedTime(&elapsed, start, stop);
return elapsed;
}
};
void printArr(int *arr, int n) {
for (int i = 0; i < n; i += 1) {
printf("%d ", arr[i]);
}
printf("\n");
}
/*
Scan within each block's data (work-inefficient), write results to "out", and
write each block's sum to "blkSums" if "blkSums" is not NULL.
*/
/*
 * Exclusive scan (Kogge-Stone style, work-inefficient) of one block's slice
 * of "in" into "out". If "blkSums" is not NULL, blkSums[blockIdx.x] also
 * receives the total sum of this block's input elements.
 * Requires blockDim.x * sizeof(int) bytes of dynamic shared memory.
 */
__global__ void scanBlkKernel(int *in, int n, int *out, int *blkSums) {
  extern __shared__ int section[];
  int i = blockIdx.x * blockDim.x + threadIdx.x;
  // Exclusive scan: each thread loads its left neighbour; thread 0 and
  // out-of-range threads load the identity 0.
  if (i < n && threadIdx.x != 0) {
    section[threadIdx.x] = in[i - 1];
  } else {
    section[threadIdx.x] = 0;
  }
  __syncthreads();
  for (int stride = 1; stride < blockDim.x; stride *= 2) {
    // Copy the partner value before anyone overwrites it, then accumulate.
    int temp = 0;
    if (stride <= threadIdx.x)
      temp = section[threadIdx.x - stride];
    __syncthreads();
    section[threadIdx.x] += temp;
    __syncthreads();
  }
  __syncthreads();
  if (i < n) {
    out[i] = section[threadIdx.x];
  }
  // Publish the block total: exclusive prefix of the last slot ...
  __syncthreads();
  if (blkSums != NULL && threadIdx.x == 0) {
    blkSums[blockIdx.x] = section[blockDim.x - 1];
  }
  // ... plus the block's last in-range input element itself.
  __syncthreads();
  // FIX: this write must also be guarded by blkSums != NULL; the original
  // dereferenced a NULL blkSums here even though the documented contract
  // allows passing NULL.
  if (blkSums != NULL && i < n && threadIdx.x == blockDim.x - 1) {
    blkSums[blockIdx.x] += in[i];
  }
}
// TODO: You can define necessary functions here
// add one of the blkSums elements to all in elements
/* Adds the inclusive-scanned sum of all preceding blocks to every element
   of blocks 1..gridDim.x-1; block 0 is already correct. */
__global__ void addScannedBlkSums(int *out, int n, int *blkSums) {
  const int idx = threadIdx.x + blockIdx.x * blockDim.x;
  if (blockIdx.x == 0 || idx >= n)
    return;  // block 0 has no predecessors; tail lanes do nothing
  out[idx] += blkSums[blockIdx.x - 1];
}
/*
 * Exclusive prefix sum of in[0..n) into out[0..n).
 * Host path: sequential reference scan. Device path: per-block exclusive
 * scan kernel, host-side inclusive scan of the per-block totals, then a
 * second kernel adding each block's preceding total back in.
 * Prints the elapsed time in ms.
 */
void scan(int *in, int n, int *out, bool useDevice = false,
          dim3 blkSize = dim3(1)) {
  GpuTimer timer;
  timer.Start();
  if (useDevice == false) {
    printf("\nScan by host\n");
    // exclusive
    out[0] = 0;
    for (int i = 1; i < n; i++) {
      out[i] = out[i - 1] + in[i - 1];
    }
  } else // Use device
  {
    printf("\nScan by device\n");
    // TODO
    // Ceiling division: one block per blkSize.x input elements.
    dim3 gridSize((n - 1) / blkSize.x + 1);
    printf("GridSize.x: %d\n", gridSize.x);
    int *d_in, *d_out, *d_blkSums;
    CHECK(cudaMalloc(&d_in, n * sizeof(int)));
    CHECK(cudaMalloc(&d_out, n * sizeof(int)));
    CHECK(cudaMalloc(&d_blkSums, gridSize.x * sizeof(int)));
    CHECK(cudaMemcpy(d_in, in, n * sizeof(int), cudaMemcpyHostToDevice));
    // scan on in and write to out and blkSums
    // (blkSize.x * sizeof(int) bytes of dynamic shared memory per block)
    scanBlkKernel<<<gridSize, blkSize, blkSize.x * sizeof(int)>>>(
        d_in, n, d_out, d_blkSums);
    CHECK(cudaDeviceSynchronize());
    // temp blkSums
    int *blkSums = (int *)malloc(gridSize.x * sizeof(int));
    CHECK(cudaMemcpy(blkSums, d_blkSums, gridSize.x * sizeof(int),
                     cudaMemcpyDeviceToHost));
    // Inclusive scan blkSums (done on the host; block count is small)
    // printArr(blkSums, gridSize.x);
    for (int i = 1; i < gridSize.x; i += 1)
      blkSums[i] += blkSums[i - 1];
    // printArr(blkSums, gridSize.x);
    CHECK(cudaMemcpy(d_blkSums, blkSums, gridSize.x * sizeof(int),
                     cudaMemcpyHostToDevice));
    free(blkSums);
    // add blkSums to out
    addScannedBlkSums<<<gridSize, blkSize>>>(d_out, n, d_blkSums);
    CHECK(cudaDeviceSynchronize());
    CHECK(cudaMemcpy(out, d_out, n * sizeof(int), cudaMemcpyDeviceToHost));
    CHECK(cudaFree(d_in));
    CHECK(cudaFree(d_out));
    CHECK(cudaFree(d_blkSums));
  }
  timer.Stop();
  printf("Processing time: %.3f ms\n", timer.Elapsed());
}
/* Prints basic hardware properties of device 0: name, compute capability,
   SM count, thread/warp limits, and memory sizes. */
void printDeviceInfo() {
  cudaDeviceProp devProv;
  CHECK(cudaGetDeviceProperties(&devProv, 0));
  printf("**********GPU info**********\n");
  printf("Name: %s\n", devProv.name);
  printf("Compute capability: %d.%d\n", devProv.major, devProv.minor);
  printf("Num SMs: %d\n", devProv.multiProcessorCount);
  printf("Max num threads per SM: %d\n", devProv.maxThreadsPerMultiProcessor);
  printf("Max num warps per SM: %d\n",
         devProv.maxThreadsPerMultiProcessor / devProv.warpSize);
  printf("GMEM: %zu byte\n", devProv.totalGlobalMem);
  printf("SMEM per SM: %zu byte\n", devProv.sharedMemPerMultiprocessor);
  printf("SMEM per block: %zu byte\n", devProv.sharedMemPerBlock);
  printf("****************************\n");
}
/* Compares out[0..n) to correctOut[0..n); prints INCORRECT on the first
   mismatch, CORRECT when all elements agree. */
void checkCorrectness(int *out, int *correctOut, int n) {
  int idx = 0;
  while (idx < n) {
    if (out[idx] != correctOut[idx]) {
      printf("INCORRECT :(\n");
      return;
    }
    ++idx;
  }
  printf("CORRECT :)\n");
}
/*
 * Entry point: prints device info, builds 2^24 + 1 random ints in
 * [-127, 128], scans on host (reference) and device, then compares the two
 * results. Optional argv[1] overrides the block size (default 512).
 */
int main(int argc, char **argv) {
  // PRINT OUT DEVICE INFO
  printDeviceInfo();
  // SET UP INPUT SIZE (deliberately not a multiple of the block size)
  int n = (1 << 24) + 1;
  // n = 512;
  printf("\nInput size: %d\n", n);
  // ALLOCATE MEMORIES
  size_t bytes = n * sizeof(int);
  int *in = (int *)malloc(bytes);
  int *out = (int *)malloc(bytes);        // Device result
  int *correctOut = (int *)malloc(bytes); // Host result
  // SET UP INPUT DATA
  for (int i = 0; i < n; i++)
    in[i] = (int)(rand() & 0xFF) - 127; // random int in [-127, 128]
  // DETERMINE BLOCK SIZE
  dim3 blockSize(512);
  if (argc == 2) {
    blockSize.x = atoi(argv[1]);
  }
  // TEST
  // printArr(in, n);
  // SCAN BY HOST
  scan(in, n, correctOut);
  // TEST
  // printArr(correctOut, n);
  // SCAN BY DEVICE
  scan(in, n, out, true, blockSize);
  checkCorrectness(out, correctOut, n);
  // TEST
  // printArr(out, n);
  // FREE MEMORIES
  free(in);
  free(out);
  free(correctOut);
  return EXIT_SUCCESS;
}
|
19fd57bdbaa5233ed192087c58eea9e1f1dbf6c2.hip | // !!! This is a file automatically generated by hipify!!!
#include <random>
#include <vector>
#include <tuple>
#include <cstdio>
#include <cstdlib>
#include <functional>
#include <algorithm>
#include "SyncedMemory.h"
#include "Timer.h"
#include "counting.h"
using namespace std;
#define CHECK {\
auto e = hipDeviceSynchronize();\
if (e != hipSuccess) {\
printf("At " __FILE__ ":%d, %s\n", __LINE__, hipGetErrorString(e));\
abort();\
}\
}
/*
 * Generates a random test case of at least N characters:
 *  - text: words of random lowercase letters separated by runs of '\n'
 *  - pos:  1-based position of each character within its word (0 for '\n')
 *  - head: index in text where each word starts
 * Word/space lengths and characters are drawn from the given RNG engine.
 */
template <typename Engine>
tuple<vector<char>, vector<int>, vector<int>> GenerateTestCase(Engine &eng, const int N) {
  poisson_distribution<int> pd(14.0);
  bernoulli_distribution bd(0.1);
  uniform_int_distribution<int> id1(1, 20);
  uniform_int_distribution<int> id2(1, 5);
  uniform_int_distribution<int> id3('a', 'z');
  tuple<vector<char>, vector<int>, vector<int>> ret;
  auto &text = get<0>(ret);
  auto &pos = get<1>(ret);
  auto &head = get<2>(ret);
  // Word length: Poisson-ish around 9, with a 10% chance of a long word,
  // clamped to [1, 500].
  auto gen_rand_word_len = [&] () -> int {
    return max(1, min(500, pd(eng) - 5 + (bd(eng) ? id1(eng)*20 : 0)));
  };
  auto gen_rand_space_len = [&] () -> int {
    return id2(eng);
  };
  auto gen_rand_char = [&] () {
    return id3(eng);
  };
  // Appends one word; records its start in head and per-char 1-based pos.
  auto AddWord = [&] () {
    head.push_back(text.size());
    int n = gen_rand_word_len();
    for (int i = 0; i < n; ++i) {
      text.push_back(gen_rand_char());
      pos.push_back(i+1);
    }
  };
  // Appends a run of '\n' separators; their pos entries are 0.
  auto AddSpace = [&] () {
    int n = gen_rand_space_len();
    for (int i = 0; i < n; ++i) {
      text.push_back('\n');
      pos.push_back(0);
    }
  };
  AddWord();
  while (text.size() < N) {
    AddSpace();
    AddWord();
  }
  return ret;
}
/*
 * Driver for the counting assignment: generates random text, stages it in
 * host/device SyncedMemory buffers, zeroes the per-part device outputs,
 * seeds the expected head array, and runs the student's Part3.
 */
int main(int argc, char **argv)
{
    // Initialize random text
    default_random_engine engine(12345);
    auto text_pos_head = GenerateTestCase(engine, 20); // 40 MB data
    vector<char> &text = get<0>(text_pos_head);
    vector<int> &pos = get<1>(text_pos_head);
    vector<int> &head = get<2>(text_pos_head);
    // Prepare buffers
    int n = text.size();
    char *text_gpu;
    hipMalloc(&text_gpu, sizeof(char)*n);
    SyncedMemory<char> text_sync(text.data(), text_gpu, n);
    text_sync.get_cpu_wo(); // touch the cpu data
    MemoryBuffer<int> pos_yours(n), head_yours(n);
    auto pos_yours_sync = pos_yours.CreateSync(n);
    auto head_yours_sync = head_yours.CreateSync(n);
    // Create timers
    Timer timer_count_position;
    // Part I
    int *pos_yours_gpu = pos_yours_sync.get_gpu_wo();
    hipMemset(pos_yours_gpu, 0, sizeof(int)*n);
    CHECK;
    // Part I check (reference/result pointers kept for the grading diff)
    const int *golden = pos.data();
    const int *yours = pos_yours_sync.get_cpu_ro();
    // Part II
    int *head_yours_gpu = head_yours_sync.get_gpu_wo();
    hipMemset(head_yours_gpu, 0, sizeof(int)*n);
    CHECK;
    // Part II check
    // FIX: n_head was used without ever being declared, which does not
    // compile; declare it here (same narrowing style as "int n" above).
    int n_head = head.size();
    copy_n(head.begin(), n_head, head_yours_sync.get_cpu_wo());
    // Part III
    // Do whatever your want
    Part3(text_gpu, pos_yours_sync.get_gpu_rw(), head_yours_sync.get_gpu_rw(), n, n_head);
    CHECK;
    hipFree(text_gpu);
    return 0;
}
| 19fd57bdbaa5233ed192087c58eea9e1f1dbf6c2.cu | #include <random>
#include <vector>
#include <tuple>
#include <cstdio>
#include <cstdlib>
#include <functional>
#include <algorithm>
#include "SyncedMemory.h"
#include "Timer.h"
#include "counting.h"
using namespace std;
#define CHECK {\
auto e = cudaDeviceSynchronize();\
if (e != cudaSuccess) {\
printf("At " __FILE__ ":%d, %s\n", __LINE__, cudaGetErrorString(e));\
abort();\
}\
}
/*
 * Generates a random test case of at least N characters:
 *  - text: words of random lowercase letters separated by runs of '\n'
 *  - pos:  1-based position of each character within its word (0 for '\n')
 *  - head: index in text where each word starts
 * Word/space lengths and characters are drawn from the given RNG engine.
 */
template <typename Engine>
tuple<vector<char>, vector<int>, vector<int>> GenerateTestCase(Engine &eng, const int N) {
  poisson_distribution<int> pd(14.0);
  bernoulli_distribution bd(0.1);
  uniform_int_distribution<int> id1(1, 20);
  uniform_int_distribution<int> id2(1, 5);
  uniform_int_distribution<int> id3('a', 'z');
  tuple<vector<char>, vector<int>, vector<int>> ret;
  auto &text = get<0>(ret);
  auto &pos = get<1>(ret);
  auto &head = get<2>(ret);
  // Word length: Poisson-ish around 9, with a 10% chance of a long word,
  // clamped to [1, 500].
  auto gen_rand_word_len = [&] () -> int {
    return max(1, min(500, pd(eng) - 5 + (bd(eng) ? id1(eng)*20 : 0)));
  };
  auto gen_rand_space_len = [&] () -> int {
    return id2(eng);
  };
  auto gen_rand_char = [&] () {
    return id3(eng);
  };
  // Appends one word; records its start in head and per-char 1-based pos.
  auto AddWord = [&] () {
    head.push_back(text.size());
    int n = gen_rand_word_len();
    for (int i = 0; i < n; ++i) {
      text.push_back(gen_rand_char());
      pos.push_back(i+1);
    }
  };
  // Appends a run of '\n' separators; their pos entries are 0.
  auto AddSpace = [&] () {
    int n = gen_rand_space_len();
    for (int i = 0; i < n; ++i) {
      text.push_back('\n');
      pos.push_back(0);
    }
  };
  AddWord();
  while (text.size() < N) {
    AddSpace();
    AddWord();
  }
  return ret;
}
/*
 * Driver for the counting assignment: generates random text, stages it in
 * host/device SyncedMemory buffers, zeroes the per-part device outputs,
 * seeds the expected head array, and runs the student's Part3.
 */
int main(int argc, char **argv)
{
    // Initialize random text
    default_random_engine engine(12345);
    auto text_pos_head = GenerateTestCase(engine, 20); // 40 MB data
    vector<char> &text = get<0>(text_pos_head);
    vector<int> &pos = get<1>(text_pos_head);
    vector<int> &head = get<2>(text_pos_head);
    // Prepare buffers
    int n = text.size();
    char *text_gpu;
    cudaMalloc(&text_gpu, sizeof(char)*n);
    SyncedMemory<char> text_sync(text.data(), text_gpu, n);
    text_sync.get_cpu_wo(); // touch the cpu data
    MemoryBuffer<int> pos_yours(n), head_yours(n);
    auto pos_yours_sync = pos_yours.CreateSync(n);
    auto head_yours_sync = head_yours.CreateSync(n);
    // Create timers
    Timer timer_count_position;
    // Part I
    int *pos_yours_gpu = pos_yours_sync.get_gpu_wo();
    cudaMemset(pos_yours_gpu, 0, sizeof(int)*n);
    CHECK;
    // Part I check (reference/result pointers kept for the grading diff)
    const int *golden = pos.data();
    const int *yours = pos_yours_sync.get_cpu_ro();
    // Part II
    int *head_yours_gpu = head_yours_sync.get_gpu_wo();
    cudaMemset(head_yours_gpu, 0, sizeof(int)*n);
    CHECK;
    // Part II check
    // FIX: n_head was used without ever being declared, which does not
    // compile; declare it here (same narrowing style as "int n" above).
    int n_head = head.size();
    copy_n(head.begin(), n_head, head_yours_sync.get_cpu_wo());
    // Part III
    // Do whatever your want
    Part3(text_gpu, pos_yours_sync.get_gpu_rw(), head_yours_sync.get_gpu_rw(), n, n_head);
    CHECK;
    cudaFree(text_gpu);
    return 0;
}
|
7e3f24e3737133aa5676918d610aea048f7b6e57.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
#include "device_launch_parameters.h"
/* 3-component dot product, double precision. */
__device__ double dot_prod_3_d_gpu(double * v1, double * v2)
{
    double acc = 0.0;
    acc += v1[0] * v2[0];   /* same left-to-right accumulation order */
    acc += v1[1] * v2[1];
    acc += v1[2] * v2[2];
    return acc;
}
/* 3-component dot product, single precision. */
__device__ float dot_prod_3_f_gpu(float * v1, float * v2)
{
    /* FIX: initialise the float accumulator with a float literal instead
       of the double literal 0.0. */
    float tmp = 0.0f;
    for (int i = 0; i < 3; ++i) tmp += v1[i] * v2[i];
    return tmp;
}
/* 3-component dot product, integer. */
__device__ int dot_prod_3_i_gpu(int * v1, int * v2)
{
    /* FIX: the accumulator is an int; initialise with 0, not the double
       literal 0.0. */
    int tmp = 0;
    for (int i = 0; i < 3; ++i) tmp += v1[i] * v2[i];
    return tmp;
} | 7e3f24e3737133aa5676918d610aea048f7b6e57.cu | #include "cuda_runtime.h"
#include "device_launch_parameters.h"
/* 3-component dot product, double precision. */
__device__ double dot_prod_3_d_gpu(double * v1, double * v2)
{
    double acc = 0.0;
    acc += v1[0] * v2[0];   /* same left-to-right accumulation order */
    acc += v1[1] * v2[1];
    acc += v1[2] * v2[2];
    return acc;
}
/* 3-component dot product, single precision. */
__device__ float dot_prod_3_f_gpu(float * v1, float * v2)
{
    /* FIX: initialise the float accumulator with a float literal instead
       of the double literal 0.0. */
    float tmp = 0.0f;
    for (int i = 0; i < 3; ++i) tmp += v1[i] * v2[i];
    return tmp;
}
/* 3-component dot product, integer. */
__device__ int dot_prod_3_i_gpu(int * v1, int * v2)
{
    /* FIX: the accumulator is an int; initialise with 0, not the double
       literal 0.0. */
    int tmp = 0;
    for (int i = 0; i < 3; ++i) tmp += v1[i] * v2[i];
    return tmp;
} |
d624b58421010281fdf0d551679bcaa1e9c3f10a.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
// ------------------------------------------------------------------
// Fast R-CNN
// Copyright (c) 2015 Microsoft
// Licensed under The MIT License [see fast-rcnn/LICENSE for details]
// Written by Ross Girshick
// ------------------------------------------------------------------
#include <cfloat>
#include "caffe/layers/nearest_neighbor_layer.hpp"
using std::max;
using std::min;
namespace caffe {
// 2x nearest-neighbor upsampling forward: one thread per element of the
// top (output) blob; each copies the bottom pixel at halved coordinates.
template <typename Dtype>
__global__ void NearestNeighborForward(const int nthreads, const Dtype* bottom_data,
    const int channels, const int height, const int width,
    const int top_height, const int top_width, Dtype* top_data) {
  CUDA_KERNEL_LOOP(index, nthreads) {
    // (n, c, ph, pw) is an element in the pooled output
    int w = index % top_width;
    int h = (index / top_width) % top_height;
    int c = (index / top_width / top_height) % channels;
    int n = index / top_width / top_height / channels;
    // Source index in the bottom blob: same (n, c), spatial coords halved.
    int bottom_index = (n*channels + c) * height * width;
    bottom_index += (h/2) * width + (w/2);
    top_data[index] = bottom_data[bottom_index];
  }
}
// Forward pass: launches the upsampling kernel with one thread per top
// element, then checks the launch via CUDA_POST_KERNEL_CHECK.
template <typename Dtype>
void NearestNeighborLayer<Dtype>::Forward_gpu(const vector<Blob<Dtype>*>& bottom,
    const vector<Blob<Dtype>*>& top) {
  const Dtype* bottom_data = bottom[0]->gpu_data();
  Dtype* top_data = top[0]->mutable_gpu_data();
  int count = top[0]->count();
  // NOLINT_NEXT_LINE(whitespace/operators)
  hipLaunchKernelGGL(( NearestNeighborForward<Dtype>), dim3(CAFFE_GET_BLOCKS(count)), dim3(CAFFE_CUDA_NUM_THREADS), 0, 0,
      count, bottom_data, channels_, height_, width_, top_height_, top_width_, top_data);
  CUDA_POST_KERNEL_CHECK;
}
// Backward for 2x nearest-neighbor upsampling: one thread per bottom
// element; each accumulates the gradients of the 2x2 top pixels it fed.
template <typename Dtype>
__global__ void NearestNeighborBackward(const int nthreads, const Dtype* top_diff,
    const int channels, const int height, const int width,
    const int top_height, const int top_width, Dtype* bottom_diff) {
  CUDA_KERNEL_LOOP(index, nthreads) {
    // (n, c, h, w) coords in bottom data
    int w = index % width;
    int h = (index / width) % height;
    int c = (index / width / height) % channels;
    int n = index / width / height / channels;
    Dtype gradient = 0;
    int top_index = (n*channels+c)*top_height*top_width;
    const Dtype *offset_top_diff = top_diff + top_index;
    // Sum the four top diffs at rows 2h, 2h+1 and columns 2w, 2w+1.
    gradient += offset_top_diff[2*h*top_width + 2*w];
    gradient += offset_top_diff[(2*h+1)*top_width + 2*w];
    gradient += offset_top_diff[2*h*top_width + (2*w+1)];
    gradient += offset_top_diff[(2*h+1)*top_width + (2*w+1)];
    bottom_diff[index] = gradient;
  }
}
// Backward pass: early-out when no gradient is requested; otherwise zeroes
// the bottom diff and launches one thread per bottom element.
template <typename Dtype>
void NearestNeighborLayer<Dtype>::Backward_gpu(const vector<Blob<Dtype>*>& top,
    const vector<bool>& propagate_down, const vector<Blob<Dtype>*>& bottom) {
  if (!propagate_down[0]) {
    return;
  }
  const Dtype* top_diff = top[0]->gpu_diff();
  Dtype* bottom_diff = bottom[0]->mutable_gpu_diff();
  const int count = bottom[0]->count();
  caffe_gpu_set(count, Dtype(0.), bottom_diff);
  // NOLINT_NEXT_LINE(whitespace/operators)
  hipLaunchKernelGGL(( NearestNeighborBackward<Dtype>), dim3(CAFFE_GET_BLOCKS(count)), dim3(CAFFE_CUDA_NUM_THREADS), 0, 0,
      count, top_diff, channels_, height_, width_,
      top_height_, top_width_, bottom_diff);
  CUDA_POST_KERNEL_CHECK;
}
INSTANTIATE_LAYER_GPU_FUNCS(NearestNeighborLayer);
} // namespace caffe
| d624b58421010281fdf0d551679bcaa1e9c3f10a.cu | // ------------------------------------------------------------------
// Fast R-CNN
// Copyright (c) 2015 Microsoft
// Licensed under The MIT License [see fast-rcnn/LICENSE for details]
// Written by Ross Girshick
// ------------------------------------------------------------------
#include <cfloat>
#include "caffe/layers/nearest_neighbor_layer.hpp"
using std::max;
using std::min;
namespace caffe {
// 2x nearest-neighbor upsampling forward: one thread per element of the
// top (output) blob; each copies the bottom pixel at halved coordinates.
template <typename Dtype>
__global__ void NearestNeighborForward(const int nthreads, const Dtype* bottom_data,
    const int channels, const int height, const int width,
    const int top_height, const int top_width, Dtype* top_data) {
  CUDA_KERNEL_LOOP(index, nthreads) {
    // (n, c, ph, pw) is an element in the pooled output
    int w = index % top_width;
    int h = (index / top_width) % top_height;
    int c = (index / top_width / top_height) % channels;
    int n = index / top_width / top_height / channels;
    // Source index in the bottom blob: same (n, c), spatial coords halved.
    int bottom_index = (n*channels + c) * height * width;
    bottom_index += (h/2) * width + (w/2);
    top_data[index] = bottom_data[bottom_index];
  }
}
// Forward pass: launches the upsampling kernel with one thread per top
// element, then checks the launch via CUDA_POST_KERNEL_CHECK.
template <typename Dtype>
void NearestNeighborLayer<Dtype>::Forward_gpu(const vector<Blob<Dtype>*>& bottom,
    const vector<Blob<Dtype>*>& top) {
  const Dtype* bottom_data = bottom[0]->gpu_data();
  Dtype* top_data = top[0]->mutable_gpu_data();
  int count = top[0]->count();
  // NOLINT_NEXT_LINE(whitespace/operators)
  NearestNeighborForward<Dtype><<<CAFFE_GET_BLOCKS(count), CAFFE_CUDA_NUM_THREADS>>>(
      count, bottom_data, channels_, height_, width_, top_height_, top_width_, top_data);
  CUDA_POST_KERNEL_CHECK;
}
// Backward for 2x nearest-neighbor upsampling: one thread per bottom
// element; each accumulates the gradients of the 2x2 top pixels it fed.
template <typename Dtype>
__global__ void NearestNeighborBackward(const int nthreads, const Dtype* top_diff,
    const int channels, const int height, const int width,
    const int top_height, const int top_width, Dtype* bottom_diff) {
  CUDA_KERNEL_LOOP(index, nthreads) {
    // (n, c, h, w) coords in bottom data
    int w = index % width;
    int h = (index / width) % height;
    int c = (index / width / height) % channels;
    int n = index / width / height / channels;
    Dtype gradient = 0;
    int top_index = (n*channels+c)*top_height*top_width;
    const Dtype *offset_top_diff = top_diff + top_index;
    // Sum the four top diffs at rows 2h, 2h+1 and columns 2w, 2w+1.
    gradient += offset_top_diff[2*h*top_width + 2*w];
    gradient += offset_top_diff[(2*h+1)*top_width + 2*w];
    gradient += offset_top_diff[2*h*top_width + (2*w+1)];
    gradient += offset_top_diff[(2*h+1)*top_width + (2*w+1)];
    bottom_diff[index] = gradient;
  }
}
// Backward pass: early-out when no gradient is requested; otherwise zeroes
// the bottom diff and launches one thread per bottom element.
template <typename Dtype>
void NearestNeighborLayer<Dtype>::Backward_gpu(const vector<Blob<Dtype>*>& top,
    const vector<bool>& propagate_down, const vector<Blob<Dtype>*>& bottom) {
  if (!propagate_down[0]) {
    return;
  }
  const Dtype* top_diff = top[0]->gpu_diff();
  Dtype* bottom_diff = bottom[0]->mutable_gpu_diff();
  const int count = bottom[0]->count();
  caffe_gpu_set(count, Dtype(0.), bottom_diff);
  // NOLINT_NEXT_LINE(whitespace/operators)
  NearestNeighborBackward<Dtype><<<CAFFE_GET_BLOCKS(count), CAFFE_CUDA_NUM_THREADS>>>(
      count, top_diff, channels_, height_, width_,
      top_height_, top_width_, bottom_diff);
  CUDA_POST_KERNEL_CHECK;
}
INSTANTIATE_LAYER_GPU_FUNCS(NearestNeighborLayer);
} // namespace caffe
|
bb7db3e7522e6974d5527fd26db3ce0242ee8a36.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
#include "tmwtypes.h"
// Note: on GTX Titan Black, optimal number of threads/images is 64 (Ni = 64).
// Set isevenX = 1 if kernel is even in X, iseven = 0 if odd.
// Batched 2D correlation over the "valid" region: one thread block per
// output pixel (x, y) = (blockIdx.x, blockIdx.y); each thread handles one
// kernel slice zk = threadIdx.x, correlating kernel zk with input map zk.
// isevenX/isevenY (0 or 1) shorten the upper loop bound by one for kernels
// of even width/height.
__global__ void superconv2(float *Mout, const float *M, const float *K,
                           const int32_T Nmx, const int32_T Nmy,
                           const int32_T Nkx, const int32_T Nky,
                           const int32_T Nkxh, const int32_T Nkyh,
                           const int32_T isevenX, const int32_T isevenY)
{
    int32_T x = blockIdx.x;   // row of output pixel
    int32_T y = blockIdx.y;   // column of output pixel
    int32_T X = gridDim.x;    // map output width in X
    int32_T Y = gridDim.y;    // map output height in Y
    //int32_T Zk = blockDim.x; // number of 2D kernels (kernel depth)
    int32_T zk = threadIdx.x; // map number (3rd dimension of M array)
    // Offset into the input so the kernel window stays inside the map.
    int32_T mx = x + Nkxh;
    int32_T my = y + Nkyh;
    float res = 0;
    int32_T i, j;
#pragma unroll 10
    for (i=-Nkxh; i<=Nkxh-isevenX; i++) {
#pragma unroll 10
        for (j=-Nkyh; j<=Nkyh-isevenY; j++) {
            // loop only over 1st and 2nd dimensions
            res += K[Nkx*Nky*zk + (j+Nkyh)*Nkx + (i+Nkxh)] *
                M[Nmx*Nmy*zk + Nmx*(my+j) + (mx+i)];
        }
    }
    // Output layout: (Nm-Nk+1, Nm-Nk+1, Nkz, Ni)
    Mout[X*Y*zk + X*y + x] = res;
}
| bb7db3e7522e6974d5527fd26db3ce0242ee8a36.cu | #include "tmwtypes.h"
// Note: on GTX Titan Black, optimal number of threads/images is 64 (Ni = 64).
// Set isevenX = 1 if kernel is even in X, iseven = 0 if odd.
// Batched 2D correlation over the "valid" region: one thread block per
// output pixel (x, y) = (blockIdx.x, blockIdx.y); each thread handles one
// kernel slice zk = threadIdx.x, correlating kernel zk with input map zk.
// isevenX/isevenY (0 or 1) shorten the upper loop bound by one for kernels
// of even width/height.
__global__ void superconv2(float *Mout, const float *M, const float *K,
                           const int32_T Nmx, const int32_T Nmy,
                           const int32_T Nkx, const int32_T Nky,
                           const int32_T Nkxh, const int32_T Nkyh,
                           const int32_T isevenX, const int32_T isevenY)
{
    int32_T x = blockIdx.x;   // row of output pixel
    int32_T y = blockIdx.y;   // column of output pixel
    int32_T X = gridDim.x;    // map output width in X
    int32_T Y = gridDim.y;    // map output height in Y
    //int32_T Zk = blockDim.x; // number of 2D kernels (kernel depth)
    int32_T zk = threadIdx.x; // map number (3rd dimension of M array)
    // Offset into the input so the kernel window stays inside the map.
    int32_T mx = x + Nkxh;
    int32_T my = y + Nkyh;
    float res = 0;
    int32_T i, j;
#pragma unroll 10
    for (i=-Nkxh; i<=Nkxh-isevenX; i++) {
#pragma unroll 10
        for (j=-Nkyh; j<=Nkyh-isevenY; j++) {
            // loop only over 1st and 2nd dimensions
            res += K[Nkx*Nky*zk + (j+Nkyh)*Nkx + (i+Nkxh)] *
                M[Nmx*Nmy*zk + Nmx*(my+j) + (mx+i)];
        }
    }
    // Output layout: (Nm-Nk+1, Nm-Nk+1, Nkz, Ni)
    Mout[X*Y*zk + X*y + x] = res;
}
|
ce9341eb5253f9e8ff0af33b042d989c858c4a6a.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
/*
Copyright 2012 The Trustees of Indiana University. All rights reserved.
CGL MapReduce Framework on GPUs and CPUs
Code Name: Panda
File: reduce.cu
First Version: 2012-07-01 V0.1
Current Version: 2012-09-01 V0.3
Last Updates: 2012-09-02
Developer: Hui Li (lihui@indiana.edu)
This is the source code for Panda, a MapReduce runtime on GPUs and CPUs.
*/
#ifndef __USER_CU__
#define __USER_CU__
#include "Panda.h"
#include "UserAPI.h"
//-------------------------------------------------------------------------
//Reduce Function in this application
//-------------------------------------------------------------------------
// GPU key comparator stub for this application: treats all keys as equal.
__device__ int gpu_compare(const void *d_a, int len_a, const void *d_b, int len_b)
{
    return 0;
}
// GPU combiner stub: no local combining is performed in this application.
__device__ void gpu_combiner(void *KEY, val_t* VAL, int keySize, int valCount, gpu_context *d_g_state, int map_task_idx)
{
    return;
}
// GPU reduce stub: the reduce phase is a no-op in this application.
__device__ void gpu_reduce(void *KEY, val_t* VAL, int keySize, int valCount, gpu_context d_g_state){
    return;
}//reduce2
// Dot product of two float4 vectors (used by the matrix-multiply maps).
__device__ float operator*(float4 a, float4 b)
{
    float s = a.x * b.x;   // same left-to-right accumulation order
    s += a.y * b.y;
    s += a.z * b.z;
    s += a.w * b.w;
    return s;
}
// CPU key comparator stub for this application: treats all keys as equal.
int cpu_compare(const void *d_a, int len_a, const void *d_b, int len_b)
{
    return 0;
}
// Panda framework combiner hook for the CPU path. Intentionally a no-op.
void cpu_combiner(void *KEY, val_t* VAL, int keySize, int valCount, cpu_context *d_g_state, int map_task_idx)
{
return;
}
// Panda framework reduce hook for the CPU path. Intentionally a no-op:
// map tasks write results directly into the output matrix.
void cpu_reduce(void *KEY, val_t* VAL, int keySize, int valCount, cpu_context* d_g_state){
return;
}//reduce2
void cpu_1d_blocked_matrix(float *A, float *B, float *C, int wA,int start_task_id,int end_id, int bz);
void cpu_2d_blocked_matrix(float *A, float *B, float *C, int wA,int row_id,int col_id, int bz);
// CPU map task: unpacks the matrices from KEY and the block coordinates
// from VAL, then delegates one block of the multiply to the 1D-blocked
// CPU helper. keySize/valSize/d_g_state/map_task_idx are unused.
void cpu_map(void *KEY, void*VAL, int keySize, int valSize, cpu_context *d_g_state, int map_task_idx){
    MM_KEY_T* matrices = (MM_KEY_T*)KEY;
    MM_VAL_T* task = (MM_VAL_T*)VAL;
    cpu_1d_blocked_matrix(matrices->h_matrix1,   // A
                          matrices->h_matrix2,   // B
                          matrices->h_matrix3,   // C (output)
                          task->col_dim,         // matrix width
                          task->row,             // block row handled by this task
                          task->col,             // block column handled by this task
                          MATRIX_BLOCK_SIZE);    // block size
}
// Map hook for the "gpu card" context. Empty: this context is unused by
// the matrix-multiply application.
void gpu_card_map(void *key, void *val, int keySize, int valSize, gpu_card_context *d_g_state, int map_task_idx){
}//void
// Comparator hook for the "gpu card" context. Stubbed: always equal.
int gpu_card_compare(const void *d_a, int len_a, const void *d_b, int len_b){
return 0;
}//int
//Last update 9/2/2012
//blocked matrix useful
// GPU map task (1D-blocked variant): multiplies one MATRIX_BLOCK_SIZE-wide
// block pair of the row-major m x m matrices A and B (taken from pKey) into
// C, vectorizing the inner row update with float4 loads/stores plus a
// scalar tail. keySize/valSize/d_g_state are unused.
// NOTE(review): the float4 casts assume C and B rows at offset jb*bz are
// 16-byte aligned -- confirm at the allocation site.
// NOTE(review): wB, the outer float Csub, and the float4 pointers As/Bs are
// declared but never used; the inner `float4 *Csub` shadows the float.
__device__ void gpu_map1(void *KEY, void*VAL, int keySize, int valSize, gpu_context *d_g_state, int map_task_idx){
MM_KEY_T* pKey = ((MM_KEY_T*)KEY);
MM_VAL_T* pVal = ((MM_VAL_T*)VAL);
int wA = pVal->col_dim;
int wB = pVal->col_dim;
int m = wA;
int bz = MATRIX_BLOCK_SIZE;
float Csub = 0.0;
float *A = pKey->matrix1;
float *B = pKey->matrix2;
float *C = pKey->matrix3;
float4*As = (float4*)A;
float4*Bs = (float4*)B;
int i,j,k;
// First row of A's block and first row of B's block handled by this task.
int start_row_id_a_matrix = pVal->row*bz;
int start_row_id_b_matrix = pVal->col*bz;
// Sub-block bookkeeping. aHeight == bz, so these normally collapse to a
// single block; the +1 branches only fire for non-divisible sizes.
int aHeight = bz;
int aHeightBlocks = aHeight/bz;
int aLastBlockHeight = aHeight - (aHeightBlocks*bz);
if (aLastBlockHeight>0){
aHeightBlocks++;
}//if
int bWidth = bz;
int bWidthBlocks = bWidth/bz;
int bLastBlockWidth = bWidth - (bWidthBlocks*bz);
if (bLastBlockWidth>0){
bWidthBlocks++;
}//if
int commBlocks = m/bz;
int commLastBlockWidth = m - (commBlocks*bz);
if (commLastBlockWidth >0){
commBlocks++;
}//fi
int aBlockHeight = bz;
int bBlockWidth = bz;
int commBlockWidth = bz;
int ib,jb,kb;
float4 b4,c4;
float aik;
// Triple-blocked loops: A-row blocks (ib), B-column blocks (jb), shared
// dimension blocks (kb); the last block of each may be a partial block.
for (ib=0; ib<aHeightBlocks; ib++){
if (aLastBlockHeight>0 && ib==(aHeightBlocks-1)){
aBlockHeight = aLastBlockHeight;
}//if
bBlockWidth = bz;
for (jb=0; jb<bWidthBlocks;jb++){
if (bLastBlockWidth>0&&jb==(bWidthBlocks-1))
bBlockWidth = bLastBlockWidth;
commBlockWidth = bz;
for (kb =0;kb<commBlocks;kb++){
if (commLastBlockWidth>0 && kb==(commBlocks-1))
commBlockWidth = commLastBlockWidth;
for (i = start_row_id_a_matrix + ib*bz;i<start_row_id_a_matrix+(ib*bz)+aBlockHeight;i++){
for (k = kb*bz;k<(kb*bz)+(commBlockWidth);k++){
aik = A[i*m+k];
float4 *Bsub = (float4*)(B+k*m+jb*bz);
float4 *Csub = (float4*)(C+i*m+jb*bz);
//for (j= jb*bz;j<(jb*bz)+(bBlockWidth)/4;j++){
// Vectorized update: C[i][jb*bz..) += aik * B[k][jb*bz..), 4 floats/iter.
for (j=0; j<(bBlockWidth/4); j++){
b4 = *((Bsub)+j);
c4 = *((Csub)+j);
c4.x += aik*b4.x;
c4.y += aik*b4.y;
c4.z += aik*b4.z;
c4.w += aik*b4.w;
*((Csub)+j) = c4;
//(C[i*m+j]+=A[i*m+k]*B[k*m+j];
}//for
// Scalar tail for block widths not divisible by 4.
int indexBase = jb*bz+4*(bBlockWidth/4);
for (int rj=0; rj<(bBlockWidth%4); rj++){
int index = indexBase + rj;
C[i*m+index] += aik*(*(B+ k*m +index));
}
}
}//for
}//for
}//for
}//for
//check results
/*if (map_task_idx == 1){
for (int j=10;j<20;j++)
for (int i=0;i<5;i++){
printf("%f ",C[j*wA+i]);
}
printf("\n");
}*/
}
//Last Update 9/24/2012
// GPU map task: shared-memory tiled matrix multiply of one mbz x mbz output
// block, executed cooperatively by a tbz x tbz thread block using tiles
// As/Bs. keySize/valSize/d_g_state are unused.
// NOTE(review): Bs is filled as B[(col_id)*m + cb*tbz + tx], where col_id
// already depends on tx -- the value written therefore does not vary with
// ty the way the later As[ty][k]*Bs[k][tx] product expects. This looks like
// a bug (a transposed-B load would use "+ ty" as the last term); verify
// against a CPU reference before relying on this kernel.
// NOTE(review): the unconditional per-element printf("Csub...") near the
// bottom is presumably leftover debug output; it will dominate runtime.
// NOTE(review): wB, kb, aBlockHeight, bBlockWidth and commBlockWidth are
// computed but unused in the active (non-commented) code path.
__device__ void gpu_core_map(void *KEY, void*VAL, int keySize, int valSize, gpu_context *d_g_state, int map_task_idx){
MM_KEY_T* pKey = ((MM_KEY_T*)KEY);
MM_VAL_T* pVal = ((MM_VAL_T*)VAL);
int wA = pVal->col_dim;
int wB = pVal->col_dim;
//int tbz = pVal->tbz;
//int mbz = pVal->mbz;
int tbz = THREAD_BLOCK_SIZE;
int mbz = MATRIX_BLOCK_SIZE;
int m = wA;
float Csub = 0.0;
float *A = pKey->matrix1;
float *B = pKey->matrix2;
float *C = pKey->matrix3;
// First row of A's block and of B's block handled by this map task.
int start_row_id_a_matrix = pVal->row*mbz;
int start_row_id_b_matrix = pVal->col*mbz;
int aHeight = mbz;
int aHeightBlocks = aHeight/tbz;
int aLastBlockHeight = aHeight - (aHeightBlocks*tbz);
if (aLastBlockHeight>0){
//aHeightBlocks++;
}//if
int bWidth = mbz;
int bWidthBlocks = bWidth/tbz;
int bLastBlockWidth = bWidth - (bWidthBlocks*tbz);
if (bLastBlockWidth>0){
//bWidthBlocks++;
}//if
int commBlocks = m/tbz;
int commLastBlockWidth = m - (commBlocks*tbz);
if (commLastBlockWidth >0){
//commBlocks++;
}//fi
int aBlockHeight = tbz;
int bBlockWidth = tbz;
int commBlockWidth = tbz;
int ib,jb,kb;
//int bx = blockIdx.x;
//int by = blockIdx.y;
int tx = threadIdx.x;
int ty = threadIdx.y;
__shared__ float As[THREAD_BLOCK_SIZE][THREAD_BLOCK_SIZE];
__shared__ float Bs[THREAD_BLOCK_SIZE][THREAD_BLOCK_SIZE];
__shared__ int row_id_a;
__shared__ int row_id_b;
//printf("wA:%d wB:%d tx:%d ty:%d\n",wA,wB,tx,ty);
for (ib=0; ib<aHeightBlocks; ib++){
if (aLastBlockHeight>0 && ib==(aHeightBlocks-1)){
aBlockHeight = aLastBlockHeight;
}//if
bBlockWidth = tbz;
for (jb=0; jb<bWidthBlocks;jb++){
if (bLastBlockWidth>0 && jb==(bWidthBlocks-1)){
bBlockWidth = bLastBlockWidth;
}
/*commBlockWidth = tbz;
for (kb =0;kb<commBlocks;kb++){
if (commLastBlockWidth>0 && kb==(commBlocks-1))
commBlockWidth = commLastBlockWidth;*/
// Every thread runs identical y/x loop bounds (compile-time constants),
// so the __syncthreads() calls below are reached uniformly by the block.
for (int y=0;y<THREAD_BLOCK_SIZE;y++){
for (int x=0;x<THREAD_BLOCK_SIZE;x++){
Csub = 0.0;
// One elected thread publishes the tile origin via shared memory.
if (y*THREAD_BLOCK_SIZE+x==THREAD_ID){
row_id_a = start_row_id_a_matrix+ib*tbz;
row_id_b = start_row_id_b_matrix+jb*tbz;
}//if
__syncthreads();
// Clamp indices to the last row/column instead of masking out-of-range
// threads (out-of-range threads overwrite the clamped element).
int row_id = (row_id_a + ty);
if (row_id >= m) row_id = m-1;
int col_id = (row_id_b + tx);
if (col_id >= m) col_id = m-1;
for (int cb=0; cb<commBlocks; cb++){
As[ty][tx] = A[(row_id)*m + cb*tbz + tx];
// NOTE(review): col_id depends on tx, so this value does not vary with
// ty -- suspected indexing bug, see header comment.
Bs[ty][tx] = B[(col_id)*m + cb*tbz + tx];
//if(cb==commBlocks-1)
//	printf("row:%d col:%d index:%d As[%d][%d]:%f\n",row_id,col_id,(row_id)*m + cb*tbz + tx,ty,tx,As[ty][tx]);
__syncthreads();
#pragma unroll
for (int k = 0; k<THREAD_BLOCK_SIZE; k++)
Csub += As[ty][k]*Bs[k][tx];
__syncthreads();
}//for
int index = (row_id)*m + (col_id);
if(index>m*m-1){
printf("error! index>m*m-1\n");
index = m*m-1;
}
C[index] = Csub;
printf("Csub:%f\n",Csub);
}//for (int x=0;x<16;x++)
}//for (int y=0;y<16;y++)
}//for (jb=0
}
}
//Last Updated 9/1/2012
//CUDA implementation of Matrix Multiplication useful
// GPU map task: shared-memory tiled matrix multiply of one bz x bz output
// block. keySize/valSize/d_g_state are unused.
// NOTE(review): the Bs tile is loaded from B[(row_id)*m + ...] -- i.e. with
// the A-row index -- while gpu_core_map uses col_id for its B tile and the
// commented-out line here used row_id_b. As written, the kernel multiplies
// A's block by a tile addressed with A's row, not B's column; this looks
// like a bug -- confirm against a CPU reference.
// NOTE(review): wB, aBlockHeight, bBlockWidth, commBlockWidth, ib, jb and
// kb are computed/declared but never used.
__device__ void gpu_map2(void *KEY, void*VAL, int keySize, int valSize, gpu_context *d_g_state, int map_task_idx){
//printf("map_task_idx:%d\n",map_task_idx);
MM_KEY_T* pKey = ((MM_KEY_T*)KEY);
MM_VAL_T* pVal = ((MM_VAL_T*)VAL);
int wA = pVal->col_dim;
int wB = pVal->col_dim;
//int bz = pVal->tbz; //size of each tile
int bz = MATRIX_BLOCK_SIZE;
int m = wA;
float Csub = 0.0;
float *A = pKey->matrix1;
float *B = pKey->matrix2;
float *C = pKey->matrix3;
// First row of A's block and of B's block handled by this map task.
int start_row_id_a_matrix = pVal->row*bz;
int start_row_id_b_matrix = pVal->col*bz;
int aHeight = bz;
int aHeightBlocks = aHeight/bz;
int aLastBlockHeight = aHeight - (aHeightBlocks*bz);
if (aLastBlockHeight>0){
aHeightBlocks++;
}//if
int bWidth = bz;
int bWidthBlocks = bWidth/bz;
int bLastBlockWidth = bWidth - (bWidthBlocks*bz);
if (bLastBlockWidth>0){
bWidthBlocks++;
}//if
int commBlocks = m/bz;
int commLastBlockWidth = m - (commBlocks*bz);
if (commLastBlockWidth >0){
//commBlocks++;
}//fi
int aBlockHeight = bz;
int bBlockWidth = bz;
int commBlockWidth = bz;
int ib,jb,kb;
//int bx = blockIdx.x;
//int by = blockIdx.y;
int tx = threadIdx.x;
int ty = threadIdx.y;
__shared__ float As[THREAD_BLOCK_SIZE][THREAD_BLOCK_SIZE];
__shared__ float Bs[THREAD_BLOCK_SIZE][THREAD_BLOCK_SIZE];
__shared__ int row_id_a;
__shared__ int row_id_b;
// Uniform y/x loop bounds: __syncthreads() below is reached by all threads.
for (int y=0;y<MATRIX_BLOCK_SIZE;y++){
for (int x=0;x<MATRIX_BLOCK_SIZE;x++){
Csub = 0.0;
// One elected thread publishes the block origin via shared memory.
if (y*MATRIX_BLOCK_SIZE+x==THREAD_ID){
row_id_a = start_row_id_a_matrix;
row_id_b = start_row_id_b_matrix;
}//if
__syncthreads();
// Clamp out-of-range indices to the last row/column.
int row_id = (row_id_a+ty);
if (row_id >= m) row_id = m-1;
int col_id = (row_id_b+tx);
if (col_id >= m) col_id = m-1;
for (int cb=0; cb<commBlocks; cb++){
//As[ty][tx] = A[(row_id_a + ty)*m + cb*bz + tx];
//Bs[ty][tx] = B[(row_id_b + ty)*m + cb*bz + tx];
As[ty][tx] = A[(row_id)*m + cb*bz + tx];
// NOTE(review): uses row_id (A's row) rather than col_id here --
// suspected bug, see header comment.
Bs[ty][tx] = B[(row_id)*m + cb*bz + tx];
__syncthreads();
#pragma unroll
for (int k = 0; k < MATRIX_BLOCK_SIZE; k++)
Csub += As[ty][k]*Bs[k][tx];
__syncthreads();
}//for
//if ((x==0) && (y==5)&&(map_task_idx%50==1))
//	printf("commBlocks:%d map_task_idx:%d tx:%d ty:%d Csub:%f index:%d row_id_a:%d bz:%d THREAD_ID:%d\n",commBlocks, map_task_idx, tx, ty, Csub, index, row_id_a, bz, THREAD_ID);
int index = (row_id)*m + (col_id);
C[index] = Csub;
}//for
}
}
#endif //__USER_CU__ | ce9341eb5253f9e8ff0af33b042d989c858c4a6a.cu | /*
Copyright 2012 The Trustees of Indiana University. All rights reserved.
CGL MapReduce Framework on GPUs and CPUs
Code Name: Panda
File: reduce.cu
First Version: 2012-07-01 V0.1
Current Version: 2012-09-01 V0.3
Last Updates: 2012-09-02
Developer: Hui Li (lihui@indiana.edu)
This is the source code for Panda, a MapReduce runtime on GPUs and CPUs.
*/
#ifndef __USER_CU__
#define __USER_CU__
#include "Panda.h"
#include "UserAPI.h"
//-------------------------------------------------------------------------
//Reduce Function in this application
//-------------------------------------------------------------------------
// Panda framework key-comparator hook for the GPU path. Stubbed: every
// pair of keys compares equal (returns 0), so no key ordering/grouping
// is performed for this application. All parameters are unused.
__device__ int gpu_compare(const void *d_a, int len_a, const void *d_b, int len_b)
{
return 0;
}
// Panda framework combiner hook for the GPU path. Intentionally a no-op:
// this application emits nothing from the map phase that needs combining.
__device__ void gpu_combiner(void *KEY, val_t* VAL, int keySize, int valCount, gpu_context *d_g_state, int map_task_idx)
{
return;
}
// Panda framework reduce hook for the GPU path. Intentionally a no-op:
// the matrix-multiply map tasks write their results directly into C.
__device__ void gpu_reduce(void *KEY, val_t* VAL, int keySize, int valCount, gpu_context d_g_state){
return;
}//reduce2
// Dot product of two float4 values (component-wise multiply, then sum).
// Accumulation order matches left-to-right addition, so the float result
// is bit-identical to a single chained expression.
__device__ float operator*(float4 a, float4 b)
{
    float dot = a.x * b.x;
    dot += a.y * b.y;
    dot += a.z * b.z;
    dot += a.w * b.w;
    return dot;
}
// Panda framework key-comparator hook for the CPU path. Stubbed: all keys
// compare equal (returns 0); no grouping is needed by this application.
int cpu_compare(const void *d_a, int len_a, const void *d_b, int len_b)
{
return 0;
}
// Panda framework combiner hook for the CPU path. Intentionally a no-op.
void cpu_combiner(void *KEY, val_t* VAL, int keySize, int valCount, cpu_context *d_g_state, int map_task_idx)
{
return;
}
// Panda framework reduce hook for the CPU path. Intentionally a no-op:
// map tasks write results directly into the output matrix.
void cpu_reduce(void *KEY, val_t* VAL, int keySize, int valCount, cpu_context* d_g_state){
return;
}//reduce2
void cpu_1d_blocked_matrix(float *A, float *B, float *C, int wA,int start_task_id,int end_id, int bz);
void cpu_2d_blocked_matrix(float *A, float *B, float *C, int wA,int row_id,int col_id, int bz);
// CPU map task: unpacks the matrices from KEY and the block coordinates
// from VAL, then delegates one block of the multiply to the 1D-blocked
// CPU helper. keySize/valSize/d_g_state/map_task_idx are unused.
void cpu_map(void *KEY, void*VAL, int keySize, int valSize, cpu_context *d_g_state, int map_task_idx){
    MM_KEY_T* matrices = (MM_KEY_T*)KEY;
    MM_VAL_T* task = (MM_VAL_T*)VAL;
    cpu_1d_blocked_matrix(matrices->h_matrix1,   // A
                          matrices->h_matrix2,   // B
                          matrices->h_matrix3,   // C (output)
                          task->col_dim,         // matrix width
                          task->row,             // block row handled by this task
                          task->col,             // block column handled by this task
                          MATRIX_BLOCK_SIZE);    // block size
}
// Map hook for the "gpu card" context. Empty: this context is unused by
// the matrix-multiply application.
void gpu_card_map(void *key, void *val, int keySize, int valSize, gpu_card_context *d_g_state, int map_task_idx){
}//void
// Comparator hook for the "gpu card" context. Stubbed: always equal.
int gpu_card_compare(const void *d_a, int len_a, const void *d_b, int len_b){
return 0;
}//int
//Last update 9/2/2012
//blocked matrix useful
// GPU map task (1D-blocked variant): multiplies one MATRIX_BLOCK_SIZE-wide
// block pair of the row-major m x m matrices A and B (taken from pKey) into
// C, vectorizing the inner row update with float4 loads/stores plus a
// scalar tail. keySize/valSize/d_g_state are unused.
// NOTE(review): the float4 casts assume C and B rows at offset jb*bz are
// 16-byte aligned -- confirm at the allocation site.
// NOTE(review): wB, the outer float Csub, and the float4 pointers As/Bs are
// declared but never used; the inner `float4 *Csub` shadows the float.
__device__ void gpu_map1(void *KEY, void*VAL, int keySize, int valSize, gpu_context *d_g_state, int map_task_idx){
MM_KEY_T* pKey = ((MM_KEY_T*)KEY);
MM_VAL_T* pVal = ((MM_VAL_T*)VAL);
int wA = pVal->col_dim;
int wB = pVal->col_dim;
int m = wA;
int bz = MATRIX_BLOCK_SIZE;
float Csub = 0.0;
float *A = pKey->matrix1;
float *B = pKey->matrix2;
float *C = pKey->matrix3;
float4*As = (float4*)A;
float4*Bs = (float4*)B;
int i,j,k;
// First row of A's block and first row of B's block handled by this task.
int start_row_id_a_matrix = pVal->row*bz;
int start_row_id_b_matrix = pVal->col*bz;
// Sub-block bookkeeping. aHeight == bz, so these normally collapse to a
// single block; the +1 branches only fire for non-divisible sizes.
int aHeight = bz;
int aHeightBlocks = aHeight/bz;
int aLastBlockHeight = aHeight - (aHeightBlocks*bz);
if (aLastBlockHeight>0){
aHeightBlocks++;
}//if
int bWidth = bz;
int bWidthBlocks = bWidth/bz;
int bLastBlockWidth = bWidth - (bWidthBlocks*bz);
if (bLastBlockWidth>0){
bWidthBlocks++;
}//if
int commBlocks = m/bz;
int commLastBlockWidth = m - (commBlocks*bz);
if (commLastBlockWidth >0){
commBlocks++;
}//fi
int aBlockHeight = bz;
int bBlockWidth = bz;
int commBlockWidth = bz;
int ib,jb,kb;
float4 b4,c4;
float aik;
// Triple-blocked loops: A-row blocks (ib), B-column blocks (jb), shared
// dimension blocks (kb); the last block of each may be a partial block.
for (ib=0; ib<aHeightBlocks; ib++){
if (aLastBlockHeight>0 && ib==(aHeightBlocks-1)){
aBlockHeight = aLastBlockHeight;
}//if
bBlockWidth = bz;
for (jb=0; jb<bWidthBlocks;jb++){
if (bLastBlockWidth>0&&jb==(bWidthBlocks-1))
bBlockWidth = bLastBlockWidth;
commBlockWidth = bz;
for (kb =0;kb<commBlocks;kb++){
if (commLastBlockWidth>0 && kb==(commBlocks-1))
commBlockWidth = commLastBlockWidth;
for (i = start_row_id_a_matrix + ib*bz;i<start_row_id_a_matrix+(ib*bz)+aBlockHeight;i++){
for (k = kb*bz;k<(kb*bz)+(commBlockWidth);k++){
aik = A[i*m+k];
float4 *Bsub = (float4*)(B+k*m+jb*bz);
float4 *Csub = (float4*)(C+i*m+jb*bz);
//for (j= jb*bz;j<(jb*bz)+(bBlockWidth)/4;j++){
// Vectorized update: C[i][jb*bz..) += aik * B[k][jb*bz..), 4 floats/iter.
for (j=0; j<(bBlockWidth/4); j++){
b4 = *((Bsub)+j);
c4 = *((Csub)+j);
c4.x += aik*b4.x;
c4.y += aik*b4.y;
c4.z += aik*b4.z;
c4.w += aik*b4.w;
*((Csub)+j) = c4;
//(C[i*m+j]+=A[i*m+k]*B[k*m+j];
}//for
// Scalar tail for block widths not divisible by 4.
int indexBase = jb*bz+4*(bBlockWidth/4);
for (int rj=0; rj<(bBlockWidth%4); rj++){
int index = indexBase + rj;
C[i*m+index] += aik*(*(B+ k*m +index));
}
}
}//for
}//for
}//for
}//for
//check results
/*if (map_task_idx == 1){
for (int j=10;j<20;j++)
for (int i=0;i<5;i++){
printf("%f ",C[j*wA+i]);
}
printf("\n");
}*/
}
//Last Update 9/24/2012
// GPU map task: shared-memory tiled matrix multiply of one mbz x mbz output
// block, executed cooperatively by a tbz x tbz thread block using tiles
// As/Bs. keySize/valSize/d_g_state are unused.
// NOTE(review): Bs is filled as B[(col_id)*m + cb*tbz + tx], where col_id
// already depends on tx -- the value written therefore does not vary with
// ty the way the later As[ty][k]*Bs[k][tx] product expects. This looks like
// a bug (a transposed-B load would use "+ ty" as the last term); verify
// against a CPU reference before relying on this kernel.
// NOTE(review): the unconditional per-element printf("Csub...") near the
// bottom is presumably leftover debug output; it will dominate runtime.
// NOTE(review): wB, kb, aBlockHeight, bBlockWidth and commBlockWidth are
// computed but unused in the active (non-commented) code path.
__device__ void gpu_core_map(void *KEY, void*VAL, int keySize, int valSize, gpu_context *d_g_state, int map_task_idx){
MM_KEY_T* pKey = ((MM_KEY_T*)KEY);
MM_VAL_T* pVal = ((MM_VAL_T*)VAL);
int wA = pVal->col_dim;
int wB = pVal->col_dim;
//int tbz = pVal->tbz;
//int mbz = pVal->mbz;
int tbz = THREAD_BLOCK_SIZE;
int mbz = MATRIX_BLOCK_SIZE;
int m = wA;
float Csub = 0.0;
float *A = pKey->matrix1;
float *B = pKey->matrix2;
float *C = pKey->matrix3;
// First row of A's block and of B's block handled by this map task.
int start_row_id_a_matrix = pVal->row*mbz;
int start_row_id_b_matrix = pVal->col*mbz;
int aHeight = mbz;
int aHeightBlocks = aHeight/tbz;
int aLastBlockHeight = aHeight - (aHeightBlocks*tbz);
if (aLastBlockHeight>0){
//aHeightBlocks++;
}//if
int bWidth = mbz;
int bWidthBlocks = bWidth/tbz;
int bLastBlockWidth = bWidth - (bWidthBlocks*tbz);
if (bLastBlockWidth>0){
//bWidthBlocks++;
}//if
int commBlocks = m/tbz;
int commLastBlockWidth = m - (commBlocks*tbz);
if (commLastBlockWidth >0){
//commBlocks++;
}//fi
int aBlockHeight = tbz;
int bBlockWidth = tbz;
int commBlockWidth = tbz;
int ib,jb,kb;
//int bx = blockIdx.x;
//int by = blockIdx.y;
int tx = threadIdx.x;
int ty = threadIdx.y;
__shared__ float As[THREAD_BLOCK_SIZE][THREAD_BLOCK_SIZE];
__shared__ float Bs[THREAD_BLOCK_SIZE][THREAD_BLOCK_SIZE];
__shared__ int row_id_a;
__shared__ int row_id_b;
//printf("wA:%d wB:%d tx:%d ty:%d\n",wA,wB,tx,ty);
for (ib=0; ib<aHeightBlocks; ib++){
if (aLastBlockHeight>0 && ib==(aHeightBlocks-1)){
aBlockHeight = aLastBlockHeight;
}//if
bBlockWidth = tbz;
for (jb=0; jb<bWidthBlocks;jb++){
if (bLastBlockWidth>0 && jb==(bWidthBlocks-1)){
bBlockWidth = bLastBlockWidth;
}
/*commBlockWidth = tbz;
for (kb =0;kb<commBlocks;kb++){
if (commLastBlockWidth>0 && kb==(commBlocks-1))
commBlockWidth = commLastBlockWidth;*/
// Every thread runs identical y/x loop bounds (compile-time constants),
// so the __syncthreads() calls below are reached uniformly by the block.
for (int y=0;y<THREAD_BLOCK_SIZE;y++){
for (int x=0;x<THREAD_BLOCK_SIZE;x++){
Csub = 0.0;
// One elected thread publishes the tile origin via shared memory.
if (y*THREAD_BLOCK_SIZE+x==THREAD_ID){
row_id_a = start_row_id_a_matrix+ib*tbz;
row_id_b = start_row_id_b_matrix+jb*tbz;
}//if
__syncthreads();
// Clamp indices to the last row/column instead of masking out-of-range
// threads (out-of-range threads overwrite the clamped element).
int row_id = (row_id_a + ty);
if (row_id >= m) row_id = m-1;
int col_id = (row_id_b + tx);
if (col_id >= m) col_id = m-1;
for (int cb=0; cb<commBlocks; cb++){
As[ty][tx] = A[(row_id)*m + cb*tbz + tx];
// NOTE(review): col_id depends on tx, so this value does not vary with
// ty -- suspected indexing bug, see header comment.
Bs[ty][tx] = B[(col_id)*m + cb*tbz + tx];
//if(cb==commBlocks-1)
//	printf("row:%d col:%d index:%d As[%d][%d]:%f\n",row_id,col_id,(row_id)*m + cb*tbz + tx,ty,tx,As[ty][tx]);
__syncthreads();
#pragma unroll
for (int k = 0; k<THREAD_BLOCK_SIZE; k++)
Csub += As[ty][k]*Bs[k][tx];
__syncthreads();
}//for
int index = (row_id)*m + (col_id);
if(index>m*m-1){
printf("error! index>m*m-1\n");
index = m*m-1;
}
C[index] = Csub;
printf("Csub:%f\n",Csub);
}//for (int x=0;x<16;x++)
}//for (int y=0;y<16;y++)
}//for (jb=0
}
}
//Last Updated 9/1/2012
//CUDA implementation of Matrix Multiplication useful
// GPU map task: shared-memory tiled matrix multiply of one bz x bz output
// block. keySize/valSize/d_g_state are unused.
// NOTE(review): the Bs tile is loaded from B[(row_id)*m + ...] -- i.e. with
// the A-row index -- while gpu_core_map uses col_id for its B tile and the
// commented-out line here used row_id_b. As written, the kernel multiplies
// A's block by a tile addressed with A's row, not B's column; this looks
// like a bug -- confirm against a CPU reference.
// NOTE(review): wB, aBlockHeight, bBlockWidth, commBlockWidth, ib, jb and
// kb are computed/declared but never used.
__device__ void gpu_map2(void *KEY, void*VAL, int keySize, int valSize, gpu_context *d_g_state, int map_task_idx){
//printf("map_task_idx:%d\n",map_task_idx);
MM_KEY_T* pKey = ((MM_KEY_T*)KEY);
MM_VAL_T* pVal = ((MM_VAL_T*)VAL);
int wA = pVal->col_dim;
int wB = pVal->col_dim;
//int bz = pVal->tbz; //size of each tile
int bz = MATRIX_BLOCK_SIZE;
int m = wA;
float Csub = 0.0;
float *A = pKey->matrix1;
float *B = pKey->matrix2;
float *C = pKey->matrix3;
// First row of A's block and of B's block handled by this map task.
int start_row_id_a_matrix = pVal->row*bz;
int start_row_id_b_matrix = pVal->col*bz;
int aHeight = bz;
int aHeightBlocks = aHeight/bz;
int aLastBlockHeight = aHeight - (aHeightBlocks*bz);
if (aLastBlockHeight>0){
aHeightBlocks++;
}//if
int bWidth = bz;
int bWidthBlocks = bWidth/bz;
int bLastBlockWidth = bWidth - (bWidthBlocks*bz);
if (bLastBlockWidth>0){
bWidthBlocks++;
}//if
int commBlocks = m/bz;
int commLastBlockWidth = m - (commBlocks*bz);
if (commLastBlockWidth >0){
//commBlocks++;
}//fi
int aBlockHeight = bz;
int bBlockWidth = bz;
int commBlockWidth = bz;
int ib,jb,kb;
//int bx = blockIdx.x;
//int by = blockIdx.y;
int tx = threadIdx.x;
int ty = threadIdx.y;
__shared__ float As[THREAD_BLOCK_SIZE][THREAD_BLOCK_SIZE];
__shared__ float Bs[THREAD_BLOCK_SIZE][THREAD_BLOCK_SIZE];
__shared__ int row_id_a;
__shared__ int row_id_b;
// Uniform y/x loop bounds: __syncthreads() below is reached by all threads.
for (int y=0;y<MATRIX_BLOCK_SIZE;y++){
for (int x=0;x<MATRIX_BLOCK_SIZE;x++){
Csub = 0.0;
// One elected thread publishes the block origin via shared memory.
if (y*MATRIX_BLOCK_SIZE+x==THREAD_ID){
row_id_a = start_row_id_a_matrix;
row_id_b = start_row_id_b_matrix;
}//if
__syncthreads();
// Clamp out-of-range indices to the last row/column.
int row_id = (row_id_a+ty);
if (row_id >= m) row_id = m-1;
int col_id = (row_id_b+tx);
if (col_id >= m) col_id = m-1;
for (int cb=0; cb<commBlocks; cb++){
//As[ty][tx] = A[(row_id_a + ty)*m + cb*bz + tx];
//Bs[ty][tx] = B[(row_id_b + ty)*m + cb*bz + tx];
As[ty][tx] = A[(row_id)*m + cb*bz + tx];
// NOTE(review): uses row_id (A's row) rather than col_id here --
// suspected bug, see header comment.
Bs[ty][tx] = B[(row_id)*m + cb*bz + tx];
__syncthreads();
#pragma unroll
for (int k = 0; k < MATRIX_BLOCK_SIZE; k++)
Csub += As[ty][k]*Bs[k][tx];
__syncthreads();
}//for
//if ((x==0) && (y==5)&&(map_task_idx%50==1))
//	printf("commBlocks:%d map_task_idx:%d tx:%d ty:%d Csub:%f index:%d row_id_a:%d bz:%d THREAD_ID:%d\n",commBlocks, map_task_idx, tx, ty, Csub, index, row_id_a, bz, THREAD_ID);
int index = (row_id)*m + (col_id);
C[index] = Csub;
}//for
}
}
#endif //__USER_CU__ |
019c4da2ea74e94aa8dd1fc003ac5bd9c6de1c14.hip | // !!! This is a file automatically generated by hipify!!!
/*
Copyright (C) 2016 Yusuke Suzuki <yusuke.suzuki@sslab.ics.keio.ac.jp>
Redistribution and use in source and binary forms, with or without
modification, are permitted provided that the following conditions are met:
* Redistributions of source code must retain the above copyright
notice, this list of conditions and the following disclaimer.
* Redistributions in binary form must reproduce the above copyright
notice, this list of conditions and the following disclaimer in the
documentation and/or other materials provided with the distribution.
THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
ARE DISCLAIMED. IN NO EVENT SHALL <COPYRIGHT HOLDER> BE LIABLE FOR ANY
DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
(INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND
ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
(INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
*/
#include <gloop/gloop.h>
#include <gloop/benchmark.h>
// Recursively re-posts itself through the gloop device task loop until
// `count` reaches `limit`, generating `limit` dependent device-side tasks.
// Used by the benchmark to measure task-scheduling throughput.
__device__ void throttle(gloop::DeviceLoop<gloop::Global>* loop, int count, int limit)
{
if (count != limit) {
gloop::loop::postTask(loop, [=] (gloop::DeviceLoop<gloop::Global>* loop) {
throttle(loop, count + 1, limit);
});
}
}
#if 0
__device__ void shared(gloop::DeviceLoop<>* loop, int count, int limit, unsigned int* ok)
{
unsigned int* hello = buf;
if (count != limit) {
gloop::loop::postTask(loop, [=] (gloop::DeviceLoop<>* loop) {
shared(loop, count + 1, limit, hello);
});
}
}
#endif
// Benchmark driver. Usage: <kernel_iterations> <blocks> <pblocks> <threads> <id>
// Bug fix: the original guarded with `argc<5` but reads argv[5] (id) below,
// indexing past argv when exactly 4 arguments are supplied. The guard is now
// `argc<6` and the usage string documents the required <id> argument.
int main(int argc, char** argv) {
    if(argc<6) {
        fprintf(stderr,"<kernel_iterations> <blocks> <pblocks> <threads> <id>\n");
        return -1;
    }
    int trials=atoi(argv[1]);      // tasks each device loop re-posts per launch
    int nblocks=atoi(argv[2]);     // logical grid size
    int physblocks=atoi(argv[3]);  // physical blocks given to the HostContext
    int nthreads=atoi(argv[4]);    // threads per block
    int id=atoi(argv[5]);          // run id, echoed in the report line
    fprintf(stderr," iterations: %d blocks %d threads %d id %d\n",trials, nblocks, nthreads, id);
    {
        uint32_t pipelinePageCount = 0;
        dim3 blocks(nblocks);
        dim3 psblocks(physblocks);
        std::unique_ptr<gloop::HostLoop> hostLoop = gloop::HostLoop::create(0);
        std::unique_ptr<gloop::HostContext> hostContext = gloop::HostContext::create(*hostLoop, psblocks, pipelinePageCount);
        {
            // Raise the device malloc heap to 1 MiB while holding the kernel lock.
            std::lock_guard<gloop::HostLoop::KernelLock> lock(hostLoop->kernelLock());
            CUDA_SAFE_CALL(hipDeviceSetLimit(hipLimitMallocHeapSize, (1ULL << 20)));
        }
        gloop::Benchmark benchmark;
        benchmark.begin();
        // Two back-to-back launches; each device lambda recursively re-posts
        // itself `trials` times through the gloop task loop (see throttle).
        hostLoop->launch<gloop::Global>(*hostContext, blocks, dim3(nthreads), [=] GLOOP_DEVICE_LAMBDA (gloop::DeviceLoop<gloop::Global>* loop, int trials) {
            throttle(loop, 0, trials);
        }, trials);
#if 0
        hostLoop->launch(*hostContext, blocks, nthreads, [=] GLOOP_DEVICE_LAMBDA (gloop::DeviceLoop<gloop::Global>* loop, int trials) {
            shared(loop, 0, trials, nullptr);
        }, trials);
#endif
        hostLoop->launch<gloop::Global>(*hostContext, blocks, nthreads, [=] GLOOP_DEVICE_LAMBDA (gloop::DeviceLoop<gloop::Global>* loop, int trials) {
            throttle(loop, 0, trials);
        }, trials);
        benchmark.end();
        printf("[%d] ", id);
        benchmark.report();
    }
    return 0;
}
| 019c4da2ea74e94aa8dd1fc003ac5bd9c6de1c14.cu | /*
Copyright (C) 2016 Yusuke Suzuki <yusuke.suzuki@sslab.ics.keio.ac.jp>
Redistribution and use in source and binary forms, with or without
modification, are permitted provided that the following conditions are met:
* Redistributions of source code must retain the above copyright
notice, this list of conditions and the following disclaimer.
* Redistributions in binary form must reproduce the above copyright
notice, this list of conditions and the following disclaimer in the
documentation and/or other materials provided with the distribution.
THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
ARE DISCLAIMED. IN NO EVENT SHALL <COPYRIGHT HOLDER> BE LIABLE FOR ANY
DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
(INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND
ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
(INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
*/
#include <gloop/gloop.h>
#include <gloop/benchmark.h>
// Recursively re-posts itself through the gloop device task loop until
// `count` reaches `limit`, generating `limit` dependent device-side tasks.
// Used by the benchmark to measure task-scheduling throughput.
__device__ void throttle(gloop::DeviceLoop<gloop::Global>* loop, int count, int limit)
{
if (count != limit) {
gloop::loop::postTask(loop, [=] (gloop::DeviceLoop<gloop::Global>* loop) {
throttle(loop, count + 1, limit);
});
}
}
#if 0
__device__ void shared(gloop::DeviceLoop<>* loop, int count, int limit, unsigned int* ok)
{
unsigned int* hello = buf;
if (count != limit) {
gloop::loop::postTask(loop, [=] (gloop::DeviceLoop<>* loop) {
shared(loop, count + 1, limit, hello);
});
}
}
#endif
// Benchmark driver. Usage: <kernel_iterations> <blocks> <pblocks> <threads> <id>
// Bug fix: the original guarded with `argc<5` but reads argv[5] (id) below,
// indexing past argv when exactly 4 arguments are supplied. The guard is now
// `argc<6` and the usage string documents the required <id> argument.
int main(int argc, char** argv) {
    if(argc<6) {
        fprintf(stderr,"<kernel_iterations> <blocks> <pblocks> <threads> <id>\n");
        return -1;
    }
    int trials=atoi(argv[1]);      // tasks each device loop re-posts per launch
    int nblocks=atoi(argv[2]);     // logical grid size
    int physblocks=atoi(argv[3]);  // physical blocks given to the HostContext
    int nthreads=atoi(argv[4]);    // threads per block
    int id=atoi(argv[5]);          // run id, echoed in the report line
    fprintf(stderr," iterations: %d blocks %d threads %d id %d\n",trials, nblocks, nthreads, id);
    {
        uint32_t pipelinePageCount = 0;
        dim3 blocks(nblocks);
        dim3 psblocks(physblocks);
        std::unique_ptr<gloop::HostLoop> hostLoop = gloop::HostLoop::create(0);
        std::unique_ptr<gloop::HostContext> hostContext = gloop::HostContext::create(*hostLoop, psblocks, pipelinePageCount);
        {
            // Raise the device malloc heap to 1 MiB while holding the kernel lock.
            std::lock_guard<gloop::HostLoop::KernelLock> lock(hostLoop->kernelLock());
            CUDA_SAFE_CALL(cudaDeviceSetLimit(cudaLimitMallocHeapSize, (1ULL << 20)));
        }
        gloop::Benchmark benchmark;
        benchmark.begin();
        // Two back-to-back launches; each device lambda recursively re-posts
        // itself `trials` times through the gloop task loop (see throttle).
        hostLoop->launch<gloop::Global>(*hostContext, blocks, dim3(nthreads), [=] GLOOP_DEVICE_LAMBDA (gloop::DeviceLoop<gloop::Global>* loop, int trials) {
            throttle(loop, 0, trials);
        }, trials);
#if 0
        hostLoop->launch(*hostContext, blocks, nthreads, [=] GLOOP_DEVICE_LAMBDA (gloop::DeviceLoop<gloop::Global>* loop, int trials) {
            shared(loop, 0, trials, nullptr);
        }, trials);
#endif
        hostLoop->launch<gloop::Global>(*hostContext, blocks, nthreads, [=] GLOOP_DEVICE_LAMBDA (gloop::DeviceLoop<gloop::Global>* loop, int trials) {
            throttle(loop, 0, trials);
        }, trials);
        benchmark.end();
        printf("[%d] ", id);
        benchmark.report();
    }
    return 0;
}
|
55529e194dcef5a2f4a94c1f963c9d888ae8e4fa.hip | // !!! This is a file automatically generated by hipify!!!
#include "glassmaterial.h"
#include "math.h"
// Initializes a white material with an albedo of 1
// Default: clear white glass, 30% reflective / 70% transmissive, IOR 1.5.
__device__ GlassMaterial::GlassMaterial() : color(Color(1.f, 1.f, 1.f)), reflect(0.3f), transmit(0.7f), ior(1.5f) {
}
// Initializes a material with given color and albedo
// Constructs glass with explicit color, reflect/transmit weights and index
// of refraction; values are stored verbatim (no normalization of weights).
__device__ GlassMaterial::GlassMaterial(const Color& color, float reflect, float transmit, float ior) :
color(color), reflect(reflect), transmit(transmit), ior(ior) {
}
// Returns the color of this material
// Accessor for the material's base color.
__device__ Color GlassMaterial::GetColor() const {
return color;
}
// Bidirectional Reflectance Distribution Function
// BRDF evaluation. Constant 1 for this material; direction selection is
// handled entirely by GetSample. All parameters are unused.
__device__ float GlassMaterial::GetBRDF(const Vector& in, const Vector& out, const Vector& normal) const {
return 1.f;
}
// Probability Density Function for cosine-weighted hemisphere sampling
// Sampling PDF. Constant 1: GetSample is deterministic, so the single
// sampled direction has unit density. All parameters are unused.
__device__ float GlassMaterial::GetPDF(const Vector& in, const Vector& out, const Vector& normal) const {
return 1.f;
}
// Cosine weighted sampling on the unit hemisphere
// Returns the outgoing direction for an incoming ray. Despite the rng
// parameter, sampling is deterministic here: the ray is always transmitted
// (see Transmit); the `reflect` weight and rng are unused.
__device__ Vector GlassMaterial::GetSample(const Vector& in, const Vector& normal, hiprandState_t* rng) const {
return Transmit(in, normal);
}
// Returns the factor between incoming and outgoing radiance along given rays
// Radiance multiplier between incoming and outgoing rays. Constant 1:
// no attenuation is applied. All parameters are unused.
__device__ float GlassMaterial::GetMultiplier(const Vector& in, const Vector& out, const Vector& normal) const {
return 1.f;
}
// Returns the in vector reflected about the normal
// Mirrors `in` about `normal`: r = in - 2 (in . n) n.
__device__ Vector GlassMaterial::Reflect(const Vector& in, const Vector& normal) const {
    const float projection = Dot(in, normal);
    return in + (-2.f * projection) * normal;
}
// Transmission direction. A ray entering the surface (in . n < 0) is bent
// toward the surface by subtracting the normal and renormalizing; a ray
// leaving passes straight through. Note: `ior` is not used here.
__device__ Vector GlassMaterial::Transmit(const Vector& in, const Vector& normal) const {
    if (Dot(in, normal) < 0.f) {
        return Normalize(in - normal);
    }
    return in;
}
// Albedo is fixed at 1 for this material.
__device__ float GlassMaterial::GetAlbedo() const {
return 1.f;
}
__device__ MaterialType GlassMaterial::GetType() const {
return MT_GLASS;
} | 55529e194dcef5a2f4a94c1f963c9d888ae8e4fa.cu | #include "glassmaterial.h"
#include "math.h"
// Initializes a white material with an albedo of 1
// Default: clear white glass, 30% reflective / 70% transmissive, IOR 1.5.
__device__ GlassMaterial::GlassMaterial() : color(Color(1.f, 1.f, 1.f)), reflect(0.3f), transmit(0.7f), ior(1.5f) {
}
// Initializes a material with given color and albedo
// Constructs glass with explicit color, reflect/transmit weights and index
// of refraction; values are stored verbatim (no normalization of weights).
__device__ GlassMaterial::GlassMaterial(const Color& color, float reflect, float transmit, float ior) :
color(color), reflect(reflect), transmit(transmit), ior(ior) {
}
// Returns the color of this material
// Accessor for the material's base color.
__device__ Color GlassMaterial::GetColor() const {
return color;
}
// Bidirectional Reflectance Distribution Function
// BRDF evaluation. Constant 1 for this material; direction selection is
// handled entirely by GetSample. All parameters are unused.
__device__ float GlassMaterial::GetBRDF(const Vector& in, const Vector& out, const Vector& normal) const {
return 1.f;
}
// Probability Density Function for cosine-weighted hemisphere sampling
// Sampling PDF. Constant 1: GetSample is deterministic, so the single
// sampled direction has unit density. All parameters are unused.
__device__ float GlassMaterial::GetPDF(const Vector& in, const Vector& out, const Vector& normal) const {
return 1.f;
}
// Cosine weighted sampling on the unit hemisphere
// Returns the outgoing direction for an incoming ray. Despite the rng
// parameter, sampling is deterministic here: the ray is always transmitted
// (see Transmit); the `reflect` weight and rng are unused.
__device__ Vector GlassMaterial::GetSample(const Vector& in, const Vector& normal, curandState* rng) const {
return Transmit(in, normal);
}
// Returns the factor between incoming and outgoing radiance along given rays
// Radiance multiplier between incoming and outgoing rays. Constant 1:
// no attenuation is applied. All parameters are unused.
__device__ float GlassMaterial::GetMultiplier(const Vector& in, const Vector& out, const Vector& normal) const {
return 1.f;
}
// Returns the in vector reflected about the normal
// Mirrors `in` about `normal`: r = in - 2 (in . n) n.
__device__ Vector GlassMaterial::Reflect(const Vector& in, const Vector& normal) const {
    const float projection = Dot(in, normal);
    return in + (-2.f * projection) * normal;
}
// Transmission direction. A ray entering the surface (in . n < 0) is bent
// toward the surface by subtracting the normal and renormalizing; a ray
// leaving passes straight through. Note: `ior` is not used here.
__device__ Vector GlassMaterial::Transmit(const Vector& in, const Vector& normal) const {
    if (Dot(in, normal) < 0.f) {
        return Normalize(in - normal);
    }
    return in;
}
// Albedo is fixed at 1 for this material.
__device__ float GlassMaterial::GetAlbedo() const {
return 1.f;
}
__device__ MaterialType GlassMaterial::GetType() const {
return MT_GLASS;
} |
13ea507f509555a66ec6a9319f30e9885ae85317.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
#include <assert.h>
#include <stdio.h>
#include "star3d2r-32x32-2-128_kernel.hu"
#define BENCH_DIM 3
#define BENCH_FPP 25
#define BENCH_RAD 2
#include "common.h"
double kernel_stencil(SB_TYPE *A1, int compsize, int timestep, bool scop)
{
double start_time = sb_time(), end_time = 0.0;
int dimsize = compsize + BENCH_RAD * 2;
SB_TYPE (*A)[dimsize][dimsize][dimsize]
= (SB_TYPE (*)[dimsize][dimsize][dimsize])A1;
if (scop) {
if (dimsize >= 5 && timestep >= 1) {
#define cudaCheckReturn(ret) \
do { \
hipError_t cudaCheckReturn_e = (ret); \
if (cudaCheckReturn_e != hipSuccess) { \
fprintf(stderr, "CUDA error: %s\n", hipGetErrorString(cudaCheckReturn_e)); \
fflush(stderr); \
} \
assert(cudaCheckReturn_e == hipSuccess); \
} while(0)
#define cudaCheckKernel() \
do { \
cudaCheckReturn(hipGetLastError()); \
} while(0)
float *dev_A;
cudaCheckReturn(hipMalloc((void **) &dev_A, (size_t)(2) * (size_t)(dimsize) * (size_t)(dimsize) * (size_t)(dimsize) * sizeof(float)));
{
cudaCheckReturn(hipMemcpy(dev_A, A, (size_t)(2) * (size_t)(dimsize) * (size_t)(dimsize) * (size_t)(dimsize) * sizeof(float), hipMemcpyHostToDevice));
#ifdef STENCILBENCH
hipDeviceSynchronize();
SB_START_INSTRUMENTS;
#endif
}
{
#ifndef AN5D_TYPE
#define AN5D_TYPE unsigned
#endif
const AN5D_TYPE __c0Len = (timestep - 0);
const AN5D_TYPE __c0Pad = (0);
#define __c0 c0
const AN5D_TYPE __c1Len = (dimsize - 2 - 2);
const AN5D_TYPE __c1Pad = (2);
#define __c1 c1
const AN5D_TYPE __c2Len = (dimsize - 2 - 2);
const AN5D_TYPE __c2Pad = (2);
#define __c2 c2
const AN5D_TYPE __c3Len = (dimsize - 2 - 2);
const AN5D_TYPE __c3Pad = (2);
#define __c3 c3
const AN5D_TYPE __halo1 = 2;
const AN5D_TYPE __halo2 = 2;
const AN5D_TYPE __halo3 = 2;
AN5D_TYPE c0;
AN5D_TYPE __side0LenMax;
{
const AN5D_TYPE __side0Len = 2;
const AN5D_TYPE __side1Len = 128;
const AN5D_TYPE __side2Len = 24;
const AN5D_TYPE __side3Len = 24;
const AN5D_TYPE __OlLen1 = (__halo1 * __side0Len);
const AN5D_TYPE __OlLen2 = (__halo2 * __side0Len);
const AN5D_TYPE __OlLen3 = (__halo3 * __side0Len);
const AN5D_TYPE __side1LenOl = (__side1Len + 2 * __OlLen1);
const AN5D_TYPE __side2LenOl = (__side2Len + 2 * __OlLen2);
const AN5D_TYPE __side3LenOl = (__side3Len + 2 * __OlLen3);
const AN5D_TYPE __blockSize = 1 * __side2LenOl * __side3LenOl;
assert((__side1Len >= 2 * __side0Len * __halo1) && (__c1Len % __side1Len == 0 || __c1Len % __side1Len >= 2 * __side0Len * __halo1) && "[AN5D ERROR] Too short stream");
dim3 k0_dimBlock(__blockSize, 1, 1);
dim3 k0_dimGrid(1 * ((__c1Len + __side1Len - 1) / __side1Len) * ((__c2Len + __side2Len - 1) / __side2Len) * ((__c3Len + __side3Len - 1) / __side3Len), 1, 1);
AN5D_TYPE __c0Padr = (__c0Len % 2) != (((__c0Len + __side0Len - 1) / __side0Len) % 2) && __c0Len % __side0Len < 2 ? 1 : 0;
__side0LenMax = __side0Len;
for (c0 = __c0Pad; c0 < __c0Pad + __c0Len / __side0Len - __c0Padr; c0 += 1)
{
hipLaunchKernelGGL(( kernel0_2), dim3(k0_dimGrid), dim3(k0_dimBlock), 0, 0, dev_A, dimsize, timestep, c0);
}
}
if ((__c0Len % 2) != (((__c0Len + __side0LenMax - 1) / __side0LenMax) % 2))
{
if (__c0Len % __side0LenMax == 0)
{
{
const AN5D_TYPE __side0Len = 1;
const AN5D_TYPE __side1Len = 128;
const AN5D_TYPE __side2Len = 28;
const AN5D_TYPE __side3Len = 28;
const AN5D_TYPE __OlLen1 = (__halo1 * __side0Len);
const AN5D_TYPE __OlLen2 = (__halo2 * __side0Len);
const AN5D_TYPE __OlLen3 = (__halo3 * __side0Len);
const AN5D_TYPE __side1LenOl = (__side1Len + 2 * __OlLen1);
const AN5D_TYPE __side2LenOl = (__side2Len + 2 * __OlLen2);
const AN5D_TYPE __side3LenOl = (__side3Len + 2 * __OlLen3);
const AN5D_TYPE __blockSize = 1 * __side2LenOl * __side3LenOl;
assert((__side1Len >= 2 * __side0Len * __halo1) && (__c1Len % __side1Len == 0 || __c1Len % __side1Len >= 2 * __side0Len * __halo1) && "[AN5D ERROR] Too short stream");
dim3 k0_dimBlock(__blockSize, 1, 1);
dim3 k0_dimGrid(1 * ((__c1Len + __side1Len - 1) / __side1Len) * ((__c2Len + __side2Len - 1) / __side2Len) * ((__c3Len + __side3Len - 1) / __side3Len), 1, 1);
hipLaunchKernelGGL(( kernel0_1), dim3(k0_dimGrid), dim3(k0_dimBlock), 0, 0, dev_A, dimsize, timestep, c0);
}
c0 += 1;
{
const AN5D_TYPE __side0Len = 1;
const AN5D_TYPE __side1Len = 128;
const AN5D_TYPE __side2Len = 28;
const AN5D_TYPE __side3Len = 28;
const AN5D_TYPE __OlLen1 = (__halo1 * __side0Len);
const AN5D_TYPE __OlLen2 = (__halo2 * __side0Len);
const AN5D_TYPE __OlLen3 = (__halo3 * __side0Len);
const AN5D_TYPE __side1LenOl = (__side1Len + 2 * __OlLen1);
const AN5D_TYPE __side2LenOl = (__side2Len + 2 * __OlLen2);
const AN5D_TYPE __side3LenOl = (__side3Len + 2 * __OlLen3);
const AN5D_TYPE __blockSize = 1 * __side2LenOl * __side3LenOl;
assert((__side1Len >= 2 * __side0Len * __halo1) && (__c1Len % __side1Len == 0 || __c1Len % __side1Len >= 2 * __side0Len * __halo1) && "[AN5D ERROR] Too short stream");
dim3 k0_dimBlock(__blockSize, 1, 1);
dim3 k0_dimGrid(1 * ((__c1Len + __side1Len - 1) / __side1Len) * ((__c2Len + __side2Len - 1) / __side2Len) * ((__c3Len + __side3Len - 1) / __side3Len), 1, 1);
hipLaunchKernelGGL(( kernel0_1), dim3(k0_dimGrid), dim3(k0_dimBlock), 0, 0, dev_A, dimsize, timestep, c0);
}
}
else if (__c0Len % __side0LenMax == 1)
{
{
const AN5D_TYPE __side0Len = 1;
const AN5D_TYPE __side1Len = 128;
const AN5D_TYPE __side2Len = 28;
const AN5D_TYPE __side3Len = 28;
const AN5D_TYPE __OlLen1 = (__halo1 * __side0Len);
const AN5D_TYPE __OlLen2 = (__halo2 * __side0Len);
const AN5D_TYPE __OlLen3 = (__halo3 * __side0Len);
const AN5D_TYPE __side1LenOl = (__side1Len + 2 * __OlLen1);
const AN5D_TYPE __side2LenOl = (__side2Len + 2 * __OlLen2);
const AN5D_TYPE __side3LenOl = (__side3Len + 2 * __OlLen3);
const AN5D_TYPE __blockSize = 1 * __side2LenOl * __side3LenOl;
assert((__side1Len >= 2 * __side0Len * __halo1) && (__c1Len % __side1Len == 0 || __c1Len % __side1Len >= 2 * __side0Len * __halo1) && "[AN5D ERROR] Too short stream");
dim3 k0_dimBlock(__blockSize, 1, 1);
dim3 k0_dimGrid(1 * ((__c1Len + __side1Len - 1) / __side1Len) * ((__c2Len + __side2Len - 1) / __side2Len) * ((__c3Len + __side3Len - 1) / __side3Len), 1, 1);
hipLaunchKernelGGL(( kernel0_1), dim3(k0_dimGrid), dim3(k0_dimBlock), 0, 0, dev_A, dimsize, timestep, c0);
}
c0 += 1;
{
const AN5D_TYPE __side0Len = 1;
const AN5D_TYPE __side1Len = 128;
const AN5D_TYPE __side2Len = 28;
const AN5D_TYPE __side3Len = 28;
const AN5D_TYPE __OlLen1 = (__halo1 * __side0Len);
const AN5D_TYPE __OlLen2 = (__halo2 * __side0Len);
const AN5D_TYPE __OlLen3 = (__halo3 * __side0Len);
const AN5D_TYPE __side1LenOl = (__side1Len + 2 * __OlLen1);
const AN5D_TYPE __side2LenOl = (__side2Len + 2 * __OlLen2);
const AN5D_TYPE __side3LenOl = (__side3Len + 2 * __OlLen3);
const AN5D_TYPE __blockSize = 1 * __side2LenOl * __side3LenOl;
assert((__side1Len >= 2 * __side0Len * __halo1) && (__c1Len % __side1Len == 0 || __c1Len % __side1Len >= 2 * __side0Len * __halo1) && "[AN5D ERROR] Too short stream");
dim3 k0_dimBlock(__blockSize, 1, 1);
dim3 k0_dimGrid(1 * ((__c1Len + __side1Len - 1) / __side1Len) * ((__c2Len + __side2Len - 1) / __side2Len) * ((__c3Len + __side3Len - 1) / __side3Len), 1, 1);
hipLaunchKernelGGL(( kernel0_1), dim3(k0_dimGrid), dim3(k0_dimBlock), 0, 0, dev_A, dimsize, timestep, c0);
}
c0 += 1;
{
const AN5D_TYPE __side0Len = 1;
const AN5D_TYPE __side1Len = 128;
const AN5D_TYPE __side2Len = 28;
const AN5D_TYPE __side3Len = 28;
const AN5D_TYPE __OlLen1 = (__halo1 * __side0Len);
const AN5D_TYPE __OlLen2 = (__halo2 * __side0Len);
const AN5D_TYPE __OlLen3 = (__halo3 * __side0Len);
const AN5D_TYPE __side1LenOl = (__side1Len + 2 * __OlLen1);
const AN5D_TYPE __side2LenOl = (__side2Len + 2 * __OlLen2);
const AN5D_TYPE __side3LenOl = (__side3Len + 2 * __OlLen3);
const AN5D_TYPE __blockSize = 1 * __side2LenOl * __side3LenOl;
assert((__side1Len >= 2 * __side0Len * __halo1) && (__c1Len % __side1Len == 0 || __c1Len % __side1Len >= 2 * __side0Len * __halo1) && "[AN5D ERROR] Too short stream");
dim3 k0_dimBlock(__blockSize, 1, 1);
dim3 k0_dimGrid(1 * ((__c1Len + __side1Len - 1) / __side1Len) * ((__c2Len + __side2Len - 1) / __side2Len) * ((__c3Len + __side3Len - 1) / __side3Len), 1, 1);
hipLaunchKernelGGL(( kernel0_1), dim3(k0_dimGrid), dim3(k0_dimBlock), 0, 0, dev_A, dimsize, timestep, c0);
}
}
}
else if (__c0Len % __side0LenMax)
{
if (__c0Len % __side0LenMax == 1)
{
const AN5D_TYPE __side0Len = 1;
const AN5D_TYPE __side1Len = 128;
const AN5D_TYPE __side2Len = 28;
const AN5D_TYPE __side3Len = 28;
const AN5D_TYPE __OlLen1 = (__halo1 * __side0Len);
const AN5D_TYPE __OlLen2 = (__halo2 * __side0Len);
const AN5D_TYPE __OlLen3 = (__halo3 * __side0Len);
const AN5D_TYPE __side1LenOl = (__side1Len + 2 * __OlLen1);
const AN5D_TYPE __side2LenOl = (__side2Len + 2 * __OlLen2);
const AN5D_TYPE __side3LenOl = (__side3Len + 2 * __OlLen3);
const AN5D_TYPE __blockSize = 1 * __side2LenOl * __side3LenOl;
assert((__side1Len >= 2 * __side0Len * __halo1) && (__c1Len % __side1Len == 0 || __c1Len % __side1Len >= 2 * __side0Len * __halo1) && "[AN5D ERROR] Too short stream");
dim3 k0_dimBlock(__blockSize, 1, 1);
dim3 k0_dimGrid(1 * ((__c1Len + __side1Len - 1) / __side1Len) * ((__c2Len + __side2Len - 1) / __side2Len) * ((__c3Len + __side3Len - 1) / __side3Len), 1, 1);
hipLaunchKernelGGL(( kernel0_1), dim3(k0_dimGrid), dim3(k0_dimBlock), 0, 0, dev_A, dimsize, timestep, c0);
}
}
}
cudaCheckKernel();
{
#ifdef STENCILBENCH
hipDeviceSynchronize();
SB_STOP_INSTRUMENTS;
#endif
cudaCheckReturn(hipMemcpy(A, dev_A, (size_t)(2) * (size_t)(dimsize) * (size_t)(dimsize) * (size_t)(dimsize) * sizeof(float), hipMemcpyDeviceToHost));
}
cudaCheckReturn(hipFree(dev_A));
}
}
else {
for (int t = 0; t < timestep; t++)
#pragma omp parallel for
for (int i = BENCH_RAD; i < dimsize - BENCH_RAD; i++)
for (int j = BENCH_RAD; j < dimsize - BENCH_RAD; j++)
for (int k = BENCH_RAD; k < dimsize - BENCH_RAD; k++)
A[(t+1)%2][i][j][k] =
0.2500f * A[t%2][i][j][k]
+ 0.0620f * A[t%2][i-1][j][k] + 0.0621f * A[t%2][i+1][j][k]
+ 0.0622f * A[t%2][i][j-1][k] + 0.0623f * A[t%2][i][j+1][k]
+ 0.0624f * A[t%2][i][j][k-1] + 0.06245f * A[t%2][i][j][k+1]
+ 0.06255f * A[t%2][i-2][j][k] + 0.0626f * A[t%2][i+2][j][k]
+ 0.0627f * A[t%2][i][j-2][k] + 0.0628f * A[t%2][i][j+2][k]
+ 0.0629f * A[t%2][i][j][k-2] + 0.0630f * A[t%2][i][j][k+2];
}
return (((end_time != 0.0) ? end_time : sb_time()) - start_time);
}
| 13ea507f509555a66ec6a9319f30e9885ae85317.cu | #include <assert.h>
#include <stdio.h>
#include "star3d2r-32x32-2-128_kernel.hu"
#define BENCH_DIM 3
#define BENCH_FPP 25
#define BENCH_RAD 2
#include "common.h"
double kernel_stencil(SB_TYPE *A1, int compsize, int timestep, bool scop)
{
double start_time = sb_time(), end_time = 0.0;
int dimsize = compsize + BENCH_RAD * 2;
SB_TYPE (*A)[dimsize][dimsize][dimsize]
= (SB_TYPE (*)[dimsize][dimsize][dimsize])A1;
if (scop) {
if (dimsize >= 5 && timestep >= 1) {
#define cudaCheckReturn(ret) \
do { \
cudaError_t cudaCheckReturn_e = (ret); \
if (cudaCheckReturn_e != cudaSuccess) { \
fprintf(stderr, "CUDA error: %s\n", cudaGetErrorString(cudaCheckReturn_e)); \
fflush(stderr); \
} \
assert(cudaCheckReturn_e == cudaSuccess); \
} while(0)
#define cudaCheckKernel() \
do { \
cudaCheckReturn(cudaGetLastError()); \
} while(0)
float *dev_A;
cudaCheckReturn(cudaMalloc((void **) &dev_A, (size_t)(2) * (size_t)(dimsize) * (size_t)(dimsize) * (size_t)(dimsize) * sizeof(float)));
{
cudaCheckReturn(cudaMemcpy(dev_A, A, (size_t)(2) * (size_t)(dimsize) * (size_t)(dimsize) * (size_t)(dimsize) * sizeof(float), cudaMemcpyHostToDevice));
#ifdef STENCILBENCH
cudaDeviceSynchronize();
SB_START_INSTRUMENTS;
#endif
}
{
#ifndef AN5D_TYPE
#define AN5D_TYPE unsigned
#endif
const AN5D_TYPE __c0Len = (timestep - 0);
const AN5D_TYPE __c0Pad = (0);
#define __c0 c0
const AN5D_TYPE __c1Len = (dimsize - 2 - 2);
const AN5D_TYPE __c1Pad = (2);
#define __c1 c1
const AN5D_TYPE __c2Len = (dimsize - 2 - 2);
const AN5D_TYPE __c2Pad = (2);
#define __c2 c2
const AN5D_TYPE __c3Len = (dimsize - 2 - 2);
const AN5D_TYPE __c3Pad = (2);
#define __c3 c3
const AN5D_TYPE __halo1 = 2;
const AN5D_TYPE __halo2 = 2;
const AN5D_TYPE __halo3 = 2;
AN5D_TYPE c0;
AN5D_TYPE __side0LenMax;
{
const AN5D_TYPE __side0Len = 2;
const AN5D_TYPE __side1Len = 128;
const AN5D_TYPE __side2Len = 24;
const AN5D_TYPE __side3Len = 24;
const AN5D_TYPE __OlLen1 = (__halo1 * __side0Len);
const AN5D_TYPE __OlLen2 = (__halo2 * __side0Len);
const AN5D_TYPE __OlLen3 = (__halo3 * __side0Len);
const AN5D_TYPE __side1LenOl = (__side1Len + 2 * __OlLen1);
const AN5D_TYPE __side2LenOl = (__side2Len + 2 * __OlLen2);
const AN5D_TYPE __side3LenOl = (__side3Len + 2 * __OlLen3);
const AN5D_TYPE __blockSize = 1 * __side2LenOl * __side3LenOl;
assert((__side1Len >= 2 * __side0Len * __halo1) && (__c1Len % __side1Len == 0 || __c1Len % __side1Len >= 2 * __side0Len * __halo1) && "[AN5D ERROR] Too short stream");
dim3 k0_dimBlock(__blockSize, 1, 1);
dim3 k0_dimGrid(1 * ((__c1Len + __side1Len - 1) / __side1Len) * ((__c2Len + __side2Len - 1) / __side2Len) * ((__c3Len + __side3Len - 1) / __side3Len), 1, 1);
AN5D_TYPE __c0Padr = (__c0Len % 2) != (((__c0Len + __side0Len - 1) / __side0Len) % 2) && __c0Len % __side0Len < 2 ? 1 : 0;
__side0LenMax = __side0Len;
for (c0 = __c0Pad; c0 < __c0Pad + __c0Len / __side0Len - __c0Padr; c0 += 1)
{
kernel0_2<<<k0_dimGrid, k0_dimBlock>>> (dev_A, dimsize, timestep, c0);
}
}
if ((__c0Len % 2) != (((__c0Len + __side0LenMax - 1) / __side0LenMax) % 2))
{
if (__c0Len % __side0LenMax == 0)
{
{
const AN5D_TYPE __side0Len = 1;
const AN5D_TYPE __side1Len = 128;
const AN5D_TYPE __side2Len = 28;
const AN5D_TYPE __side3Len = 28;
const AN5D_TYPE __OlLen1 = (__halo1 * __side0Len);
const AN5D_TYPE __OlLen2 = (__halo2 * __side0Len);
const AN5D_TYPE __OlLen3 = (__halo3 * __side0Len);
const AN5D_TYPE __side1LenOl = (__side1Len + 2 * __OlLen1);
const AN5D_TYPE __side2LenOl = (__side2Len + 2 * __OlLen2);
const AN5D_TYPE __side3LenOl = (__side3Len + 2 * __OlLen3);
const AN5D_TYPE __blockSize = 1 * __side2LenOl * __side3LenOl;
assert((__side1Len >= 2 * __side0Len * __halo1) && (__c1Len % __side1Len == 0 || __c1Len % __side1Len >= 2 * __side0Len * __halo1) && "[AN5D ERROR] Too short stream");
dim3 k0_dimBlock(__blockSize, 1, 1);
dim3 k0_dimGrid(1 * ((__c1Len + __side1Len - 1) / __side1Len) * ((__c2Len + __side2Len - 1) / __side2Len) * ((__c3Len + __side3Len - 1) / __side3Len), 1, 1);
kernel0_1<<<k0_dimGrid, k0_dimBlock>>> (dev_A, dimsize, timestep, c0);
}
c0 += 1;
{
const AN5D_TYPE __side0Len = 1;
const AN5D_TYPE __side1Len = 128;
const AN5D_TYPE __side2Len = 28;
const AN5D_TYPE __side3Len = 28;
const AN5D_TYPE __OlLen1 = (__halo1 * __side0Len);
const AN5D_TYPE __OlLen2 = (__halo2 * __side0Len);
const AN5D_TYPE __OlLen3 = (__halo3 * __side0Len);
const AN5D_TYPE __side1LenOl = (__side1Len + 2 * __OlLen1);
const AN5D_TYPE __side2LenOl = (__side2Len + 2 * __OlLen2);
const AN5D_TYPE __side3LenOl = (__side3Len + 2 * __OlLen3);
const AN5D_TYPE __blockSize = 1 * __side2LenOl * __side3LenOl;
assert((__side1Len >= 2 * __side0Len * __halo1) && (__c1Len % __side1Len == 0 || __c1Len % __side1Len >= 2 * __side0Len * __halo1) && "[AN5D ERROR] Too short stream");
dim3 k0_dimBlock(__blockSize, 1, 1);
dim3 k0_dimGrid(1 * ((__c1Len + __side1Len - 1) / __side1Len) * ((__c2Len + __side2Len - 1) / __side2Len) * ((__c3Len + __side3Len - 1) / __side3Len), 1, 1);
kernel0_1<<<k0_dimGrid, k0_dimBlock>>> (dev_A, dimsize, timestep, c0);
}
}
else if (__c0Len % __side0LenMax == 1)
{
{
const AN5D_TYPE __side0Len = 1;
const AN5D_TYPE __side1Len = 128;
const AN5D_TYPE __side2Len = 28;
const AN5D_TYPE __side3Len = 28;
const AN5D_TYPE __OlLen1 = (__halo1 * __side0Len);
const AN5D_TYPE __OlLen2 = (__halo2 * __side0Len);
const AN5D_TYPE __OlLen3 = (__halo3 * __side0Len);
const AN5D_TYPE __side1LenOl = (__side1Len + 2 * __OlLen1);
const AN5D_TYPE __side2LenOl = (__side2Len + 2 * __OlLen2);
const AN5D_TYPE __side3LenOl = (__side3Len + 2 * __OlLen3);
const AN5D_TYPE __blockSize = 1 * __side2LenOl * __side3LenOl;
assert((__side1Len >= 2 * __side0Len * __halo1) && (__c1Len % __side1Len == 0 || __c1Len % __side1Len >= 2 * __side0Len * __halo1) && "[AN5D ERROR] Too short stream");
dim3 k0_dimBlock(__blockSize, 1, 1);
dim3 k0_dimGrid(1 * ((__c1Len + __side1Len - 1) / __side1Len) * ((__c2Len + __side2Len - 1) / __side2Len) * ((__c3Len + __side3Len - 1) / __side3Len), 1, 1);
kernel0_1<<<k0_dimGrid, k0_dimBlock>>> (dev_A, dimsize, timestep, c0);
}
c0 += 1;
{
const AN5D_TYPE __side0Len = 1;
const AN5D_TYPE __side1Len = 128;
const AN5D_TYPE __side2Len = 28;
const AN5D_TYPE __side3Len = 28;
const AN5D_TYPE __OlLen1 = (__halo1 * __side0Len);
const AN5D_TYPE __OlLen2 = (__halo2 * __side0Len);
const AN5D_TYPE __OlLen3 = (__halo3 * __side0Len);
const AN5D_TYPE __side1LenOl = (__side1Len + 2 * __OlLen1);
const AN5D_TYPE __side2LenOl = (__side2Len + 2 * __OlLen2);
const AN5D_TYPE __side3LenOl = (__side3Len + 2 * __OlLen3);
const AN5D_TYPE __blockSize = 1 * __side2LenOl * __side3LenOl;
assert((__side1Len >= 2 * __side0Len * __halo1) && (__c1Len % __side1Len == 0 || __c1Len % __side1Len >= 2 * __side0Len * __halo1) && "[AN5D ERROR] Too short stream");
dim3 k0_dimBlock(__blockSize, 1, 1);
dim3 k0_dimGrid(1 * ((__c1Len + __side1Len - 1) / __side1Len) * ((__c2Len + __side2Len - 1) / __side2Len) * ((__c3Len + __side3Len - 1) / __side3Len), 1, 1);
kernel0_1<<<k0_dimGrid, k0_dimBlock>>> (dev_A, dimsize, timestep, c0);
}
c0 += 1;
{
const AN5D_TYPE __side0Len = 1;
const AN5D_TYPE __side1Len = 128;
const AN5D_TYPE __side2Len = 28;
const AN5D_TYPE __side3Len = 28;
const AN5D_TYPE __OlLen1 = (__halo1 * __side0Len);
const AN5D_TYPE __OlLen2 = (__halo2 * __side0Len);
const AN5D_TYPE __OlLen3 = (__halo3 * __side0Len);
const AN5D_TYPE __side1LenOl = (__side1Len + 2 * __OlLen1);
const AN5D_TYPE __side2LenOl = (__side2Len + 2 * __OlLen2);
const AN5D_TYPE __side3LenOl = (__side3Len + 2 * __OlLen3);
const AN5D_TYPE __blockSize = 1 * __side2LenOl * __side3LenOl;
assert((__side1Len >= 2 * __side0Len * __halo1) && (__c1Len % __side1Len == 0 || __c1Len % __side1Len >= 2 * __side0Len * __halo1) && "[AN5D ERROR] Too short stream");
dim3 k0_dimBlock(__blockSize, 1, 1);
dim3 k0_dimGrid(1 * ((__c1Len + __side1Len - 1) / __side1Len) * ((__c2Len + __side2Len - 1) / __side2Len) * ((__c3Len + __side3Len - 1) / __side3Len), 1, 1);
kernel0_1<<<k0_dimGrid, k0_dimBlock>>> (dev_A, dimsize, timestep, c0);
}
}
}
else if (__c0Len % __side0LenMax)
{
if (__c0Len % __side0LenMax == 1)
{
const AN5D_TYPE __side0Len = 1;
const AN5D_TYPE __side1Len = 128;
const AN5D_TYPE __side2Len = 28;
const AN5D_TYPE __side3Len = 28;
const AN5D_TYPE __OlLen1 = (__halo1 * __side0Len);
const AN5D_TYPE __OlLen2 = (__halo2 * __side0Len);
const AN5D_TYPE __OlLen3 = (__halo3 * __side0Len);
const AN5D_TYPE __side1LenOl = (__side1Len + 2 * __OlLen1);
const AN5D_TYPE __side2LenOl = (__side2Len + 2 * __OlLen2);
const AN5D_TYPE __side3LenOl = (__side3Len + 2 * __OlLen3);
const AN5D_TYPE __blockSize = 1 * __side2LenOl * __side3LenOl;
assert((__side1Len >= 2 * __side0Len * __halo1) && (__c1Len % __side1Len == 0 || __c1Len % __side1Len >= 2 * __side0Len * __halo1) && "[AN5D ERROR] Too short stream");
dim3 k0_dimBlock(__blockSize, 1, 1);
dim3 k0_dimGrid(1 * ((__c1Len + __side1Len - 1) / __side1Len) * ((__c2Len + __side2Len - 1) / __side2Len) * ((__c3Len + __side3Len - 1) / __side3Len), 1, 1);
kernel0_1<<<k0_dimGrid, k0_dimBlock>>> (dev_A, dimsize, timestep, c0);
}
}
}
cudaCheckKernel();
{
#ifdef STENCILBENCH
cudaDeviceSynchronize();
SB_STOP_INSTRUMENTS;
#endif
cudaCheckReturn(cudaMemcpy(A, dev_A, (size_t)(2) * (size_t)(dimsize) * (size_t)(dimsize) * (size_t)(dimsize) * sizeof(float), cudaMemcpyDeviceToHost));
}
cudaCheckReturn(cudaFree(dev_A));
}
}
else {
for (int t = 0; t < timestep; t++)
#pragma omp parallel for
for (int i = BENCH_RAD; i < dimsize - BENCH_RAD; i++)
for (int j = BENCH_RAD; j < dimsize - BENCH_RAD; j++)
for (int k = BENCH_RAD; k < dimsize - BENCH_RAD; k++)
A[(t+1)%2][i][j][k] =
0.2500f * A[t%2][i][j][k]
+ 0.0620f * A[t%2][i-1][j][k] + 0.0621f * A[t%2][i+1][j][k]
+ 0.0622f * A[t%2][i][j-1][k] + 0.0623f * A[t%2][i][j+1][k]
+ 0.0624f * A[t%2][i][j][k-1] + 0.06245f * A[t%2][i][j][k+1]
+ 0.06255f * A[t%2][i-2][j][k] + 0.0626f * A[t%2][i+2][j][k]
+ 0.0627f * A[t%2][i][j-2][k] + 0.0628f * A[t%2][i][j+2][k]
+ 0.0629f * A[t%2][i][j][k-2] + 0.0630f * A[t%2][i][j][k+2];
}
return (((end_time != 0.0) ? end_time : sb_time()) - start_time);
}
|
c1dc2b179b0062bdd6821c4c8674549d08c06e89.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
#include<stdio.h>
__global__ void add2(int *a)
{
*a = *a + 2;
}
int main( void )
{
int *data_h, *data_d;
hipMalloc( (void**)&data_d, sizeof(int));
//data_d = (int *)malloc(sizeof(int));
data_h = (int *)malloc(sizeof(int));
*data_h = 5;
hipMemcpy( data_d, data_h, sizeof(int), hipMemcpyHostToDevice );
hipLaunchKernelGGL(( add2), dim3(1),dim3(1), 0, 0, data_d);
hipMemcpy(data_h, data_d, sizeof(int), hipMemcpyDeviceToHost );
printf("data: %d\n", *data_h);
hipFree(data_d);
free(data_h);
return 0;
}
//Res = 7
| c1dc2b179b0062bdd6821c4c8674549d08c06e89.cu | #include<stdio.h>
__global__ void add2(int *a)
{
*a = *a + 2;
}
int main( void )
{
int *data_h, *data_d;
cudaMalloc( (void**)&data_d, sizeof(int));
//data_d = (int *)malloc(sizeof(int));
data_h = (int *)malloc(sizeof(int));
*data_h = 5;
cudaMemcpy( data_d, data_h, sizeof(int), cudaMemcpyHostToDevice );
add2<<<1,1>>>(data_d);
cudaMemcpy(data_h, data_d, sizeof(int), cudaMemcpyDeviceToHost );
printf("data: %d\n", *data_h);
cudaFree(data_d);
free(data_h);
return 0;
}
//Res = 7
|
a4e3f70c1b1b3914796d4e4c91e25e42c8a478f0.hip | // !!! This is a file automatically generated by hipify!!!
#include <torch/types.h>
#include <torch/extension.h>
// bugs with copy_to_
// #include <ATen/hip/HIPApplyUtils.cuh>
#include "CUDAApplyUtils.cuh"
#include <hip/hip_runtime.h>
#include <hip/hip_runtime.h>
// TORCH_CHECK replaces AT_CHECK in PyTorch 1,2, support 1.1 as well.
#ifndef TORCH_CHECK
#define TORCH_CHECK AT_CHECK
#endif
#ifndef __CUDACC_EXTENDED_LAMBDA__
#error "please compile with --expt-extended-lambda"
#endif
namespace kernel {
#include "swish_cuda.h"
using at::cuda::CUDA_tensor_apply2;
using at::cuda::CUDA_tensor_apply3;
using at::cuda::TensorArgType;
template <typename scalar_t>
void
swish_forward(
torch::Tensor &output,
const torch::Tensor &input
) {
CUDA_tensor_apply2<scalar_t,scalar_t>(
output, input,
[=] __host__ __device__ (scalar_t &out, const scalar_t &inp) {
swish_fwd_func(out, inp);
},
TensorArgType::ReadWrite, TensorArgType::ReadOnly
);
}
template <typename scalar_t>
void
swish_backward(
torch::Tensor &grad_inp,
const torch::Tensor &input,
const torch::Tensor &grad_out
) {
CUDA_tensor_apply3<scalar_t,scalar_t,scalar_t>(
grad_inp, input, grad_out,
[=] __host__ __device__ (scalar_t &grad_inp, const scalar_t &inp, const scalar_t &grad_out) {
swish_bwd_func(grad_inp, inp, grad_out);
},
TensorArgType::ReadWrite, TensorArgType::ReadOnly, TensorArgType::ReadOnly
);
}
} // namespace kernel
void
swish_forward_cuda(
torch::Tensor &output, const torch::Tensor &input
) {
auto in_arg = torch::TensorArg(input, "input", 0),
out_arg = torch::TensorArg(output, "output", 1);
torch::checkAllDefined("swish_forward_cuda", {in_arg, out_arg});
torch::checkAllSameGPU("swish_forward_cuda", {in_arg, out_arg});
AT_DISPATCH_FLOATING_TYPES_AND_HALF(input.scalar_type(), "swish_forward_cuda", [&] {
kernel::swish_forward<scalar_t>(output, input);
});
}
void
swish_backward_cuda(
torch::Tensor &grad_inp, const torch::Tensor &input, const torch::Tensor &grad_out
) {
auto gi_arg = torch::TensorArg(grad_inp, "grad_inp", 0),
in_arg = torch::TensorArg(input, "input", 1),
go_arg = torch::TensorArg(grad_out, "grad_out", 2);
torch::checkAllDefined("swish_backward_cuda", {gi_arg, in_arg, go_arg});
torch::checkAllSameGPU("swish_backward_cuda", {gi_arg, in_arg, go_arg});
AT_DISPATCH_FLOATING_TYPES_AND_HALF(grad_inp.scalar_type(), "swish_backward_cuda", [&] {
kernel::swish_backward<scalar_t>(grad_inp, input, grad_out);
});
}
| a4e3f70c1b1b3914796d4e4c91e25e42c8a478f0.cu | #include <torch/types.h>
#include <torch/extension.h>
// bugs with copy_to_
// #include <ATen/cuda/CUDAApplyUtils.cuh>
#include "CUDAApplyUtils.cuh"
#include <cuda.h>
#include <cuda_runtime.h>
// TORCH_CHECK replaces AT_CHECK in PyTorch 1,2, support 1.1 as well.
#ifndef TORCH_CHECK
#define TORCH_CHECK AT_CHECK
#endif
#ifndef __CUDACC_EXTENDED_LAMBDA__
#error "please compile with --expt-extended-lambda"
#endif
namespace kernel {
#include "swish_cuda.h"
using at::cuda::CUDA_tensor_apply2;
using at::cuda::CUDA_tensor_apply3;
using at::cuda::TensorArgType;
template <typename scalar_t>
void
swish_forward(
torch::Tensor &output,
const torch::Tensor &input
) {
CUDA_tensor_apply2<scalar_t,scalar_t>(
output, input,
[=] __host__ __device__ (scalar_t &out, const scalar_t &inp) {
swish_fwd_func(out, inp);
},
TensorArgType::ReadWrite, TensorArgType::ReadOnly
);
}
template <typename scalar_t>
void
swish_backward(
torch::Tensor &grad_inp,
const torch::Tensor &input,
const torch::Tensor &grad_out
) {
CUDA_tensor_apply3<scalar_t,scalar_t,scalar_t>(
grad_inp, input, grad_out,
[=] __host__ __device__ (scalar_t &grad_inp, const scalar_t &inp, const scalar_t &grad_out) {
swish_bwd_func(grad_inp, inp, grad_out);
},
TensorArgType::ReadWrite, TensorArgType::ReadOnly, TensorArgType::ReadOnly
);
}
} // namespace kernel
void
swish_forward_cuda(
torch::Tensor &output, const torch::Tensor &input
) {
auto in_arg = torch::TensorArg(input, "input", 0),
out_arg = torch::TensorArg(output, "output", 1);
torch::checkAllDefined("swish_forward_cuda", {in_arg, out_arg});
torch::checkAllSameGPU("swish_forward_cuda", {in_arg, out_arg});
AT_DISPATCH_FLOATING_TYPES_AND_HALF(input.scalar_type(), "swish_forward_cuda", [&] {
kernel::swish_forward<scalar_t>(output, input);
});
}
void
swish_backward_cuda(
torch::Tensor &grad_inp, const torch::Tensor &input, const torch::Tensor &grad_out
) {
auto gi_arg = torch::TensorArg(grad_inp, "grad_inp", 0),
in_arg = torch::TensorArg(input, "input", 1),
go_arg = torch::TensorArg(grad_out, "grad_out", 2);
torch::checkAllDefined("swish_backward_cuda", {gi_arg, in_arg, go_arg});
torch::checkAllSameGPU("swish_backward_cuda", {gi_arg, in_arg, go_arg});
AT_DISPATCH_FLOATING_TYPES_AND_HALF(grad_inp.scalar_type(), "swish_backward_cuda", [&] {
kernel::swish_backward<scalar_t>(grad_inp, input, grad_out);
});
}
|
f6bc915d2ec34967c22e35cb736bff0f2cd065b1.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
/* Copyright (c) 2016 paddlepaddle Authors. All Rights Reserved.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License. */
#include <algorithm>
#include <vector>
#include "paddle/fluid/operators/math/pooling.h"
#include "paddle/fluid/platform/cuda_primitives.h"
namespace paddle {
namespace operators {
namespace math {
template <typename PoolProcess, typename T>
__global__ void KernelPool2D(const int nthreads, const T* input_data,
const int channels, const int input_height,
const int input_width, const int output_height,
const int output_width, const int ksize_height,
const int ksize_width, const int stride_height,
const int stride_width, const int padding_height,
const int padding_width, PoolProcess pool_process,
bool exclusive, bool adaptive, T* output_data) {
for (int index = blockIdx.x * blockDim.x + threadIdx.x; index < nthreads;
index += blockDim.x * gridDim.x) {
int pw = index % output_width;
int ph = (index / output_width) % output_height;
int c = (index / output_width / output_height) % channels;
int batch_idx = index / output_width / output_height / channels;
int hstart, hend;
int wstart, wend;
if (adaptive) {
hstart = AdaptStartIndex(ph, input_height, output_height);
hend = AdaptEndIndex(ph, input_height, output_height);
wstart = AdaptStartIndex(pw, input_width, output_width);
wend = AdaptEndIndex(pw, input_width, output_width);
} else {
hstart = ph * stride_height - padding_height;
hend = min(hstart + ksize_height, input_height);
hstart = max(hstart, 0);
wstart = pw * stride_width - padding_width;
wend = min(wstart + ksize_width, input_width);
wstart = max(wstart, 0);
}
input_data += (batch_idx * channels + c) * input_height * input_width;
T ele = pool_process.initial();
for (int h = hstart; h < hend; ++h) {
for (int w = wstart; w < wend; ++w) {
pool_process.compute(input_data[h * input_width + w], &ele);
}
}
int pool_size = (exclusive || adaptive) ? (hend - hstart) * (wend - wstart)
: ksize_height * ksize_width;
pool_process.finalize(static_cast<T>(pool_size), &ele);
output_data[index] = ele;
}
}
template <typename PoolProcess, typename T>
__global__ void KernelPool2DGrad(
const int nthreads, const T* input_data, const T* output_data,
const T* output_grad, const int channels, const int input_height,
const int input_width, const int output_height, const int output_width,
const int ksize_height, const int ksize_width, const int stride_height,
const int stride_width, const int padding_height, const int padding_width,
PoolProcess pool_process, bool exclusive, bool adaptive, T* input_grad) {
for (int index = blockIdx.x * blockDim.x + threadIdx.x; index < nthreads;
index += blockDim.x * gridDim.x) {
int w_offset = index % input_width + padding_width;
int h_offset = (index / input_width) % input_height + padding_height;
int offsetC = (index / input_width / input_height) % channels;
int batch_idx = index / input_width / input_height / channels;
int phstart, phend;
int pwstart, pwend;
if (adaptive) {
phstart = h_offset * output_height / input_height;
phend =
min((h_offset + 1) * output_height / input_height + 1, output_height);
pwstart = w_offset * output_width / input_width;
pwend =
min((w_offset + 1) * output_width / input_width + 1, output_width);
} else {
phstart = (h_offset < ksize_height)
? 0
: (h_offset - ksize_height) / stride_height + 1;
pwstart = (w_offset < ksize_width)
? 0
: (w_offset - ksize_width) / stride_width + 1;
phend = min(h_offset / stride_height + 1, output_height);
pwend = min(w_offset / stride_width + 1, output_width);
}
T gradient = 0;
T input = input_data[index];
int output_idx =
(batch_idx * channels + offsetC) * output_height * output_width;
output_data += output_idx;
output_grad += output_idx;
for (int ph = phstart; ph < phend; ++ph) {
for (int pw = pwstart; pw < pwend; ++pw) {
int pool_size;
if (adaptive) {
pool_size = static_cast<int>(ceil(static_cast<double>(input_height) /
ksize_height)) *
static_cast<int>(
ceil(static_cast<double>(input_width) / ksize_width));
} else {
int hstart = ph * stride_height - padding_height;
int wstart = pw * stride_width - padding_width;
int hend = min(hstart + ksize_height, input_height);
int wend = min(wstart + ksize_width, input_width);
hstart = max(hstart, 0);
wstart = max(wstart, 0);
pool_size = exclusive ? (hend - hstart) * (wend - wstart)
: ksize_height * ksize_width;
}
int output_sub_idx = ph * output_width + pw;
pool_process.compute(input, output_data[output_sub_idx],
output_grad[output_sub_idx],
static_cast<T>(1.0 / pool_size), &gradient);
}
}
input_grad[index] = gradient;
}
}
template <typename T>
__global__ void KernelMaxPool2DGrad(
const int nthreads, const T* input_data, const T* output_data,
const T* output_grad, const int channels, const int input_height,
const int input_width, const int output_height, const int output_width,
const int ksize_height, const int ksize_width, const int stride_height,
const int stride_width, const int padding_height, const int padding_width,
T* input_grad) {
for (int index = blockIdx.x * blockDim.x + threadIdx.x; index < nthreads;
index += blockDim.x * gridDim.x) {
int pw = index % output_width;
int ph = (index / output_width) % output_height;
int c = (index / output_width / output_height) % channels;
int batch_idx = index / output_width / output_height / channels;
int hstart = ph * stride_height - padding_height;
int hend = min(hstart + ksize_height, input_height);
hstart = max(hstart, 0);
int wstart = pw * stride_width - padding_width;
int wend = min(wstart + ksize_width, input_width);
wstart = max(wstart, 0);
input_data += (batch_idx * channels + c) * input_height * input_width;
input_grad += (batch_idx * channels + c) * input_height * input_width;
T ele = output_data[index];
int maxIndex = -1;
bool stop = false;
for (int h = hstart; h < hend && !stop; ++h) {
for (int w = wstart; w < wend && !stop; ++w) {
if (ele == input_data[h * input_width + w]) {
maxIndex = h * input_width + w;
stop = true;
}
}
}
if (maxIndex != -1) {
// atomic add
platform::CudaAtomicAdd(input_grad + maxIndex, output_grad[index]);
}
}
}
template <typename PoolProcess, typename T>
void Pool2dDirectCUDAFunctor<PoolProcess, T>::operator()(
const T* input, const std::vector<int>& input_shape,
const std::vector<int>& output_shape, const std::vector<int>& ksize,
const std::vector<int>& strides, const std::vector<int>& paddings,
PoolProcess pool_compute, bool exclusive, T* output, hipStream_t stream) {
const int batch_size = input_shape[0];
const int input_channels = input_shape[1];
const int input_height = input_shape[2];
const int input_width = input_shape[3];
const int output_channels = output_shape[1];
const int output_height = output_shape[2];
const int output_width = output_shape[3];
const int ksize_height = ksize[0];
const int ksize_width = ksize[1];
const int stride_height = strides[0];
const int stride_width = strides[1];
const int padding_height = paddings[0];
const int padding_width = paddings[1];
int nthreads = batch_size * output_channels * output_height * output_width;
int blocks = (nthreads + 1024 - 1) / 1024;
dim3 threads(1024, 1);
dim3 grid(blocks, 1);
hipLaunchKernelGGL(( KernelPool2D<PoolProcess, T>), dim3(grid), dim3(threads), 0, stream,
nthreads, input, input_channels, input_height, input_width, output_height,
output_width, ksize_height, ksize_width, stride_height, stride_width,
padding_height, padding_width, pool_compute, exclusive, false, output);
}
/*
* All tensors are in NCHW format.
* Ksize, strides, paddings are two elements. These two elements represent
* height and width, respectively.
*/
template <typename PoolProcess, typename T>
class Pool2dFunctor<platform::CUDADeviceContext, PoolProcess, T> {
 public:
  // Forward 2-D pooling over an NCHW input tensor on the context's stream.
  // ksize/strides/paddings each hold {height, width}; `pool_process`
  // supplies the per-window reduction (max or average), `exclusive` and
  // `adaptive` select how the averaging divisor / window bounds are chosen.
  // Launches one CUDA thread per output element.
  void operator()(const platform::CUDADeviceContext& context,
                  const framework::Tensor& input, const std::vector<int>& ksize,
                  const std::vector<int>& strides,
                  const std::vector<int>& paddings, PoolProcess pool_process,
                  bool exclusive, bool adaptive, framework::Tensor* output) {
    // Unpack the NCHW shapes once up front.
    const int batch = input.dims()[0];
    const int channels = input.dims()[1];
    const int in_h = input.dims()[2];
    const int in_w = input.dims()[3];
    const int out_c = output->dims()[1];
    const int out_h = output->dims()[2];
    const int out_w = output->dims()[3];
    const T* in_data = input.data<T>();
    T* out_data = output->mutable_data<T>(context.GetPlace());
    // One thread per output element, 1024 threads per block.
    const int nthreads = batch * out_c * out_h * out_w;
    constexpr int kBlockSize = 1024;
    dim3 threads(kBlockSize, 1);
    dim3 grid((nthreads + kBlockSize - 1) / kBlockSize, 1);
    hipLaunchKernelGGL(( KernelPool2D<PoolProcess, T>), dim3(grid), dim3(threads), 0, context.stream(),
        nthreads, in_data, channels, in_h, in_w, out_h, out_w, ksize[0],
        ksize[1], strides[0], strides[1], paddings[0], paddings[1],
        pool_process, exclusive, adaptive, out_data);
  }
};
/*
* All tensors are in NCHW format.
* Ksize, strides, paddings are two elements. These two elements represent
* height and width, respectively.
*/
// Backward 2-D pooling over NCHW tensors: launches KernelPool2DGrad with one
// thread per *input*-gradient element (each thread gathers contributions from
// every output window that covers its input position).
template <typename PoolProcess, typename T>
class Pool2dGradFunctor<platform::CUDADeviceContext, PoolProcess, T> {
 public:
  void operator()(const platform::CUDADeviceContext& context,
                  const framework::Tensor& input,
                  const framework::Tensor& output,
                  const framework::Tensor& output_grad,
                  const std::vector<int>& ksize,
                  const std::vector<int>& strides,
                  const std::vector<int>& paddings, PoolProcess pool_process,
                  bool exclusive, bool adaptive,
                  framework::Tensor* input_grad) {
    // NCHW shapes; ksize/strides/paddings are {height, width}.
    const int batch_size = input.dims()[0];
    const int input_channels = input.dims()[1];
    const int input_height = input.dims()[2];
    const int input_width = input.dims()[3];
    const int output_height = output.dims()[2];
    const int output_width = output.dims()[3];
    const int ksize_height = ksize[0];
    const int ksize_width = ksize[1];
    const int stride_height = strides[0];
    const int stride_width = strides[1];
    const int padding_height = paddings[0];
    const int padding_width = paddings[1];
    const T* input_data = input.data<T>();
    const T* output_data = output.data<T>();
    const T* output_grad_data = output_grad.data<T>();
    T* input_grad_data = input_grad->mutable_data<T>(context.GetPlace());
    // One thread per input element, 1024 threads per block.
    int nthreads = batch_size * input_channels * input_height * input_width;
    int blocks = (nthreads + 1024 - 1) / 1024;
    dim3 threads(1024, 1);
    dim3 grid(blocks, 1);
    hipLaunchKernelGGL(( KernelPool2DGrad<PoolProcess, T>), dim3(grid), dim3(threads), 0, context.stream(),
        nthreads, input_data, output_data, output_grad_data, input_channels,
        input_height, input_width, output_height, output_width, ksize_height,
        ksize_width, stride_height, stride_width, padding_height, padding_width,
        pool_process, exclusive, adaptive, input_grad_data);
  }
};
/*
* All tensors are in NCHW format.
* Ksize, strides, paddings are two elements. These two elements represent
* height and width, respectively.
*/
// Backward 2-D max pooling over NCHW tensors without a saved argmax mask:
// launches KernelMaxPool2DGrad with one thread per *output* element (the
// kernel re-derives the max position from input/output values).
template <typename T>
class MaxPool2dGradFunctor<platform::CUDADeviceContext, T> {
 public:
  void operator()(const platform::CUDADeviceContext& context,
                  const framework::Tensor& input,
                  const framework::Tensor& output,
                  const framework::Tensor& output_grad,
                  const std::vector<int>& ksize,
                  const std::vector<int>& strides,
                  const std::vector<int>& paddings,
                  framework::Tensor* input_grad) {
    // NCHW shapes; ksize/strides/paddings are {height, width}.
    const int batch_size = input.dims()[0];
    const int input_channels = input.dims()[1];
    const int input_height = input.dims()[2];
    const int input_width = input.dims()[3];
    const int output_channels = output.dims()[1];
    const int output_height = output.dims()[2];
    const int output_width = output.dims()[3];
    const int ksize_height = ksize[0];
    const int ksize_width = ksize[1];
    const int stride_height = strides[0];
    const int stride_width = strides[1];
    const int padding_height = paddings[0];
    const int padding_width = paddings[1];
    const T* input_data = input.data<T>();
    const T* output_data = output.data<T>();
    const T* output_grad_data = output_grad.data<T>();
    T* input_grad_data = input_grad->mutable_data<T>(context.GetPlace());
    // One thread per output element, 1024 threads per block.
    int nthreads = batch_size * output_channels * output_height * output_width;
    int blocks = (nthreads + 1024 - 1) / 1024;
    dim3 threads(1024, 1);
    dim3 grid(blocks, 1);
    hipLaunchKernelGGL(( KernelMaxPool2DGrad<T>), dim3(grid), dim3(threads), 0, context.stream(),
        nthreads, input_data, output_data, output_grad_data, input_channels,
        input_height, input_width, output_height, output_width, ksize_height,
        ksize_width, stride_height, stride_width, padding_height, padding_width,
        input_grad_data);
  }
};
// Explicit instantiations of the 2-D pooling functors for the supported
// element types (float, double) and pooling modes (max, average).
template class Pool2dDirectCUDAFunctor<paddle::operators::math::MaxPool<float>,
                                       float>;
template class Pool2dDirectCUDAFunctor<paddle::operators::math::AvgPool<float>,
                                       float>;
template class MaxPool2dGradFunctor<platform::CUDADeviceContext, float>;
template class MaxPool2dGradFunctor<platform::CUDADeviceContext, double>;
template class Pool2dFunctor<platform::CUDADeviceContext,
                             paddle::operators::math::MaxPool<float>, float>;
template class Pool2dFunctor<platform::CUDADeviceContext,
                             paddle::operators::math::AvgPool<float>, float>;
template class Pool2dGradFunctor<platform::CUDADeviceContext,
                                 paddle::operators::math::MaxPoolGrad<float>,
                                 float>;
template class Pool2dGradFunctor<platform::CUDADeviceContext,
                                 paddle::operators::math::AvgPoolGrad<float>,
                                 float>;
template class Pool2dFunctor<platform::CUDADeviceContext,
                             paddle::operators::math::MaxPool<double>, double>;
template class Pool2dFunctor<platform::CUDADeviceContext,
                             paddle::operators::math::AvgPool<double>, double>;
template class Pool2dGradFunctor<platform::CUDADeviceContext,
                                 paddle::operators::math::MaxPoolGrad<double>,
                                 double>;
template class Pool2dGradFunctor<platform::CUDADeviceContext,
                                 paddle::operators::math::AvgPoolGrad<double>,
                                 double>;
// Forward 3-D pooling kernel over NCDHW data; grid-stride loop, one thread
// per output element.  `pool_process` supplies initial()/compute()/finalize()
// for the reduction; for averaging, `exclusive`/`adaptive` make the divisor
// the actual (clipped) window volume instead of the nominal kernel volume.
template <typename PoolProcess, typename T>
__global__ void KernelPool3D(
    const int nthreads, const T* input_data, const int channels,
    const int input_depth, const int input_height, const int input_width,
    const int output_depth, const int output_height, const int output_width,
    const int ksize_depth, const int ksize_height, const int ksize_width,
    const int stride_depth, const int stride_height, const int stride_width,
    const int padding_depth, const int padding_height, const int padding_width,
    PoolProcess pool_process, bool exclusive, bool adaptive, T* output_data) {
  for (int index = blockIdx.x * blockDim.x + threadIdx.x; index < nthreads;
       index += blockDim.x * gridDim.x) {
    // Decompose the flat output index into (batch, c, pd, ph, pw).
    int pw = index % output_width;
    int ph = (index / output_width) % output_height;
    int pd = (index / output_width / output_height) % output_depth;
    int c = (index / output_width / output_height / output_depth) % channels;
    int batch_idx =
        index / output_width / output_height / output_depth / channels;
    int dstart, dend;
    int hstart, hend;
    int wstart, wend;
    if (adaptive) {
      // Adaptive pooling: window bounds derived from output/input ratio.
      dstart = AdaptStartIndex(pd, input_depth, output_depth);
      dend = AdaptEndIndex(pd, input_depth, output_depth);
      hstart = AdaptStartIndex(ph, input_height, output_height);
      hend = AdaptEndIndex(ph, input_height, output_height);
      wstart = AdaptStartIndex(pw, input_width, output_width);
      wend = AdaptEndIndex(pw, input_width, output_width);
    } else {
      // Standard pooling: window from stride/padding, clipped to the input.
      dstart = pd * stride_depth - padding_depth;
      hstart = ph * stride_height - padding_height;
      wstart = pw * stride_width - padding_width;
      dend = min(dstart + ksize_depth, input_depth);
      hend = min(hstart + ksize_height, input_height);
      wend = min(wstart + ksize_width, input_width);
      dstart = max(dstart, 0);
      hstart = max(hstart, 0);
      wstart = max(wstart, 0);
    }
    // Reduce the window for this thread's (batch, channel) feature volume.
    T ele = pool_process.initial();
    input_data +=
        (batch_idx * channels + c) * input_depth * input_height * input_width;
    for (int d = dstart; d < dend; ++d) {
      for (int h = hstart; h < hend; ++h) {
        for (int w = wstart; w < wend; ++w) {
          pool_process.compute(
              input_data[(d * input_height + h) * input_width + w], &ele);
        }
      }
    }
    // Divisor for averaging: clipped window volume vs. nominal kernel volume.
    int pool_size = (exclusive || adaptive)
                        ? (dend - dstart) * (hend - hstart) * (wend - wstart)
                        : ksize_depth * ksize_height * ksize_width;
    pool_process.finalize(static_cast<T>(pool_size), &ele);
    output_data[index] = ele;
  }
}
// Backward 3-D pooling kernel over NCDHW data; grid-stride loop, one thread
// per *input*-gradient element.  Each thread enumerates every output window
// that could cover its input position and accumulates the gradient via
// pool_process.compute().  No atomics needed: each thread owns one input cell.
template <typename PoolProcess, typename T>
__global__ void KernelPool3DGrad(
    const int nthreads, const T* input_data, const T* output_data,
    const T* output_grad, const int channels, const int input_depth,
    const int input_height, const int input_width, const int output_depth,
    const int output_height, const int output_width, const int ksize_depth,
    const int ksize_height, const int ksize_width, const int stride_depth,
    const int stride_height, const int stride_width, const int padding_depth,
    const int padding_height, const int padding_width, PoolProcess pool_process,
    bool exclusive, bool adaptive, T* input_grad) {
  for (int index = blockIdx.x * blockDim.x + threadIdx.x; index < nthreads;
       index += blockDim.x * gridDim.x) {
    // Input position in padded coordinates (padding added so the window
    // range computations below need no further offsetting).
    int w_offset = index % input_width + padding_width;
    int h_offset = (index / input_width) % input_height + padding_height;
    int d_offset =
        (index / input_width / input_height) % input_depth + padding_depth;
    int offsetC = (index / input_width / input_height / input_depth) % channels;
    int batch_idx = index / input_width / input_height / input_depth / channels;
    // Range of output windows [pdstart, pdend) x [phstart, phend) x
    // [pwstart, pwend) whose receptive field includes this input element.
    int pdstart, pdend;
    int phstart, phend;
    int pwstart, pwend;
    if (adaptive) {
      pdstart = d_offset * output_depth / input_depth;
      pdend =
          min((d_offset + 1) * output_depth / input_depth + 1, output_depth);
      phstart = h_offset * output_height / input_height;
      phend =
          min((h_offset + 1) * output_height / input_height + 1, output_height);
      pwstart = w_offset * output_width / input_width;
      pwend =
          min((w_offset + 1) * output_width / input_width + 1, output_width);
    } else {
      pdstart = (d_offset < ksize_depth)
                    ? 0
                    : (d_offset - ksize_depth) / stride_depth + 1;
      phstart = (h_offset < ksize_height)
                    ? 0
                    : (h_offset - ksize_height) / stride_height + 1;
      pwstart = (w_offset < ksize_width)
                    ? 0
                    : (w_offset - ksize_width) / stride_width + 1;
      pdend = min((d_offset) / stride_depth + 1, output_depth);
      phend = min((h_offset) / stride_height + 1, output_height);
      pwend = min((w_offset) / stride_width + 1, output_width);
    }
    T gradient = 0;
    T input = input_data[index];
    // Advance the output pointers to this (batch, channel) feature volume.
    int output_idx = (batch_idx * channels + offsetC) * output_depth *
                     output_height * output_width;
    output_data += output_idx;
    output_grad += output_idx;
    for (int pd = pdstart; pd < pdend; ++pd) {
      for (int ph = phstart; ph < phend; ++ph) {
        for (int pw = pwstart; pw < pwend; ++pw) {
          // figure out the pooling size
          int pool_size;
          if (adaptive) {
            pool_size =
                static_cast<int>(
                    ceil(static_cast<double>(input_depth) / ksize_depth)) *
                static_cast<int>(
                    ceil(static_cast<double>(input_height) / ksize_height)) *
                static_cast<int>(
                    ceil(static_cast<double>(input_width) / ksize_width));
          } else {
            // Recompute the clipped window of this output element.
            int dstart = pd * stride_depth - padding_depth;
            int hstart = ph * stride_height - padding_height;
            int wstart = pw * stride_width - padding_width;
            int dend = min(dstart + ksize_depth, input_depth);
            int hend = min(hstart + ksize_height, input_height);
            int wend = min(wstart + ksize_width, input_width);
            dstart = max(dstart, 0);
            hstart = max(hstart, 0);
            wstart = max(wstart, 0);
            pool_size =
                exclusive ? (dend - dstart) * (hend - hstart) * (wend - wstart)
                          : ksize_depth * ksize_height * ksize_width;
          }
          int output_sub_idx = (pd * output_height + ph) * output_width + pw;
          pool_process.compute(input, output_data[output_sub_idx],
                               output_grad[output_sub_idx],
                               static_cast<T>(1.0 / pool_size), &gradient);
        }
      }
    }
    input_grad[index] = gradient;
  }
}
// Backward 3-D max pooling kernel over NCDHW data; grid-stride loop, one
// thread per *output* element.  Each thread rescans its pooling window for
// the first input value equal to the pooled output and scatters the output
// gradient there with an atomic add (multiple output windows may map to the
// same input element).
template <typename T>
__global__ void KernelMaxPool3DGrad(
    const int nthreads, const T* input_data, const T* output_data,
    const T* output_grad, const int channels, const int input_depth,
    const int input_height, const int input_width, const int output_depth,
    const int output_height, const int output_width, const int ksize_depth,
    const int ksize_height, const int ksize_width, const int stride_depth,
    const int stride_height, const int stride_width, const int padding_depth,
    const int padding_height, const int padding_width, T* input_grad) {
  for (int index = blockIdx.x * blockDim.x + threadIdx.x; index < nthreads;
       index += blockDim.x * gridDim.x) {
    // Decompose the flat output index into (batch, c, pd, ph, pw).
    int pw = index % output_width;
    int ph = (index / output_width) % output_height;
    int pd = (index / output_width / output_height) % output_depth;
    int c = (index / output_width / output_height / output_depth) % channels;
    int batch_idx =
        index / output_width / output_height / output_depth / channels;
    // Clipped pooling window in input coordinates.
    int dstart = pd * stride_depth - padding_depth;
    int hstart = ph * stride_height - padding_height;
    int wstart = pw * stride_width - padding_width;
    int dend = min(dstart + ksize_depth, input_depth);
    int hend = min(hstart + ksize_height, input_height);
    int wend = min(wstart + ksize_width, input_width);
    dstart = max(dstart, 0);
    hstart = max(hstart, 0);
    wstart = max(wstart, 0);
    T ele = output_data[index];
    bool stop = false;
    int maxIdx = -1;
    input_data +=
        (batch_idx * channels + c) * input_depth * input_height * input_width;
    input_grad +=
        (batch_idx * channels + c) * input_depth * input_height * input_width;
    // Find the first window position whose input equals the pooled max.
    for (int d = dstart; d < dend && !stop; ++d) {
      for (int h = hstart; h < hend && !stop; ++h) {
        for (int w = wstart; w < wend && !stop; ++w) {
          if (ele == input_data[(d * input_height + h) * input_width + w]) {
            stop = true;
            maxIdx = (d * input_height + h) * input_width + w;
          }
        }
      }
    }
    if (maxIdx != -1) {
      // atomic add
      platform::CudaAtomicAdd(input_grad + maxIdx, output_grad[index]);
    }
  }
}
/*
* All tensors are in NCDHW format.
* Ksize, strides, paddings are three elements. These three elements represent
* depth, height and width, respectively.
*/
// Forward 3-D pooling over NCDHW tensors: launches KernelPool3D with one
// thread per output element on the context's stream.
template <typename PoolProcess, class T>
class Pool3dFunctor<platform::CUDADeviceContext, PoolProcess, T> {
 public:
  void operator()(const platform::CUDADeviceContext& context,
                  const framework::Tensor& input, const std::vector<int>& ksize,
                  const std::vector<int>& strides,
                  const std::vector<int>& paddings, PoolProcess pool_process,
                  bool exclusive, bool adaptive, framework::Tensor* output) {
    // NCDHW shapes; ksize/strides/paddings are {depth, height, width}.
    const int batch_size = input.dims()[0];
    const int input_channels = input.dims()[1];
    const int input_depth = input.dims()[2];
    const int input_height = input.dims()[3];
    const int input_width = input.dims()[4];
    const int output_channels = output->dims()[1];
    const int output_depth = output->dims()[2];
    const int output_height = output->dims()[3];
    const int output_width = output->dims()[4];
    const int ksize_depth = ksize[0];
    const int ksize_height = ksize[1];
    const int ksize_width = ksize[2];
    const int stride_depth = strides[0];
    const int stride_height = strides[1];
    const int stride_width = strides[2];
    const int padding_depth = paddings[0];
    const int padding_height = paddings[1];
    const int padding_width = paddings[2];
    const T* input_data = input.data<T>();
    T* output_data = output->mutable_data<T>(context.GetPlace());
    // One thread per output element, 1024 threads per block.
    int nthreads = batch_size * output_channels * output_depth * output_height *
                   output_width;
    int blocks = (nthreads + 1024 - 1) / 1024;
    dim3 threads(1024, 1);
    dim3 grid(blocks, 1);
    hipLaunchKernelGGL(( KernelPool3D<PoolProcess, T>), dim3(grid), dim3(threads), 0, context.stream(),
        nthreads, input_data, input_channels, input_depth, input_height,
        input_width, output_depth, output_height, output_width, ksize_depth,
        ksize_height, ksize_width, stride_depth, stride_height, stride_width,
        padding_depth, padding_height, padding_width, pool_process, exclusive,
        adaptive, output_data);
  }
};
/*
* All tensors are in NCDHW format.
* Ksize, strides, paddings are three elements. These three elements represent
* depth, height and width, respectively.
*/
// Backward 3-D pooling over NCDHW tensors: launches KernelPool3DGrad with one
// thread per *input*-gradient element.
template <typename PoolProcess, class T>
class Pool3dGradFunctor<platform::CUDADeviceContext, PoolProcess, T> {
 public:
  void operator()(const platform::CUDADeviceContext& context,
                  const framework::Tensor& input,
                  const framework::Tensor& output,
                  const framework::Tensor& output_grad,
                  const std::vector<int>& ksize,
                  const std::vector<int>& strides,
                  const std::vector<int>& paddings, PoolProcess pool_process,
                  bool exclusive, bool adaptive,
                  framework::Tensor* input_grad) {
    // NCDHW shapes; ksize/strides/paddings are {depth, height, width}.
    const int batch_size = input.dims()[0];
    const int input_channels = input.dims()[1];
    const int input_depth = input.dims()[2];
    const int input_height = input.dims()[3];
    const int input_width = input.dims()[4];
    const int output_channels = output.dims()[1];
    const int output_depth = output.dims()[2];
    const int output_height = output.dims()[3];
    const int output_width = output.dims()[4];
    const int ksize_depth = ksize[0];
    const int ksize_height = ksize[1];
    const int ksize_width = ksize[2];
    const int stride_depth = strides[0];
    const int stride_height = strides[1];
    const int stride_width = strides[2];
    const int padding_depth = paddings[0];
    const int padding_height = paddings[1];
    const int padding_width = paddings[2];
    const T* input_data = input.data<T>();
    const T* output_data = output.data<T>();
    const T* output_grad_data = output_grad.data<T>();
    T* input_grad_data = input_grad->mutable_data<T>(context.GetPlace());
    // One thread per input element, 1024 threads per block.
    int nthreads =
        batch_size * input_channels * input_depth * input_height * input_width;
    int blocks = (nthreads + 1024 - 1) / 1024;
    dim3 threads(1024, 1);
    dim3 grid(blocks, 1);
    hipLaunchKernelGGL(( KernelPool3DGrad<PoolProcess, T>), dim3(grid), dim3(threads), 0, context.stream(),
        nthreads, input_data, output_data, output_grad_data, input_channels,
        input_depth, input_height, input_width, output_depth, output_height,
        output_width, ksize_depth, ksize_height, ksize_width, stride_depth,
        stride_height, stride_width, padding_depth, padding_height,
        padding_width, pool_process, exclusive, adaptive, input_grad_data);
  }
};
/*
* All tensors are in NCDHW format.
* Ksize, strides, paddings are three elements. These three elements represent
* depth, height and width, respectively.
*/
// Backward 3-D max pooling over NCDHW tensors without a saved argmax mask:
// launches KernelMaxPool3DGrad with one thread per *output* element (the
// kernel rescans each window and scatters the gradient atomically).
template <class T>
class MaxPool3dGradFunctor<platform::CUDADeviceContext, T> {
 public:
  void operator()(const platform::CUDADeviceContext& context,
                  const framework::Tensor& input,
                  const framework::Tensor& output,
                  const framework::Tensor& output_grad,
                  const std::vector<int>& ksize,
                  const std::vector<int>& strides,
                  const std::vector<int>& paddings,
                  framework::Tensor* input_grad) {
    // NCDHW shapes; ksize/strides/paddings are {depth, height, width}.
    const int batch_size = input.dims()[0];
    const int input_channels = input.dims()[1];
    const int input_depth = input.dims()[2];
    const int input_height = input.dims()[3];
    const int input_width = input.dims()[4];
    const int output_channels = output.dims()[1];
    const int output_depth = output.dims()[2];
    const int output_height = output.dims()[3];
    const int output_width = output.dims()[4];
    const int ksize_depth = ksize[0];
    const int ksize_height = ksize[1];
    const int ksize_width = ksize[2];
    const int stride_depth = strides[0];
    const int stride_height = strides[1];
    const int stride_width = strides[2];
    const int padding_depth = paddings[0];
    const int padding_height = paddings[1];
    const int padding_width = paddings[2];
    const T* input_data = input.data<T>();
    const T* output_data = output.data<T>();
    const T* output_grad_data = output_grad.data<T>();
    T* input_grad_data = input_grad->mutable_data<T>(context.GetPlace());
    // One thread per output element, 1024 threads per block.
    int nthreads = batch_size * output_channels * output_depth * output_height *
                   output_width;
    int blocks = (nthreads + 1024 - 1) / 1024;
    dim3 threads(1024, 1);
    dim3 grid(blocks, 1);
    hipLaunchKernelGGL(( KernelMaxPool3DGrad<T>), dim3(grid), dim3(threads), 0, context.stream(),
        nthreads, input_data, output_data, output_grad_data, input_channels,
        input_depth, input_height, input_width, output_depth, output_height,
        output_width, ksize_depth, ksize_height, ksize_width, stride_depth,
        stride_height, stride_width, padding_depth, padding_height,
        padding_width, input_grad_data);
  }
};
// Explicit instantiations of the 3-D pooling functors for the supported
// element types (float, double) and pooling modes (max, average).
template class MaxPool3dGradFunctor<platform::CUDADeviceContext, float>;
template class MaxPool3dGradFunctor<platform::CUDADeviceContext, double>;
template class Pool3dFunctor<platform::CUDADeviceContext,
                             paddle::operators::math::MaxPool<float>, float>;
template class Pool3dFunctor<platform::CUDADeviceContext,
                             paddle::operators::math::AvgPool<float>, float>;
template class Pool3dGradFunctor<platform::CUDADeviceContext,
                                 paddle::operators::math::MaxPoolGrad<float>,
                                 float>;
template class Pool3dGradFunctor<platform::CUDADeviceContext,
                                 paddle::operators::math::AvgPoolGrad<float>,
                                 float>;
template class Pool3dFunctor<platform::CUDADeviceContext,
                             paddle::operators::math::MaxPool<double>, double>;
template class Pool3dFunctor<platform::CUDADeviceContext,
                             paddle::operators::math::AvgPool<double>, double>;
template class Pool3dGradFunctor<platform::CUDADeviceContext,
                                 paddle::operators::math::MaxPoolGrad<double>,
                                 double>;
template class Pool3dGradFunctor<platform::CUDADeviceContext,
                                 paddle::operators::math::AvgPoolGrad<double>,
                                 double>;
// Forward 2-D max pooling kernel that also records the argmax: one thread per
// output element (grid-stride loop).  Writes the max value to output_data and
// the winning input position (flattened h * input_width + w within the
// feature map, stored as T2) to mask_data for use by the backward kernel.
template <typename T1, typename T2>
__global__ void KernelMaxPool2dWithIdx(
    const int nthreads, const T1* input_data, const int channels,
    const int input_height, const int input_width, const int output_height,
    const int output_width, const int ksize_height, const int ksize_width,
    const int stride_height, const int stride_width, const int padding_height,
    const int padding_width, bool adaptive, T1* output_data, T2* mask_data) {
  for (int index = blockIdx.x * blockDim.x + threadIdx.x; index < nthreads;
       index += blockDim.x * gridDim.x) {
    // Decompose the flat output index into (batch, c, ph, pw).
    int pw = index % output_width;
    int ph = (index / output_width) % output_height;
    int c = (index / output_width / output_height) % channels;
    int batch_idx = index / output_width / output_height / channels;
    int hstart, hend;
    int wstart, wend;
    if (adaptive) {
      hstart = AdaptStartIndex(ph, input_height, output_height);
      hend = AdaptEndIndex(ph, input_height, output_height);
      wstart = AdaptStartIndex(pw, input_width, output_width);
      wend = AdaptEndIndex(pw, input_width, output_width);
    } else {
      hstart = ph * stride_height - padding_height;
      hend = min(hstart + ksize_height, input_height);
      hstart = max(hstart, 0);
      wstart = pw * stride_width - padding_width;
      wend = min(wstart + ksize_width, input_width);
      wstart = max(wstart, 0);
    }
    input_data += (batch_idx * channels + c) * input_height * input_width;
    // Running max starts at -FLT_MAX (also used when T1 is double; any real
    // input value replaces it on the first comparison).
    T1 ele = -FLT_MAX;
    int max_index = -1;
    for (int h = hstart; h < hend; ++h) {
      for (int w = wstart; w < wend; ++w) {
        int input_index = h * input_width + w;
        if (ele < input_data[input_index]) {
          max_index = input_index;
          ele = input_data[input_index];
        }
      }
    }
    output_data[index] = ele;
    mask_data[index] = max_index;
  }
}
// Backward 2-D max pooling kernel driven by the saved argmax mask: one thread
// per *input*-gradient element (grid-stride loop).  Each thread sums
// output_grad over every output window whose mask entry points at this input
// position; no atomics are needed since each thread owns one input cell.
template <typename T1, typename T2>
__global__ void KernelMaxPool2DWithIdxGrad(
    const int nthreads, const T1* output_grad, const T2* mask_data,
    const int channels, const int input_height, const int input_width,
    const int output_height, const int output_width, const int ksize_height,
    const int ksize_width, const int stride_height, const int stride_width,
    const int padding_height, const int padding_width, bool adaptive,
    T1* input_grad) {
  for (int index = blockIdx.x * blockDim.x + threadIdx.x; index < nthreads;
       index += blockDim.x * gridDim.x) {
    // Decompose the flat input index into (batch, c, h, w).
    int w_offset = index % input_width;
    int h_offset = (index / input_width) % input_height;
    int offsetC = (index / input_width / input_height) % channels;
    int batch_idx = index / input_width / input_height / channels;
    // Range of output windows that could cover this input position.
    int phstart, phend;
    int pwstart, pwend;
    if (adaptive) {
      phstart = h_offset * output_height / input_height;
      phend =
          min((h_offset + 1) * output_height / input_height + 1, output_height);
      pwstart = w_offset * output_width / input_width;
      pwend =
          min((w_offset + 1) * output_width / input_width + 1, output_width);
    } else {
      phstart =
          (h_offset + padding_height < ksize_height)
              ? 0
              : (h_offset + padding_height - ksize_height) / stride_height + 1;
      pwstart =
          (w_offset + padding_width < ksize_width)
              ? 0
              : (w_offset + padding_width - ksize_width) / stride_width + 1;
      phend =
          min((h_offset + padding_height) / stride_height + 1, output_height);
      pwend = min((w_offset + padding_width) / stride_width + 1, output_width);
    }
    T1 gradient = 0;
    // Flattened position of this input element within its feature map; the
    // mask stores values in the same encoding.
    int input_current_featuremap_idx = h_offset * input_width + w_offset;
    int output_idx =
        (batch_idx * channels + offsetC) * output_height * output_width;
    mask_data += output_idx;
    output_grad += output_idx;
    for (int ph = phstart; ph < phend; ++ph) {
      for (int pw = pwstart; pw < pwend; ++pw) {
        if (mask_data[ph * output_width + pw] == input_current_featuremap_idx)
          gradient += output_grad[ph * output_width + pw];
      }
    }
    input_grad[index] = gradient;
  }
}
/*
* All tensors are in NCHW format.
* Ksize, strides, paddings are two elements. These two elements represent
* height and width, respectively.
*/
// Forward 2-D max pooling with argmax over NCHW tensors: launches
// KernelMaxPool2dWithIdx with one thread per output element, filling both
// the pooled output and the argmax mask.
template <typename T1, typename T2>
class MaxPool2dWithIndexFunctor<platform::CUDADeviceContext, T1, T2> {
 public:
  void operator()(const platform::CUDADeviceContext& context,
                  const framework::Tensor& input, const std::vector<int>& ksize,
                  const std::vector<int>& strides,
                  const std::vector<int>& paddings, bool adaptive,
                  framework::Tensor* output, framework::Tensor* mask) {
    // NCHW shapes; ksize/strides/paddings are {height, width}.
    const int batch_size = input.dims()[0];
    const int input_channels = input.dims()[1];
    const int input_height = input.dims()[2];
    const int input_width = input.dims()[3];
    const int output_channels = output->dims()[1];
    const int output_height = output->dims()[2];
    const int output_width = output->dims()[3];
    const int ksize_height = ksize[0];
    const int ksize_width = ksize[1];
    const int stride_height = strides[0];
    const int stride_width = strides[1];
    const int padding_height = paddings[0];
    const int padding_width = paddings[1];
    const T1* input_data = input.data<T1>();
    T1* output_data = output->mutable_data<T1>(context.GetPlace())
;
    T2* mask_data = mask->mutable_data<T2>(context.GetPlace());
    // One thread per output element, 1024 threads per block.
    int nthreads = batch_size * output_channels * output_height * output_width;
    int blocks = (nthreads + 1024 - 1) / 1024;
    dim3 threads(1024, 1);
    dim3 grid(blocks, 1);
    hipLaunchKernelGGL(( KernelMaxPool2dWithIdx<T1, T2>), dim3(grid), dim3(threads), 0, context.stream(),
        nthreads, input_data, input_channels, input_height, input_width,
        output_height, output_width, ksize_height, ksize_width, stride_height,
        stride_width, padding_height, padding_width, adaptive, output_data,
        mask_data);
  }
};
/*
* All tensors are in NCHW format.
* Ksize, strides, paddings are two elements. These two elements represent
* height and width, respectively.
*/
template <typename T1, typename T2>
class MaxPool2dWithIndexGradFunctor<platform::CUDADeviceContext, T1, T2> {
 public:
  // Backward 2-D max pooling with an explicit argmax mask.  Routes each
  // output gradient back to the input position recorded in `mask`
  // (flattened h * width + w index within each feature map) by launching
  // KernelMaxPool2DWithIdxGrad with one CUDA thread per input-gradient
  // element on the context's stream.
  void operator()(const platform::CUDADeviceContext& context,
                  const framework::Tensor& output_grad,
                  const framework::Tensor& mask, const std::vector<int>& ksize,
                  const std::vector<int>& strides,
                  const std::vector<int>& paddings, bool adaptive,
                  framework::Tensor* input_grad) {
    // NCHW shapes of the two gradient tensors.
    const int batch = input_grad->dims()[0];
    const int channels = input_grad->dims()[1];
    const int in_h = input_grad->dims()[2];
    const int in_w = input_grad->dims()[3];
    const int out_h = output_grad.dims()[2];
    const int out_w = output_grad.dims()[3];
    const T2* mask_ptr = mask.data<T2>();
    const T1* out_grad_ptr = output_grad.data<T1>();
    T1* in_grad_ptr = input_grad->mutable_data<T1>(context.GetPlace());
    // One thread per input element, 1024 threads per block.
    const int nthreads = batch * channels * in_h * in_w;
    constexpr int kBlockSize = 1024;
    dim3 threads(kBlockSize, 1);
    dim3 grid((nthreads + kBlockSize - 1) / kBlockSize, 1);
    hipLaunchKernelGGL(( KernelMaxPool2DWithIdxGrad<T1, T2>), dim3(grid), dim3(threads), 0, context.stream(),
        nthreads, out_grad_ptr, mask_ptr, channels, in_h, in_w, out_h, out_w,
        ksize[0], ksize[1], strides[0], strides[1], paddings[0], paddings[1],
        adaptive, in_grad_ptr);
  }
};
// Explicit instantiations of the 2-D max-pool-with-index functors
// (float/double data, int argmax mask).
template class MaxPool2dWithIndexFunctor<platform::CUDADeviceContext, float,
                                         int>;
template class MaxPool2dWithIndexGradFunctor<platform::CUDADeviceContext, float,
                                             int>;
template class MaxPool2dWithIndexFunctor<platform::CUDADeviceContext, double,
                                         int>;
template class MaxPool2dWithIndexGradFunctor<platform::CUDADeviceContext,
                                             double, int>;
// Forward 3-D max pooling kernel that also records the argmax: one thread per
// output element of an NCDHW tensor (grid-stride loop).  Writes the max value
// to output_data and the winning flattened input position
// ((d * input_height + h) * input_width + w within the feature volume) to
// mask_data for the backward kernel.
template <typename T1, typename T2>
__global__ void KernelMaxPool3DWithIdx(
    const int nthreads, const T1* input_data, const int channels,
    const int input_depth, const int input_height, const int input_width,
    const int output_depth, const int output_height, const int output_width,
    const int ksize_depth, const int ksize_height, const int ksize_width,
    const int stride_depth, const int stride_height, const int stride_width,
    const int padding_depth, const int padding_height, const int padding_width,
    bool adaptive, T1* output_data, T2* mask_data) {
  for (int index = blockIdx.x * blockDim.x + threadIdx.x; index < nthreads;
       index += blockDim.x * gridDim.x) {
    // Decompose the flat output index into (batch, c, pd, ph, pw).
    int pw = index % output_width;
    int ph = (index / output_width) % output_height;
    int pd = (index / output_width / output_height) % output_depth;
    int c = (index / output_width / output_height / output_depth) % channels;
    int batch_idx =
        index / output_width / output_height / output_depth / channels;
    int dstart, dend;
    int hstart, hend;
    int wstart, wend;
    if (adaptive) {
      dstart = AdaptStartIndex(pd, input_depth, output_depth);
      dend = AdaptEndIndex(pd, input_depth, output_depth);
      hstart = AdaptStartIndex(ph, input_height, output_height);
      hend = AdaptEndIndex(ph, input_height, output_height);
      wstart = AdaptStartIndex(pw, input_width, output_width);
      wend = AdaptEndIndex(pw, input_width, output_width);
    } else {
      // Standard pooling: window from stride/padding, clipped to the input.
      dstart = pd * stride_depth - padding_depth;
      hstart = ph * stride_height - padding_height;
      wstart = pw * stride_width - padding_width;
      dend = min(dstart + ksize_depth, input_depth);
      hend = min(hstart + ksize_height, input_height);
      wend = min(wstart + ksize_width, input_width);
      dstart = max(dstart, 0);
      hstart = max(hstart, 0);
      wstart = max(wstart, 0);
    }
    // Running max starts at -FLT_MAX (also used when T1 is double; any real
    // input value replaces it on the first comparison).
    T1 ele = -FLT_MAX;
    int max_index = -1;
    input_data +=
        (batch_idx * channels + c) * input_depth * input_height * input_width;
    for (int d = dstart; d < dend; ++d) {
      for (int h = hstart; h < hend; ++h) {
        for (int w = wstart; w < wend; ++w) {
          if (ele < input_data[(d * input_height + h) * input_width + w]) {
            max_index = (d * input_height + h) * input_width + w;
            ele = input_data[max_index];
          }
        }
      }
    }
    output_data[index] = ele;
    mask_data[index] = max_index;
  }
}
// Backward 3-D max pooling kernel driven by the saved argmax mask: one thread
// per *input*-gradient element of an NCDHW tensor (grid-stride loop).  Each
// thread sums output_grad over every output window whose mask entry points at
// this input position; no atomics needed since each thread owns one cell.
template <typename T1, typename T2>
__global__ void KernelMaxPool3DWithIdxGrad(
    const int nthreads, const T1* output_grad, const T2* mask,
    const int channels, const int input_depth, const int input_height,
    const int input_width, const int output_depth, const int output_height,
    const int output_width, const int ksize_depth, const int ksize_height,
    const int ksize_width, const int stride_depth, const int stride_height,
    const int stride_width, const int padding_depth, const int padding_height,
    const int padding_width, bool adaptive, T1* input_grad) {
  for (int index = blockIdx.x * blockDim.x + threadIdx.x; index < nthreads;
       index += blockDim.x * gridDim.x) {
    // Decompose the flat input index into (batch, c, d, h, w).
    int w_offset = index % input_width;
    int h_offset = (index / input_width) % input_height;
    int d_offset = (index / input_width / input_height) % input_depth;
    int offsetC = (index / input_width / input_height / input_depth) % channels;
    int batch_idx = index / input_width / input_height / input_depth / channels;
    // Range of output windows that could cover this input position.
    int pdstart, pdend;
    int phstart, phend;
    int pwstart, pwend;
    if (adaptive) {
      pdstart = d_offset * output_depth / input_depth;
      pdend =
          min((d_offset + 1) * output_depth / input_depth + 1, output_depth);
      phstart = h_offset * output_height / input_height;
      phend =
          min((h_offset + 1) * output_height / input_height + 1, output_height);
      pwstart = w_offset * output_width / input_width;
      pwend =
          min((w_offset + 1) * output_width / input_width + 1, output_width);
    } else {
      pdstart =
          (d_offset + padding_depth < ksize_depth)
              ? 0
              : (d_offset + padding_depth - ksize_depth) / stride_depth + 1;
      phstart =
          (h_offset + padding_height < ksize_height)
              ? 0
              : (h_offset + padding_height - ksize_height) / stride_height + 1;
      pwstart =
          (w_offset + padding_width < ksize_width)
              ? 0
              : (w_offset + padding_width - ksize_width) / stride_width + 1;
      pdend = min((d_offset + padding_depth) / stride_depth + 1, output_depth);
      phend =
          min((h_offset + padding_height) / stride_height + 1, output_height);
      pwend = min((w_offset + padding_width) / stride_width + 1, output_width);
    }
    T1 gradient = 0;
    // Flattened position of this input element within its feature volume;
    // the mask stores values in the same encoding.
    int input_current_feature_map_idx =
        (d_offset * input_height + h_offset) * input_width + w_offset;
    int output_idx = (batch_idx * channels + offsetC) * output_depth *
                     output_height * output_width;
    mask += output_idx;
    output_grad += output_idx;
    for (int pd = pdstart; pd < pdend; ++pd) {
      for (int ph = phstart; ph < phend; ++ph) {
        for (int pw = pwstart; pw < pwend; ++pw) {
          if (mask[(pd * output_height + ph) * output_width + pw] ==
              input_current_feature_map_idx)
            gradient +=
                output_grad[(pd * output_height + ph) * output_width + pw];
        }
      }
    }
    input_grad[index] = gradient;
  }
}
/*
* All tensors are in NCDHW format.
* Ksize, strides, paddings are three elements. These three elements represent
* depth, height and width, respectively.
*/
template <typename T1, typename T2>
class MaxPool3dWithIndexFunctor<platform::CUDADeviceContext, T1, T2> {
 public:
  // Forward 3-D max pooling that also records, per output element, the
  // argmax position within the input feature map (written to `mask`).
  // T1 is the element type; T2 is the integer type of the mask entries.
  // Launch config: one thread per output element, 1024 threads per block.
  void operator()(const platform::CUDADeviceContext& context,
                  const framework::Tensor& input, const std::vector<int>& ksize,
                  const std::vector<int>& strides,
                  const std::vector<int>& paddings, bool adaptive,
                  framework::Tensor* output, framework::Tensor* mask) {
    // Input/output are NCDHW; ksize/strides/paddings hold {depth, height,
    // width}.
    const int batch_size = input.dims()[0];
    const int input_channels = input.dims()[1];
    const int input_depth = input.dims()[2];
    const int input_height = input.dims()[3];
    const int input_width = input.dims()[4];
    const int output_channels = output->dims()[1];
    const int output_depth = output->dims()[2];
    const int output_height = output->dims()[3];
    const int output_width = output->dims()[4];
    const int ksize_depth = ksize[0];
    const int ksize_height = ksize[1];
    const int ksize_width = ksize[2];
    const int stride_depth = strides[0];
    const int stride_height = strides[1];
    const int stride_width = strides[2];
    const int padding_depth = paddings[0];
    const int padding_height = paddings[1];
    const int padding_width = paddings[2];
    const T1* input_data = input.data<T1>();
    T1* output_data = output->mutable_data<T1>(context.GetPlace());
    T2* mask_data = mask->mutable_data<T2>(context.GetPlace());
    // One thread per output element.
    int nthreads = batch_size * output_channels * output_depth * output_height *
                   output_width;
    int blocks = (nthreads + 1024 - 1) / 1024;
    dim3 threads(1024, 1);
    dim3 grid(blocks, 1);
    hipLaunchKernelGGL(( KernelMaxPool3DWithIdx<T1, T2>), dim3(grid), dim3(threads), 0, context.stream(),
        nthreads, input_data, input_channels, input_depth, input_height,
        input_width, output_depth, output_height, output_width, ksize_depth,
        ksize_height, ksize_width, stride_depth, stride_height, stride_width,
        padding_depth, padding_height, padding_width, adaptive, output_data,
        mask_data);
  }
};
/*
* All tensors are in NCDHW format.
* Ksize, strides, paddings are three elements. These three elements represent
* depth, height and width, respectively.
*/
template <typename T1, typename T2>
class MaxPool3dWithIndexGradFunctor<platform::CUDADeviceContext, T1, T2> {
 public:
  // Backward of 3-D max pooling with a saved argmax `mask`: each input
  // element gathers the gradients of all output positions whose recorded
  // argmax equals its own location.
  // Launch config: one thread per *input* element, 1024 threads per block.
  void operator()(const platform::CUDADeviceContext& context,
                  const framework::Tensor& output_grad,
                  const framework::Tensor& mask, const std::vector<int>& ksize,
                  const std::vector<int>& strides,
                  const std::vector<int>& paddings, bool adaptive,
                  framework::Tensor* input_grad) {
    // All tensors are NCDHW; ksize/strides/paddings hold {depth, height,
    // width}.
    const int batch_size = input_grad->dims()[0];
    const int input_channels = input_grad->dims()[1];
    const int input_depth = input_grad->dims()[2];
    const int input_height = input_grad->dims()[3];
    const int input_width = input_grad->dims()[4];
    const int output_depth = output_grad.dims()[2];
    const int output_height = output_grad.dims()[3];
    const int output_width = output_grad.dims()[4];
    const int ksize_depth = ksize[0];
    const int ksize_height = ksize[1];
    const int ksize_width = ksize[2];
    const int stride_depth = strides[0];
    const int stride_height = strides[1];
    const int stride_width = strides[2];
    const int padding_depth = paddings[0];
    const int padding_height = paddings[1];
    const int padding_width = paddings[2];
    const T1* output_grad_data = output_grad.data<T1>();
    const T2* mask_data = mask.data<T2>();
    T1* input_grad_data = input_grad->mutable_data<T1>(context.GetPlace());
    // One thread per input element (gather, no atomics needed).
    int nthreads =
        batch_size * input_channels * input_depth * input_height * input_width;
    int blocks = (nthreads + 1024 - 1) / 1024;
    dim3 threads(1024, 1);
    dim3 grid(blocks, 1);
    hipLaunchKernelGGL(( KernelMaxPool3DWithIdxGrad<T1, T2>), dim3(grid), dim3(threads), 0, context.stream(),
        nthreads, output_grad_data, mask_data, input_channels, input_depth,
        input_height, input_width, output_depth, output_height, output_width,
        ksize_depth, ksize_height, ksize_width, stride_depth, stride_height,
        stride_width, padding_depth, padding_height, padding_width, adaptive,
        input_grad_data);
  }
};
// Explicit instantiations: float/double elements, int argmax masks.
template class MaxPool3dWithIndexFunctor<platform::CUDADeviceContext, float,
                                         int>;
template class MaxPool3dWithIndexGradFunctor<platform::CUDADeviceContext, float,
                                             int>;
template class MaxPool3dWithIndexFunctor<platform::CUDADeviceContext, double,
                                         int>;
template class MaxPool3dWithIndexGradFunctor<platform::CUDADeviceContext,
                                             double, int>;
} // namespace math
} // namespace operators
} // namespace paddle
| f6bc915d2ec34967c22e35cb736bff0f2cd065b1.cu | /* Copyright (c) 2016 paddlepaddle Authors. All Rights Reserved.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License. */
#include <algorithm>
#include <vector>
#include "paddle/fluid/operators/math/pooling.h"
#include "paddle/fluid/platform/cuda_primitives.h"
namespace paddle {
namespace operators {
namespace math {
// Forward 2-D pooling over an NCHW input. Grid-stride loop: each loop
// iteration produces one output element. PoolProcess supplies
// initial()/compute()/finalize() and selects the pooling type (max/avg).
// `exclusive`/`adaptive` change the window size handed to finalize();
// `adaptive` also derives the window bounds from the input/output extents
// instead of ksize/stride/padding.
template <typename PoolProcess, typename T>
__global__ void KernelPool2D(const int nthreads, const T* input_data,
                             const int channels, const int input_height,
                             const int input_width, const int output_height,
                             const int output_width, const int ksize_height,
                             const int ksize_width, const int stride_height,
                             const int stride_width, const int padding_height,
                             const int padding_width, PoolProcess pool_process,
                             bool exclusive, bool adaptive, T* output_data) {
  for (int index = blockIdx.x * blockDim.x + threadIdx.x; index < nthreads;
       index += blockDim.x * gridDim.x) {
    // Decompose the linear output index into (batch, channel, ph, pw).
    int pw = index % output_width;
    int ph = (index / output_width) % output_height;
    int c = (index / output_width / output_height) % channels;
    int batch_idx = index / output_width / output_height / channels;
    int hstart, hend;
    int wstart, wend;
    if (adaptive) {
      // Adaptive pooling: window bounds follow the input/output ratio.
      hstart = AdaptStartIndex(ph, input_height, output_height);
      hend = AdaptEndIndex(ph, input_height, output_height);
      wstart = AdaptStartIndex(pw, input_width, output_width);
      wend = AdaptEndIndex(pw, input_width, output_width);
    } else {
      // Standard pooling: window from stride/padding, clipped to the input.
      hstart = ph * stride_height - padding_height;
      hend = min(hstart + ksize_height, input_height);
      hstart = max(hstart, 0);
      wstart = pw * stride_width - padding_width;
      wend = min(wstart + ksize_width, input_width);
      wstart = max(wstart, 0);
    }
    // Advance to this (batch, channel) feature map.
    input_data += (batch_idx * channels + c) * input_height * input_width;
    T ele = pool_process.initial();
    for (int h = hstart; h < hend; ++h) {
      for (int w = wstart; w < wend; ++w) {
        pool_process.compute(input_data[h * input_width + w], &ele);
      }
    }
    // Effective window size passed to finalize() (the avg-pool divisor):
    // the clipped window for exclusive/adaptive, else the full kernel size.
    int pool_size = (exclusive || adaptive) ? (hend - hstart) * (wend - wstart)
                                            : ksize_height * ksize_width;
    pool_process.finalize(static_cast<T>(pool_size), &ele);
    output_data[index] = ele;
  }
}
// Backward 2-D pooling: one grid-stride iteration per *input* element.
// Each thread gathers contributions from every output position whose
// pooling window covers its input location, so no atomics are needed.
template <typename PoolProcess, typename T>
__global__ void KernelPool2DGrad(
    const int nthreads, const T* input_data, const T* output_data,
    const T* output_grad, const int channels, const int input_height,
    const int input_width, const int output_height, const int output_width,
    const int ksize_height, const int ksize_width, const int stride_height,
    const int stride_width, const int padding_height, const int padding_width,
    PoolProcess pool_process, bool exclusive, bool adaptive, T* input_grad) {
  for (int index = blockIdx.x * blockDim.x + threadIdx.x; index < nthreads;
       index += blockDim.x * gridDim.x) {
    // Input coordinates shifted into the padded frame.
    int w_offset = index % input_width + padding_width;
    int h_offset = (index / input_width) % input_height + padding_height;
    int offsetC = (index / input_width / input_height) % channels;
    int batch_idx = index / input_width / input_height / channels;
    // [phstart, phend) x [pwstart, pwend): the output positions whose
    // windows contain (h_offset, w_offset).
    int phstart, phend;
    int pwstart, pwend;
    if (adaptive) {
      phstart = h_offset * output_height / input_height;
      phend =
          min((h_offset + 1) * output_height / input_height + 1, output_height);
      pwstart = w_offset * output_width / input_width;
      pwend =
          min((w_offset + 1) * output_width / input_width + 1, output_width);
    } else {
      phstart = (h_offset < ksize_height)
                    ? 0
                    : (h_offset - ksize_height) / stride_height + 1;
      pwstart = (w_offset < ksize_width)
                    ? 0
                    : (w_offset - ksize_width) / stride_width + 1;
      phend = min(h_offset / stride_height + 1, output_height);
      pwend = min(w_offset / stride_width + 1, output_width);
    }
    T gradient = 0;
    T input = input_data[index];
    // Rebase output pointers onto this (batch, channel) feature map.
    int output_idx =
        (batch_idx * channels + offsetC) * output_height * output_width;
    output_data += output_idx;
    output_grad += output_idx;
    for (int ph = phstart; ph < phend; ++ph) {
      for (int pw = pwstart; pw < pwend; ++pw) {
        // Recompute the window size used by the forward pass for (ph, pw)
        // so the gradient scaling matches.
        int pool_size;
        if (adaptive) {
          pool_size = static_cast<int>(ceil(static_cast<double>(input_height) /
                                            ksize_height)) *
                      static_cast<int>(
                          ceil(static_cast<double>(input_width) / ksize_width));
        } else {
          int hstart = ph * stride_height - padding_height;
          int wstart = pw * stride_width - padding_width;
          int hend = min(hstart + ksize_height, input_height);
          int wend = min(wstart + ksize_width, input_width);
          hstart = max(hstart, 0);
          wstart = max(wstart, 0);
          pool_size = exclusive ? (hend - hstart) * (wend - wstart)
                                : ksize_height * ksize_width;
        }
        int output_sub_idx = ph * output_width + pw;
        pool_process.compute(input, output_data[output_sub_idx],
                             output_grad[output_sub_idx],
                             static_cast<T>(1.0 / pool_size), &gradient);
      }
    }
    input_grad[index] = gradient;
  }
}
// Backward of 2-D max pooling without a saved argmax mask: one grid-stride
// iteration per *output* element. Each thread re-scans its window for the
// first input equal to the pooled maximum and routes the gradient there
// with an atomic add (several windows may pick the same input element).
template <typename T>
__global__ void KernelMaxPool2DGrad(
    const int nthreads, const T* input_data, const T* output_data,
    const T* output_grad, const int channels, const int input_height,
    const int input_width, const int output_height, const int output_width,
    const int ksize_height, const int ksize_width, const int stride_height,
    const int stride_width, const int padding_height, const int padding_width,
    T* input_grad) {
  for (int index = blockIdx.x * blockDim.x + threadIdx.x; index < nthreads;
       index += blockDim.x * gridDim.x) {
    // Decompose the linear output index into (batch, channel, ph, pw).
    int pw = index % output_width;
    int ph = (index / output_width) % output_height;
    int c = (index / output_width / output_height) % channels;
    int batch_idx = index / output_width / output_height / channels;
    // Window bounds, clipped to the input.
    int hstart = ph * stride_height - padding_height;
    int hend = min(hstart + ksize_height, input_height);
    hstart = max(hstart, 0);
    int wstart = pw * stride_width - padding_width;
    int wend = min(wstart + ksize_width, input_width);
    wstart = max(wstart, 0);
    input_data += (batch_idx * channels + c) * input_height * input_width;
    input_grad += (batch_idx * channels + c) * input_height * input_width;
    T ele = output_data[index];
    int maxIndex = -1;
    bool stop = false;
    // Find the first window element equal to the forward max.
    for (int h = hstart; h < hend && !stop; ++h) {
      for (int w = wstart; w < wend && !stop; ++w) {
        if (ele == input_data[h * input_width + w]) {
          maxIndex = h * input_width + w;
          stop = true;
        }
      }
    }
    if (maxIndex != -1) {
      // atomic add
      platform::CudaAtomicAdd(input_grad + maxIndex, output_grad[index]);
    }
  }
}
// Raw-pointer front end for 2-D forward pooling: unpacks NCHW shapes and
// launches KernelPool2D on the caller-provided stream (non-adaptive only).
// Launch config: one thread per output element, 1024 threads per block.
template <typename PoolProcess, typename T>
void Pool2dDirectCUDAFunctor<PoolProcess, T>::operator()(
    const T* input, const std::vector<int>& input_shape,
    const std::vector<int>& output_shape, const std::vector<int>& ksize,
    const std::vector<int>& strides, const std::vector<int>& paddings,
    PoolProcess pool_compute, bool exclusive, T* output, cudaStream_t stream) {
  // Shapes are {N, C, H, W}; ksize/strides/paddings hold {height, width}.
  const int batch = input_shape[0];
  const int in_c = input_shape[1];
  const int in_h = input_shape[2];
  const int in_w = input_shape[3];
  const int out_c = output_shape[1];
  const int out_h = output_shape[2];
  const int out_w = output_shape[3];
  const int k_h = ksize[0], k_w = ksize[1];
  const int s_h = strides[0], s_w = strides[1];
  const int p_h = paddings[0], p_w = paddings[1];
  const int total = batch * out_c * out_h * out_w;
  dim3 block_dim(1024, 1);
  dim3 grid_dim((total + 1023) / 1024, 1);
  KernelPool2D<PoolProcess, T><<<grid_dim, block_dim, 0, stream>>>(
      total, input, in_c, in_h, in_w, out_h, out_w, k_h, k_w, s_h, s_w, p_h,
      p_w, pool_compute, exclusive, /*adaptive=*/false, output);
}
/*
* All tensors are in NCHW format.
* Ksize, strides, paddings are two elements. These two elements represent
* height and width, respectively.
*/
template <typename PoolProcess, typename T>
class Pool2dFunctor<platform::CUDADeviceContext, PoolProcess, T> {
 public:
  // Forward 2-D pooling on NCHW tensors; PoolProcess selects max/avg.
  // Launch config: one thread per output element, 1024 threads per block.
  void operator()(const platform::CUDADeviceContext& context,
                  const framework::Tensor& input, const std::vector<int>& ksize,
                  const std::vector<int>& strides,
                  const std::vector<int>& paddings, PoolProcess pool_process,
                  bool exclusive, bool adaptive, framework::Tensor* output) {
    // ksize/strides/paddings hold {height, width}.
    const int batch_size = input.dims()[0];
    const int input_channels = input.dims()[1];
    const int input_height = input.dims()[2];
    const int input_width = input.dims()[3];
    const int output_channels = output->dims()[1];
    const int output_height = output->dims()[2];
    const int output_width = output->dims()[3];
    const int ksize_height = ksize[0];
    const int ksize_width = ksize[1];
    const int stride_height = strides[0];
    const int stride_width = strides[1];
    const int padding_height = paddings[0];
    const int padding_width = paddings[1];
    const T* input_data = input.data<T>();
    T* output_data = output->mutable_data<T>(context.GetPlace());
    // One thread per output element.
    int nthreads = batch_size * output_channels * output_height * output_width;
    int blocks = (nthreads + 1024 - 1) / 1024;
    dim3 threads(1024, 1);
    dim3 grid(blocks, 1);
    KernelPool2D<PoolProcess, T><<<grid, threads, 0, context.stream()>>>(
        nthreads, input_data, input_channels, input_height, input_width,
        output_height, output_width, ksize_height, ksize_width, stride_height,
        stride_width, padding_height, padding_width, pool_process, exclusive,
        adaptive, output_data);
  }
};
/*
* All tensors are in NCHW format.
* Ksize, strides, paddings are two elements. These two elements represent
* height and width, respectively.
*/
template <typename PoolProcess, typename T>
class Pool2dGradFunctor<platform::CUDADeviceContext, PoolProcess, T> {
 public:
  // Backward 2-D pooling on NCHW tensors; PoolProcess supplies the
  // gradient rule (max/avg). Launch config: one thread per *input*
  // element (gather form, no atomics), 1024 threads per block.
  void operator()(const platform::CUDADeviceContext& context,
                  const framework::Tensor& input,
                  const framework::Tensor& output,
                  const framework::Tensor& output_grad,
                  const std::vector<int>& ksize,
                  const std::vector<int>& strides,
                  const std::vector<int>& paddings, PoolProcess pool_process,
                  bool exclusive, bool adaptive,
                  framework::Tensor* input_grad) {
    // ksize/strides/paddings hold {height, width}.
    const int batch_size = input.dims()[0];
    const int input_channels = input.dims()[1];
    const int input_height = input.dims()[2];
    const int input_width = input.dims()[3];
    const int output_height = output.dims()[2];
    const int output_width = output.dims()[3];
    const int ksize_height = ksize[0];
    const int ksize_width = ksize[1];
    const int stride_height = strides[0];
    const int stride_width = strides[1];
    const int padding_height = paddings[0];
    const int padding_width = paddings[1];
    const T* input_data = input.data<T>();
    const T* output_data = output.data<T>();
    const T* output_grad_data = output_grad.data<T>();
    T* input_grad_data = input_grad->mutable_data<T>(context.GetPlace());
    // One thread per input element.
    int nthreads = batch_size * input_channels * input_height * input_width;
    int blocks = (nthreads + 1024 - 1) / 1024;
    dim3 threads(1024, 1);
    dim3 grid(blocks, 1);
    KernelPool2DGrad<PoolProcess, T><<<grid, threads, 0, context.stream()>>>(
        nthreads, input_data, output_data, output_grad_data, input_channels,
        input_height, input_width, output_height, output_width, ksize_height,
        ksize_width, stride_height, stride_width, padding_height, padding_width,
        pool_process, exclusive, adaptive, input_grad_data);
  }
};
/*
* All tensors are in NCHW format.
* Ksize, strides, paddings are two elements. These two elements represent
* height and width, respectively.
*/
template <typename T>
class MaxPool2dGradFunctor<platform::CUDADeviceContext, T> {
 public:
  // Backward pass of 2-D max pooling without a saved argmax mask: one
  // thread per output element rediscovers its max position and accumulates
  // the gradient there atomically (KernelMaxPool2DGrad).
  void operator()(const platform::CUDADeviceContext& context,
                  const framework::Tensor& input,
                  const framework::Tensor& output,
                  const framework::Tensor& output_grad,
                  const std::vector<int>& ksize,
                  const std::vector<int>& strides,
                  const std::vector<int>& paddings,
                  framework::Tensor* input_grad) {
    // NCHW layout; ksize/strides/paddings are {height, width} pairs.
    const int batch = input.dims()[0];
    const int in_c = input.dims()[1];
    const int in_h = input.dims()[2];
    const int in_w = input.dims()[3];
    const int out_c = output.dims()[1];
    const int out_h = output.dims()[2];
    const int out_w = output.dims()[3];
    const int k_h = ksize[0], k_w = ksize[1];
    const int s_h = strides[0], s_w = strides[1];
    const int p_h = paddings[0], p_w = paddings[1];
    const T* in_ptr = input.data<T>();
    const T* out_ptr = output.data<T>();
    const T* out_grad_ptr = output_grad.data<T>();
    T* in_grad_ptr = input_grad->mutable_data<T>(context.GetPlace());
    // One thread per output element, 1024 threads per block.
    const int total = batch * out_c * out_h * out_w;
    dim3 block_dim(1024, 1);
    dim3 grid_dim((total + 1023) / 1024, 1);
    KernelMaxPool2DGrad<T><<<grid_dim, block_dim, 0, context.stream()>>>(
        total, in_ptr, out_ptr, out_grad_ptr, in_c, in_h, in_w, out_h, out_w,
        k_h, k_w, s_h, s_w, p_h, p_w, in_grad_ptr);
  }
};
// Explicit instantiations of the 2-D pooling functors for float/double
// with max/avg pool processes.
template class Pool2dDirectCUDAFunctor<paddle::operators::math::MaxPool<float>,
                                       float>;
template class Pool2dDirectCUDAFunctor<paddle::operators::math::AvgPool<float>,
                                       float>;
template class MaxPool2dGradFunctor<platform::CUDADeviceContext, float>;
template class MaxPool2dGradFunctor<platform::CUDADeviceContext, double>;
template class Pool2dFunctor<platform::CUDADeviceContext,
                             paddle::operators::math::MaxPool<float>, float>;
template class Pool2dFunctor<platform::CUDADeviceContext,
                             paddle::operators::math::AvgPool<float>, float>;
template class Pool2dGradFunctor<platform::CUDADeviceContext,
                                 paddle::operators::math::MaxPoolGrad<float>,
                                 float>;
template class Pool2dGradFunctor<platform::CUDADeviceContext,
                                 paddle::operators::math::AvgPoolGrad<float>,
                                 float>;
template class Pool2dFunctor<platform::CUDADeviceContext,
                             paddle::operators::math::MaxPool<double>, double>;
template class Pool2dFunctor<platform::CUDADeviceContext,
                             paddle::operators::math::AvgPool<double>, double>;
template class Pool2dGradFunctor<platform::CUDADeviceContext,
                                 paddle::operators::math::MaxPoolGrad<double>,
                                 double>;
template class Pool2dGradFunctor<platform::CUDADeviceContext,
                                 paddle::operators::math::AvgPoolGrad<double>,
                                 double>;
// Forward 3-D pooling over an NCDHW input. Grid-stride loop: each loop
// iteration produces one output element. PoolProcess supplies
// initial()/compute()/finalize() and selects the pooling type (max/avg);
// `exclusive`/`adaptive` change the window size handed to finalize().
template <typename PoolProcess, typename T>
__global__ void KernelPool3D(
    const int nthreads, const T* input_data, const int channels,
    const int input_depth, const int input_height, const int input_width,
    const int output_depth, const int output_height, const int output_width,
    const int ksize_depth, const int ksize_height, const int ksize_width,
    const int stride_depth, const int stride_height, const int stride_width,
    const int padding_depth, const int padding_height, const int padding_width,
    PoolProcess pool_process, bool exclusive, bool adaptive, T* output_data) {
  for (int index = blockIdx.x * blockDim.x + threadIdx.x; index < nthreads;
       index += blockDim.x * gridDim.x) {
    // Decompose the linear output index into (batch, channel, pd, ph, pw).
    int pw = index % output_width;
    int ph = (index / output_width) % output_height;
    int pd = (index / output_width / output_height) % output_depth;
    int c = (index / output_width / output_height / output_depth) % channels;
    int batch_idx =
        index / output_width / output_height / output_depth / channels;
    int dstart, dend;
    int hstart, hend;
    int wstart, wend;
    if (adaptive) {
      // Adaptive pooling: window bounds follow the input/output ratio.
      dstart = AdaptStartIndex(pd, input_depth, output_depth);
      dend = AdaptEndIndex(pd, input_depth, output_depth);
      hstart = AdaptStartIndex(ph, input_height, output_height);
      hend = AdaptEndIndex(ph, input_height, output_height);
      wstart = AdaptStartIndex(pw, input_width, output_width);
      wend = AdaptEndIndex(pw, input_width, output_width);
    } else {
      // Standard pooling: window from stride/padding, clipped to the input.
      dstart = pd * stride_depth - padding_depth;
      hstart = ph * stride_height - padding_height;
      wstart = pw * stride_width - padding_width;
      dend = min(dstart + ksize_depth, input_depth);
      hend = min(hstart + ksize_height, input_height);
      wend = min(wstart + ksize_width, input_width);
      dstart = max(dstart, 0);
      hstart = max(hstart, 0);
      wstart = max(wstart, 0);
    }
    T ele = pool_process.initial();
    // Advance to this (batch, channel) feature volume.
    input_data +=
        (batch_idx * channels + c) * input_depth * input_height * input_width;
    for (int d = dstart; d < dend; ++d) {
      for (int h = hstart; h < hend; ++h) {
        for (int w = wstart; w < wend; ++w) {
          pool_process.compute(
              input_data[(d * input_height + h) * input_width + w], &ele);
        }
      }
    }
    // Effective window size passed to finalize() (the avg-pool divisor).
    int pool_size = (exclusive || adaptive)
                        ? (dend - dstart) * (hend - hstart) * (wend - wstart)
                        : ksize_depth * ksize_height * ksize_width;
    pool_process.finalize(static_cast<T>(pool_size), &ele);
    output_data[index] = ele;
  }
}
// Backward 3-D pooling: one grid-stride iteration per *input* element.
// Each thread gathers contributions from every output position whose
// pooling window covers its input location, so no atomics are needed.
template <typename PoolProcess, typename T>
__global__ void KernelPool3DGrad(
    const int nthreads, const T* input_data, const T* output_data,
    const T* output_grad, const int channels, const int input_depth,
    const int input_height, const int input_width, const int output_depth,
    const int output_height, const int output_width, const int ksize_depth,
    const int ksize_height, const int ksize_width, const int stride_depth,
    const int stride_height, const int stride_width, const int padding_depth,
    const int padding_height, const int padding_width, PoolProcess pool_process,
    bool exclusive, bool adaptive, T* input_grad) {
  for (int index = blockIdx.x * blockDim.x + threadIdx.x; index < nthreads;
       index += blockDim.x * gridDim.x) {
    // Input coordinates shifted into the padded frame.
    int w_offset = index % input_width + padding_width;
    int h_offset = (index / input_width) % input_height + padding_height;
    int d_offset =
        (index / input_width / input_height) % input_depth + padding_depth;
    int offsetC = (index / input_width / input_height / input_depth) % channels;
    int batch_idx = index / input_width / input_height / input_depth / channels;
    // Ranges of output positions whose windows contain this input element.
    int pdstart, pdend;
    int phstart, phend;
    int pwstart, pwend;
    if (adaptive) {
      pdstart = d_offset * output_depth / input_depth;
      pdend =
          min((d_offset + 1) * output_depth / input_depth + 1, output_depth);
      phstart = h_offset * output_height / input_height;
      phend =
          min((h_offset + 1) * output_height / input_height + 1, output_height);
      pwstart = w_offset * output_width / input_width;
      pwend =
          min((w_offset + 1) * output_width / input_width + 1, output_width);
    } else {
      pdstart = (d_offset < ksize_depth)
                    ? 0
                    : (d_offset - ksize_depth) / stride_depth + 1;
      phstart = (h_offset < ksize_height)
                    ? 0
                    : (h_offset - ksize_height) / stride_height + 1;
      pwstart = (w_offset < ksize_width)
                    ? 0
                    : (w_offset - ksize_width) / stride_width + 1;
      pdend = min((d_offset) / stride_depth + 1, output_depth);
      phend = min((h_offset) / stride_height + 1, output_height);
      pwend = min((w_offset) / stride_width + 1, output_width);
    }
    T gradient = 0;
    T input = input_data[index];
    // Rebase output pointers onto this (batch, channel) feature volume.
    int output_idx = (batch_idx * channels + offsetC) * output_depth *
                     output_height * output_width;
    output_data += output_idx;
    output_grad += output_idx;
    for (int pd = pdstart; pd < pdend; ++pd) {
      for (int ph = phstart; ph < phend; ++ph) {
        for (int pw = pwstart; pw < pwend; ++pw) {
          // figure out the pooling size
          int pool_size;
          if (adaptive) {
            pool_size =
                static_cast<int>(
                    ceil(static_cast<double>(input_depth) / ksize_depth)) *
                static_cast<int>(
                    ceil(static_cast<double>(input_height) / ksize_height)) *
                static_cast<int>(
                    ceil(static_cast<double>(input_width) / ksize_width));
          } else {
            int dstart = pd * stride_depth - padding_depth;
            int hstart = ph * stride_height - padding_height;
            int wstart = pw * stride_width - padding_width;
            int dend = min(dstart + ksize_depth, input_depth);
            int hend = min(hstart + ksize_height, input_height);
            int wend = min(wstart + ksize_width, input_width);
            dstart = max(dstart, 0);
            hstart = max(hstart, 0);
            wstart = max(wstart, 0);
            pool_size =
                exclusive ? (dend - dstart) * (hend - hstart) * (wend - wstart)
                          : ksize_depth * ksize_height * ksize_width;
          }
          int output_sub_idx = (pd * output_height + ph) * output_width + pw;
          pool_process.compute(input, output_data[output_sub_idx],
                               output_grad[output_sub_idx],
                               static_cast<T>(1.0 / pool_size), &gradient);
        }
      }
    }
    input_grad[index] = gradient;
  }
}
// Backward of 3-D max pooling without a saved argmax mask: one grid-stride
// iteration per *output* element. Each thread re-scans its window for the
// first input equal to the pooled maximum and routes the gradient there
// with an atomic add (several windows may pick the same input element).
template <typename T>
__global__ void KernelMaxPool3DGrad(
    const int nthreads, const T* input_data, const T* output_data,
    const T* output_grad, const int channels, const int input_depth,
    const int input_height, const int input_width, const int output_depth,
    const int output_height, const int output_width, const int ksize_depth,
    const int ksize_height, const int ksize_width, const int stride_depth,
    const int stride_height, const int stride_width, const int padding_depth,
    const int padding_height, const int padding_width, T* input_grad) {
  for (int index = blockIdx.x * blockDim.x + threadIdx.x; index < nthreads;
       index += blockDim.x * gridDim.x) {
    // Decompose the linear output index into (batch, channel, pd, ph, pw).
    int pw = index % output_width;
    int ph = (index / output_width) % output_height;
    int pd = (index / output_width / output_height) % output_depth;
    int c = (index / output_width / output_height / output_depth) % channels;
    int batch_idx =
        index / output_width / output_height / output_depth / channels;
    // Window bounds, clipped to the input.
    int dstart = pd * stride_depth - padding_depth;
    int hstart = ph * stride_height - padding_height;
    int wstart = pw * stride_width - padding_width;
    int dend = min(dstart + ksize_depth, input_depth);
    int hend = min(hstart + ksize_height, input_height);
    int wend = min(wstart + ksize_width, input_width);
    dstart = max(dstart, 0);
    hstart = max(hstart, 0);
    wstart = max(wstart, 0);
    T ele = output_data[index];
    bool stop = false;
    int maxIdx = -1;
    input_data +=
        (batch_idx * channels + c) * input_depth * input_height * input_width;
    input_grad +=
        (batch_idx * channels + c) * input_depth * input_height * input_width;
    // Find the first window element equal to the forward max.
    for (int d = dstart; d < dend && !stop; ++d) {
      for (int h = hstart; h < hend && !stop; ++h) {
        for (int w = wstart; w < wend && !stop; ++w) {
          if (ele == input_data[(d * input_height + h) * input_width + w]) {
            stop = true;
            maxIdx = (d * input_height + h) * input_width + w;
          }
        }
      }
    }
    if (maxIdx != -1) {
      // atomic add
      platform::CudaAtomicAdd(input_grad + maxIdx, output_grad[index]);
    }
  }
}
/*
* All tensors are in NCDHW format.
* Ksize, strides, paddings are three elements. These three elements represent
* depth, height and width, respectively.
*/
template <typename PoolProcess, class T>
class Pool3dFunctor<platform::CUDADeviceContext, PoolProcess, T> {
 public:
  // Forward 3-D pooling on NCDHW tensors; PoolProcess selects max/avg.
  // Launch config: one thread per output element, 1024 threads per block.
  void operator()(const platform::CUDADeviceContext& context,
                  const framework::Tensor& input, const std::vector<int>& ksize,
                  const std::vector<int>& strides,
                  const std::vector<int>& paddings, PoolProcess pool_process,
                  bool exclusive, bool adaptive, framework::Tensor* output) {
    // ksize/strides/paddings hold {depth, height, width}.
    const int batch_size = input.dims()[0];
    const int input_channels = input.dims()[1];
    const int input_depth = input.dims()[2];
    const int input_height = input.dims()[3];
    const int input_width = input.dims()[4];
    const int output_channels = output->dims()[1];
    const int output_depth = output->dims()[2];
    const int output_height = output->dims()[3];
    const int output_width = output->dims()[4];
    const int ksize_depth = ksize[0];
    const int ksize_height = ksize[1];
    const int ksize_width = ksize[2];
    const int stride_depth = strides[0];
    const int stride_height = strides[1];
    const int stride_width = strides[2];
    const int padding_depth = paddings[0];
    const int padding_height = paddings[1];
    const int padding_width = paddings[2];
    const T* input_data = input.data<T>();
    T* output_data = output->mutable_data<T>(context.GetPlace());
    // One thread per output element.
    int nthreads = batch_size * output_channels * output_depth * output_height *
                   output_width;
    int blocks = (nthreads + 1024 - 1) / 1024;
    dim3 threads(1024, 1);
    dim3 grid(blocks, 1);
    KernelPool3D<PoolProcess, T><<<grid, threads, 0, context.stream()>>>(
        nthreads, input_data, input_channels, input_depth, input_height,
        input_width, output_depth, output_height, output_width, ksize_depth,
        ksize_height, ksize_width, stride_depth, stride_height, stride_width,
        padding_depth, padding_height, padding_width, pool_process, exclusive,
        adaptive, output_data);
  }
};
/*
* All tensors are in NCDHW format.
* Ksize, strides, paddings are three elements. These three elements represent
* depth, height and width, respectively.
*/
template <typename PoolProcess, class T>
class Pool3dGradFunctor<platform::CUDADeviceContext, PoolProcess, T> {
 public:
  // Backward 3-D pooling on NCDHW tensors; PoolProcess supplies the
  // gradient rule (max/avg). Launch config: one thread per *input*
  // element (gather form, no atomics), 1024 threads per block.
  void operator()(const platform::CUDADeviceContext& context,
                  const framework::Tensor& input,
                  const framework::Tensor& output,
                  const framework::Tensor& output_grad,
                  const std::vector<int>& ksize,
                  const std::vector<int>& strides,
                  const std::vector<int>& paddings, PoolProcess pool_process,
                  bool exclusive, bool adaptive,
                  framework::Tensor* input_grad) {
    // ksize/strides/paddings hold {depth, height, width}.
    const int batch_size = input.dims()[0];
    const int input_channels = input.dims()[1];
    const int input_depth = input.dims()[2];
    const int input_height = input.dims()[3];
    const int input_width = input.dims()[4];
    const int output_channels = output.dims()[1];
    const int output_depth = output.dims()[2];
    const int output_height = output.dims()[3];
    const int output_width = output.dims()[4];
    const int ksize_depth = ksize[0];
    const int ksize_height = ksize[1];
    const int ksize_width = ksize[2];
    const int stride_depth = strides[0];
    const int stride_height = strides[1];
    const int stride_width = strides[2];
    const int padding_depth = paddings[0];
    const int padding_height = paddings[1];
    const int padding_width = paddings[2];
    const T* input_data = input.data<T>();
    const T* output_data = output.data<T>();
    const T* output_grad_data = output_grad.data<T>();
    T* input_grad_data = input_grad->mutable_data<T>(context.GetPlace());
    // One thread per input element.
    int nthreads =
        batch_size * input_channels * input_depth * input_height * input_width;
    int blocks = (nthreads + 1024 - 1) / 1024;
    dim3 threads(1024, 1);
    dim3 grid(blocks, 1);
    KernelPool3DGrad<PoolProcess, T><<<grid, threads, 0, context.stream()>>>(
        nthreads, input_data, output_data, output_grad_data, input_channels,
        input_depth, input_height, input_width, output_depth, output_height,
        output_width, ksize_depth, ksize_height, ksize_width, stride_depth,
        stride_height, stride_width, padding_depth, padding_height,
        padding_width, pool_process, exclusive, adaptive, input_grad_data);
  }
};
/*
* All tensors are in NCDHW format.
* Ksize, strides, paddings are three elements. These three elements represent
* depth, height and width, respectively.
*/
template <class T>
class MaxPool3dGradFunctor<platform::CUDADeviceContext, T> {
 public:
  // Backward of 3-D max pooling without a saved argmax mask: one thread
  // per output element re-finds its max position and accumulates the
  // gradient there atomically (KernelMaxPool3DGrad).
  void operator()(const platform::CUDADeviceContext& context,
                  const framework::Tensor& input,
                  const framework::Tensor& output,
                  const framework::Tensor& output_grad,
                  const std::vector<int>& ksize,
                  const std::vector<int>& strides,
                  const std::vector<int>& paddings,
                  framework::Tensor* input_grad) {
    // NCDHW layout; ksize/strides/paddings hold {depth, height, width}.
    const int batch_size = input.dims()[0];
    const int input_channels = input.dims()[1];
    const int input_depth = input.dims()[2];
    const int input_height = input.dims()[3];
    const int input_width = input.dims()[4];
    const int output_channels = output.dims()[1];
    const int output_depth = output.dims()[2];
    const int output_height = output.dims()[3];
    const int output_width = output.dims()[4];
    const int ksize_depth = ksize[0];
    const int ksize_height = ksize[1];
    const int ksize_width = ksize[2];
    const int stride_depth = strides[0];
    const int stride_height = strides[1];
    const int stride_width = strides[2];
    const int padding_depth = paddings[0];
    const int padding_height = paddings[1];
    const int padding_width = paddings[2];
    const T* input_data = input.data<T>();
    const T* output_data = output.data<T>();
    const T* output_grad_data = output_grad.data<T>();
    T* input_grad_data = input_grad->mutable_data<T>(context.GetPlace());
    // One thread per output element.
    int nthreads = batch_size * output_channels * output_depth * output_height *
                   output_width;
    int blocks = (nthreads + 1024 - 1) / 1024;
    dim3 threads(1024, 1);
    dim3 grid(blocks, 1);
    KernelMaxPool3DGrad<T><<<grid, threads, 0, context.stream()>>>(
        nthreads, input_data, output_data, output_grad_data, input_channels,
        input_depth, input_height, input_width, output_depth, output_height,
        output_width, ksize_depth, ksize_height, ksize_width, stride_depth,
        stride_height, stride_width, padding_depth, padding_height,
        padding_width, input_grad_data);
  }
};
// Explicit instantiations of the CUDA 3D pooling functors: float and double
// element types, with max/avg pooling processes for forward and gradient.
template class MaxPool3dGradFunctor<platform::CUDADeviceContext, float>;
template class MaxPool3dGradFunctor<platform::CUDADeviceContext, double>;
template class Pool3dFunctor<platform::CUDADeviceContext,
                             paddle::operators::math::MaxPool<float>, float>;
template class Pool3dFunctor<platform::CUDADeviceContext,
                             paddle::operators::math::AvgPool<float>, float>;
template class Pool3dGradFunctor<platform::CUDADeviceContext,
                                 paddle::operators::math::MaxPoolGrad<float>,
                                 float>;
template class Pool3dGradFunctor<platform::CUDADeviceContext,
                                 paddle::operators::math::AvgPoolGrad<float>,
                                 float>;
template class Pool3dFunctor<platform::CUDADeviceContext,
                             paddle::operators::math::MaxPool<double>, double>;
template class Pool3dFunctor<platform::CUDADeviceContext,
                             paddle::operators::math::AvgPool<double>, double>;
template class Pool3dGradFunctor<platform::CUDADeviceContext,
                                 paddle::operators::math::MaxPoolGrad<double>,
                                 double>;
template class Pool3dGradFunctor<platform::CUDADeviceContext,
                                 paddle::operators::math::AvgPoolGrad<double>,
                                 double>;
// Forward kernel for 2D max pooling with argmax output.
// One thread per output element (grid-stride loop over nthreads). For each
// output position the thread scans its pooling window, writes the maximum to
// output_data and the *flat* index of the maximum within the (n, c) input
// plane to mask_data. T1 is the value type, T2 the mask/index type.
// `adaptive` switches between fixed stride/pad windows and adaptive windows
// computed from the input/output ratio (AdaptStartIndex/AdaptEndIndex).
template <typename T1, typename T2>
__global__ void KernelMaxPool2dWithIdx(
    const int nthreads, const T1* input_data, const int channels,
    const int input_height, const int input_width, const int output_height,
    const int output_width, const int ksize_height, const int ksize_width,
    const int stride_height, const int stride_width, const int padding_height,
    const int padding_width, bool adaptive, T1* output_data, T2* mask_data) {
  for (int index = blockIdx.x * blockDim.x + threadIdx.x; index < nthreads;
       index += blockDim.x * gridDim.x) {
    // Decompose the flat output index into (batch, channel, ph, pw).
    int pw = index % output_width;
    int ph = (index / output_width) % output_height;
    int c = (index / output_width / output_height) % channels;
    int batch_idx = index / output_width / output_height / channels;
    int hstart, hend;
    int wstart, wend;
    if (adaptive) {
      hstart = AdaptStartIndex(ph, input_height, output_height);
      hend = AdaptEndIndex(ph, input_height, output_height);
      wstart = AdaptStartIndex(pw, input_width, output_width);
      wend = AdaptEndIndex(pw, input_width, output_width);
    } else {
      // Clip the window to the valid input region (padding is excluded).
      hstart = ph * stride_height - padding_height;
      hend = min(hstart + ksize_height, input_height);
      hstart = max(hstart, 0);
      wstart = pw * stride_width - padding_width;
      wend = min(wstart + ksize_width, input_width);
      wstart = max(wstart, 0);
    }
    // Advance to this thread's (batch, channel) input plane.
    input_data += (batch_idx * channels + c) * input_height * input_width;
    // NOTE(review): -FLT_MAX is used as the lower bound even when T1 is
    // double; double inputs below -FLT_MAX would be missed — confirm intended.
    T1 ele = -FLT_MAX;
    int max_index = -1;
    for (int h = hstart; h < hend; ++h) {
      for (int w = wstart; w < wend; ++w) {
        int input_index = h * input_width + w;
        if (ele < input_data[input_index]) {
          max_index = input_index;
          ele = input_data[input_index];
        }
      }
    }
    output_data[index] = ele;
    // mask stores the argmax position; -1 only if the window was empty.
    mask_data[index] = max_index;
  }
}
// Backward kernel for 2D max pooling with index.
// One thread per *input* element (grid-stride loop). Each thread computes the
// range of output windows [phstart, phend) x [pwstart, pwend) that could
// contain its input position, then accumulates output_grad from every window
// whose stored argmax (mask_data) equals this input position. This scatter-
// free formulation needs no atomics because each input cell is written by
// exactly one thread.
template <typename T1, typename T2>
__global__ void KernelMaxPool2DWithIdxGrad(
    const int nthreads, const T1* output_grad, const T2* mask_data,
    const int channels, const int input_height, const int input_width,
    const int output_height, const int output_width, const int ksize_height,
    const int ksize_width, const int stride_height, const int stride_width,
    const int padding_height, const int padding_width, bool adaptive,
    T1* input_grad) {
  for (int index = blockIdx.x * blockDim.x + threadIdx.x; index < nthreads;
       index += blockDim.x * gridDim.x) {
    // Decompose the flat input index into (batch, channel, h, w).
    int w_offset = index % input_width;
    int h_offset = (index / input_width) % input_height;
    int offsetC = (index / input_width / input_height) % channels;
    int batch_idx = index / input_width / input_height / channels;
    int phstart, phend;
    int pwstart, pwend;
    if (adaptive) {
      // Inverse of the adaptive window mapping used in the forward pass.
      phstart = h_offset * output_height / input_height;
      phend =
          min((h_offset + 1) * output_height / input_height + 1, output_height);
      pwstart = w_offset * output_width / input_width;
      pwend =
          min((w_offset + 1) * output_width / input_width + 1, output_width);
    } else {
      // First/last output windows whose (stride, pad, ksize) range covers
      // this input position.
      phstart =
          (h_offset + padding_height < ksize_height)
              ? 0
              : (h_offset + padding_height - ksize_height) / stride_height + 1;
      pwstart =
          (w_offset + padding_width < ksize_width)
              ? 0
              : (w_offset + padding_width - ksize_width) / stride_width + 1;
      phend =
          min((h_offset + padding_height) / stride_height + 1, output_height);
      pwend = min((w_offset + padding_width) / stride_width + 1, output_width);
    }
    T1 gradient = 0;
    // Flat position of this input cell inside its (n, c) plane — the value
    // the forward pass stored in the mask.
    int input_current_featuremap_idx = h_offset * input_width + w_offset;
    int output_idx =
        (batch_idx * channels + offsetC) * output_height * output_width;
    mask_data += output_idx;
    output_grad += output_idx;
    for (int ph = phstart; ph < phend; ++ph) {
      for (int pw = pwstart; pw < pwend; ++pw) {
        if (mask_data[ph * output_width + pw] == input_current_featuremap_idx)
          gradient += output_grad[ph * output_width + pw];
      }
    }
    input_grad[index] = gradient;
  }
}
/*
 * All tensors are in NCHW format.
 * Ksize, strides, paddings are two elements. These two elements represent
 * height and width, respectively.
 */
// CUDA forward functor for 2D max pooling with index: fills `output` with the
// window maxima and `mask` with the flat argmax indices (see
// KernelMaxPool2dWithIdx). One thread per output element.
template <typename T1, typename T2>
class MaxPool2dWithIndexFunctor<platform::CUDADeviceContext, T1, T2> {
 public:
  void operator()(const platform::CUDADeviceContext& context,
                  const framework::Tensor& input, const std::vector<int>& ksize,
                  const std::vector<int>& strides,
                  const std::vector<int>& paddings, bool adaptive,
                  framework::Tensor* output, framework::Tensor* mask) {
    // Unpack NCHW shapes; ksize/strides/paddings are {height, width}.
    const int batch_size = input.dims()[0];
    const int input_channels = input.dims()[1];
    const int input_height = input.dims()[2];
    const int input_width = input.dims()[3];
    const int output_channels = output->dims()[1];
    const int output_height = output->dims()[2];
    const int output_width = output->dims()[3];
    const int ksize_height = ksize[0];
    const int ksize_width = ksize[1];
    const int stride_height = strides[0];
    const int stride_width = strides[1];
    const int padding_height = paddings[0];
    const int padding_width = paddings[1];
    const T1* input_data = input.data<T1>();
    T1* output_data = output->mutable_data<T1>(context.GetPlace());
    T2* mask_data = mask->mutable_data<T2>(context.GetPlace());
    // One thread per output element; fixed 1024-thread blocks on a 1-D grid.
    int nthreads = batch_size * output_channels * output_height * output_width;
    int blocks = (nthreads + 1024 - 1) / 1024;
    dim3 threads(1024, 1);
    dim3 grid(blocks, 1);
    KernelMaxPool2dWithIdx<T1, T2><<<grid, threads, 0, context.stream()>>>(
        nthreads, input_data, input_channels, input_height, input_width,
        output_height, output_width, ksize_height, ksize_width, stride_height,
        stride_width, padding_height, padding_width, adaptive, output_data,
        mask_data);
  }
};
/*
 * All tensors are in NCHW format.
 * Ksize, strides, paddings are two elements. These two elements represent
 * height and width, respectively.
 */
// CUDA backward functor for 2D max pooling with index: routes output_grad to
// the input positions recorded in `mask` (see KernelMaxPool2DWithIdxGrad).
// One thread per *input* element, so no atomics are needed.
template <typename T1, typename T2>
class MaxPool2dWithIndexGradFunctor<platform::CUDADeviceContext, T1, T2> {
 public:
  void operator()(const platform::CUDADeviceContext& context,
                  const framework::Tensor& output_grad,
                  const framework::Tensor& mask, const std::vector<int>& ksize,
                  const std::vector<int>& strides,
                  const std::vector<int>& paddings, bool adaptive,
                  framework::Tensor* input_grad) {
    // Input shape is taken from input_grad (the forward input shape).
    const int batch_size = input_grad->dims()[0];
    const int input_channels = input_grad->dims()[1];
    const int input_height = input_grad->dims()[2];
    const int input_width = input_grad->dims()[3];
    const int output_height = output_grad.dims()[2];
    const int output_width = output_grad.dims()[3];
    const int ksize_height = ksize[0];
    const int ksize_width = ksize[1];
    const int stride_height = strides[0];
    const int stride_width = strides[1];
    const int padding_height = paddings[0];
    const int padding_width = paddings[1];
    const T2* mask_data = mask.data<T2>();
    const T1* output_grad_data = output_grad.data<T1>();
    T1* input_grad_data = input_grad->mutable_data<T1>(context.GetPlace());
    // One thread per input element; fixed 1024-thread blocks on a 1-D grid.
    int nthreads = batch_size * input_channels * input_height * input_width;
    int blocks = (nthreads + 1024 - 1) / 1024;
    dim3 threads(1024, 1);
    dim3 grid(blocks, 1);
    KernelMaxPool2DWithIdxGrad<T1, T2><<<grid, threads, 0, context.stream()>>>(
        nthreads, output_grad_data, mask_data, input_channels, input_height,
        input_width, output_height, output_width, ksize_height, ksize_width,
        stride_height, stride_width, padding_height, padding_width, adaptive,
        input_grad_data);
  }
};
// Explicit instantiations: value types float/double, mask type always int.
template class MaxPool2dWithIndexFunctor<platform::CUDADeviceContext, float,
                                         int>;
template class MaxPool2dWithIndexGradFunctor<platform::CUDADeviceContext, float,
                                             int>;
template class MaxPool2dWithIndexFunctor<platform::CUDADeviceContext, double,
                                         int>;
template class MaxPool2dWithIndexGradFunctor<platform::CUDADeviceContext,
                                             double, int>;
// Forward kernel for 3D max pooling with argmax output — 3D analogue of
// KernelMaxPool2dWithIdx. One thread per output element (grid-stride loop).
// Writes the window maximum to output_data and the flat index of the maximum
// within the (n, c) input volume to mask_data.
template <typename T1, typename T2>
__global__ void KernelMaxPool3DWithIdx(
    const int nthreads, const T1* input_data, const int channels,
    const int input_depth, const int input_height, const int input_width,
    const int output_depth, const int output_height, const int output_width,
    const int ksize_depth, const int ksize_height, const int ksize_width,
    const int stride_depth, const int stride_height, const int stride_width,
    const int padding_depth, const int padding_height, const int padding_width,
    bool adaptive, T1* output_data, T2* mask_data) {
  for (int index = blockIdx.x * blockDim.x + threadIdx.x; index < nthreads;
       index += blockDim.x * gridDim.x) {
    // Decompose the flat output index into (batch, channel, pd, ph, pw).
    int pw = index % output_width;
    int ph = (index / output_width) % output_height;
    int pd = (index / output_width / output_height) % output_depth;
    int c = (index / output_width / output_height / output_depth) % channels;
    int batch_idx =
        index / output_width / output_height / output_depth / channels;
    int dstart, dend;
    int hstart, hend;
    int wstart, wend;
    if (adaptive) {
      dstart = AdaptStartIndex(pd, input_depth, output_depth);
      dend = AdaptEndIndex(pd, input_depth, output_depth);
      hstart = AdaptStartIndex(ph, input_height, output_height);
      hend = AdaptEndIndex(ph, input_height, output_height);
      wstart = AdaptStartIndex(pw, input_width, output_width);
      wend = AdaptEndIndex(pw, input_width, output_width);
    } else {
      // Clip each window dimension to the valid input region.
      dstart = pd * stride_depth - padding_depth;
      hstart = ph * stride_height - padding_height;
      wstart = pw * stride_width - padding_width;
      dend = min(dstart + ksize_depth, input_depth);
      hend = min(hstart + ksize_height, input_height);
      wend = min(wstart + ksize_width, input_width);
      dstart = max(dstart, 0);
      hstart = max(hstart, 0);
      wstart = max(wstart, 0);
    }
    // NOTE(review): -FLT_MAX lower bound even for double T1 — see the 2D
    // kernel; double inputs below -FLT_MAX would be missed.
    T1 ele = -FLT_MAX;
    int max_index = -1;
    // Advance to this thread's (batch, channel) input volume.
    input_data +=
        (batch_idx * channels + c) * input_depth * input_height * input_width;
    for (int d = dstart; d < dend; ++d) {
      for (int h = hstart; h < hend; ++h) {
        for (int w = wstart; w < wend; ++w) {
          if (ele < input_data[(d * input_height + h) * input_width + w]) {
            max_index = (d * input_height + h) * input_width + w;
            ele = input_data[max_index];
          }
        }
      }
    }
    output_data[index] = ele;
    mask_data[index] = max_index;
  }
}
// Backward kernel for 3D max pooling with index — 3D analogue of
// KernelMaxPool2DWithIdxGrad. One thread per *input* element; each thread
// scans the output windows that could cover its position and accumulates
// output_grad where the stored argmax matches. No atomics needed since each
// input cell has exactly one writer.
template <typename T1, typename T2>
__global__ void KernelMaxPool3DWithIdxGrad(
    const int nthreads, const T1* output_grad, const T2* mask,
    const int channels, const int input_depth, const int input_height,
    const int input_width, const int output_depth, const int output_height,
    const int output_width, const int ksize_depth, const int ksize_height,
    const int ksize_width, const int stride_depth, const int stride_height,
    const int stride_width, const int padding_depth, const int padding_height,
    const int padding_width, bool adaptive, T1* input_grad) {
  for (int index = blockIdx.x * blockDim.x + threadIdx.x; index < nthreads;
       index += blockDim.x * gridDim.x) {
    // Decompose the flat input index into (batch, channel, d, h, w).
    int w_offset = index % input_width;
    int h_offset = (index / input_width) % input_height;
    int d_offset = (index / input_width / input_height) % input_depth;
    int offsetC = (index / input_width / input_height / input_depth) % channels;
    int batch_idx = index / input_width / input_height / input_depth / channels;
    int pdstart, pdend;
    int phstart, phend;
    int pwstart, pwend;
    if (adaptive) {
      // Inverse of the adaptive window mapping used in the forward pass.
      pdstart = d_offset * output_depth / input_depth;
      pdend =
          min((d_offset + 1) * output_depth / input_depth + 1, output_depth);
      phstart = h_offset * output_height / input_height;
      phend =
          min((h_offset + 1) * output_height / input_height + 1, output_height);
      pwstart = w_offset * output_width / input_width;
      pwend =
          min((w_offset + 1) * output_width / input_width + 1, output_width);
    } else {
      // First/last output windows whose (stride, pad, ksize) range covers
      // this input position in each dimension.
      pdstart =
          (d_offset + padding_depth < ksize_depth)
              ? 0
              : (d_offset + padding_depth - ksize_depth) / stride_depth + 1;
      phstart =
          (h_offset + padding_height < ksize_height)
              ? 0
              : (h_offset + padding_height - ksize_height) / stride_height + 1;
      pwstart =
          (w_offset + padding_width < ksize_width)
              ? 0
              : (w_offset + padding_width - ksize_width) / stride_width + 1;
      pdend = min((d_offset + padding_depth) / stride_depth + 1, output_depth);
      phend =
          min((h_offset + padding_height) / stride_height + 1, output_height);
      pwend = min((w_offset + padding_width) / stride_width + 1, output_width);
    }
    T1 gradient = 0;
    // Flat position of this input cell inside its (n, c) volume — the value
    // the forward pass stored in the mask.
    int input_current_feature_map_idx =
        (d_offset * input_height + h_offset) * input_width + w_offset;
    int output_idx = (batch_idx * channels + offsetC) * output_depth *
                     output_height * output_width;
    mask += output_idx;
    output_grad += output_idx;
    for (int pd = pdstart; pd < pdend; ++pd) {
      for (int ph = phstart; ph < phend; ++ph) {
        for (int pw = pwstart; pw < pwend; ++pw) {
          if (mask[(pd * output_height + ph) * output_width + pw] ==
              input_current_feature_map_idx)
            gradient +=
                output_grad[(pd * output_height + ph) * output_width + pw];
        }
      }
    }
    input_grad[index] = gradient;
  }
}
/*
 * All tensors are in NCDHW format.
 * Ksize, strides, paddings are three elements. These three elements represent
 * depth, height and width, respectively.
 */
// CUDA forward functor for 3D max pooling with index: fills `output` with
// window maxima and `mask` with flat argmax indices (see
// KernelMaxPool3DWithIdx). One thread per output element.
template <typename T1, typename T2>
class MaxPool3dWithIndexFunctor<platform::CUDADeviceContext, T1, T2> {
 public:
  void operator()(const platform::CUDADeviceContext& context,
                  const framework::Tensor& input, const std::vector<int>& ksize,
                  const std::vector<int>& strides,
                  const std::vector<int>& paddings, bool adaptive,
                  framework::Tensor* output, framework::Tensor* mask) {
    // Unpack NCDHW shapes; ksize/strides/paddings are {depth, height, width}.
    const int batch_size = input.dims()[0];
    const int input_channels = input.dims()[1];
    const int input_depth = input.dims()[2];
    const int input_height = input.dims()[3];
    const int input_width = input.dims()[4];
    const int output_channels = output->dims()[1];
    const int output_depth = output->dims()[2];
    const int output_height = output->dims()[3];
    const int output_width = output->dims()[4];
    const int ksize_depth = ksize[0];
    const int ksize_height = ksize[1];
    const int ksize_width = ksize[2];
    const int stride_depth = strides[0];
    const int stride_height = strides[1];
    const int stride_width = strides[2];
    const int padding_depth = paddings[0];
    const int padding_height = paddings[1];
    const int padding_width = paddings[2];
    const T1* input_data = input.data<T1>();
    T1* output_data = output->mutable_data<T1>(context.GetPlace());
    T2* mask_data = mask->mutable_data<T2>(context.GetPlace());
    // One thread per output element; fixed 1024-thread blocks on a 1-D grid.
    int nthreads = batch_size * output_channels * output_depth * output_height *
                   output_width;
    int blocks = (nthreads + 1024 - 1) / 1024;
    dim3 threads(1024, 1);
    dim3 grid(blocks, 1);
    KernelMaxPool3DWithIdx<T1, T2><<<grid, threads, 0, context.stream()>>>(
        nthreads, input_data, input_channels, input_depth, input_height,
        input_width, output_depth, output_height, output_width, ksize_depth,
        ksize_height, ksize_width, stride_depth, stride_height, stride_width,
        padding_depth, padding_height, padding_width, adaptive, output_data,
        mask_data);
  }
};
/*
 * All tensors are in NCDHW format.
 * Ksize, strides, paddings are three elements. These three elements represent
 * depth, height and width, respectively.
 */
// CUDA backward functor for 3D max pooling with index: routes output_grad to
// the input positions recorded in `mask` (see KernelMaxPool3DWithIdxGrad).
// One thread per *input* element, so no atomics are needed.
template <typename T1, typename T2>
class MaxPool3dWithIndexGradFunctor<platform::CUDADeviceContext, T1, T2> {
 public:
  void operator()(const platform::CUDADeviceContext& context,
                  const framework::Tensor& output_grad,
                  const framework::Tensor& mask, const std::vector<int>& ksize,
                  const std::vector<int>& strides,
                  const std::vector<int>& paddings, bool adaptive,
                  framework::Tensor* input_grad) {
    // Input shape is taken from input_grad (the forward input shape).
    const int batch_size = input_grad->dims()[0];
    const int input_channels = input_grad->dims()[1];
    const int input_depth = input_grad->dims()[2];
    const int input_height = input_grad->dims()[3];
    const int input_width = input_grad->dims()[4];
    const int output_depth = output_grad.dims()[2];
    const int output_height = output_grad.dims()[3];
    const int output_width = output_grad.dims()[4];
    const int ksize_depth = ksize[0];
    const int ksize_height = ksize[1];
    const int ksize_width = ksize[2];
    const int stride_depth = strides[0];
    const int stride_height = strides[1];
    const int stride_width = strides[2];
    const int padding_depth = paddings[0];
    const int padding_height = paddings[1];
    const int padding_width = paddings[2];
    const T1* output_grad_data = output_grad.data<T1>();
    const T2* mask_data = mask.data<T2>();
    T1* input_grad_data = input_grad->mutable_data<T1>(context.GetPlace());
    // One thread per input element; fixed 1024-thread blocks on a 1-D grid.
    int nthreads =
        batch_size * input_channels * input_depth * input_height * input_width;
    int blocks = (nthreads + 1024 - 1) / 1024;
    dim3 threads(1024, 1);
    dim3 grid(blocks, 1);
    KernelMaxPool3DWithIdxGrad<T1, T2><<<grid, threads, 0, context.stream()>>>(
        nthreads, output_grad_data, mask_data, input_channels, input_depth,
        input_height, input_width, output_depth, output_height, output_width,
        ksize_depth, ksize_height, ksize_width, stride_depth, stride_height,
        stride_width, padding_depth, padding_height, padding_width, adaptive,
        input_grad_data);
  }
};
// Explicit instantiations: value types float/double, mask type always int.
template class MaxPool3dWithIndexFunctor<platform::CUDADeviceContext, float,
                                         int>;
template class MaxPool3dWithIndexGradFunctor<platform::CUDADeviceContext, float,
                                             int>;
template class MaxPool3dWithIndexFunctor<platform::CUDADeviceContext, double,
                                         int>;
template class MaxPool3dWithIndexGradFunctor<platform::CUDADeviceContext,
                                             double, int>;
} // namespace math
} // namespace operators
} // namespace paddle
|
0bd4130c03b3d87d1c1758cbaacadce75199d726.hip | // !!! This is a file automatically generated by hipify!!!
/** This file contains the implementation of the embedded version of our multi level memory model. The purpose of this
* code is to start with an input of simulation data, e.g. from a prior timestep and to re-sort this data first into
* cells and then to construct the according hash maps for this data. Significant amounts of this code are external,
* e.g. the sorting method itself as this was not part of our contribution.*/
#include <SPH/compactMLMResort/compactMLM.cuh>
#include <utility/include_all.h>
#include "dummy.h"
// The SPH namespace contains all of our simulation methods to avoid global namespace collisions
namespace SPH {
// compactMLM refers to the embedded version of our MLM data structure where the cell table is embedded in the has table
namespace compactMLM {
// Hash table construction, step 1 of 2: for every sorted cell i, the thread
// that owns the first cell of a hash bucket records that cell's index as the
// bucket's beginning. Buckets are runs of equal resortIndex values.
basicFunctionType cudaHashTablea(SPH::compactMLM::Memory arrays, int32_t threads) {
  checkedThreadIdx(i);
  const auto hashIdx = arrays.resortIndex[i];
  // Only the first entry of each run of identical hashes writes the start.
  if (i != 0 && arrays.resortIndex[i - 1] == hashIdx)
    return;
  arrays.compactHashMap[hashIdx].beginning = i;
}
// Hash table construction, step 2 of 2: the thread owning the last cell of a
// hash bucket writes the bucket length. A length greater than one means
// several cells collided on the same hash entry.
basicFunctionType cudaHashTableb(SPH::compactMLM::Memory arrays, int32_t threads) {
  checkedThreadIdx(i);
  const auto hashIdx = arrays.resortIndex[i];
  const bool lastOfBucket =
      (i == threads - 1) || (arrays.resortIndex[i + 1] != hashIdx);
  if (lastOfBucket)
    arrays.compactHashMap[hashIdx].length = i - arrays.compactHashMap[hashIdx].beginning + 1;
}
// Cell table construction, step 1 of 2: builds a compact cell span per cell,
// taking both the first particle and the particle count from the compacted
// boundary list (compact[i + 1] is the start of the next cell). The unused
// float parameter keeps the signature uniform with cudaCellTableb.
basicFunctionType cudaCellTablea(SPH::compactMLM::Memory arrays, int32_t threads, int32_t *compact, float) {
  checkedThreadIdx(i);
  const auto firstParticle = compact[i];
  const auto particleCount = compact[i + 1] - firstParticle;
  arrays.compactCellSpan[i] = cell_span{ firstParticle, particleCount };
}
// Cell table construction, step 2 of 2: prepares the cell-level re-sort by
// hashing each cell's first particle position (cell size scaled by `ratio`
// for the current resolution level) and resetting the permutation to identity.
basicFunctionType cudaCellTableb(SPH::compactMLM::Memory arrays, int32_t threads, int32_t *compact, float ratio) {
  checkedThreadIdx(i);
  const auto firstParticle = compact[i];
  const auto pos = arrays.position[firstParticle];
  arrays.particleparticleIndex[i] = i;
  arrays.resortIndex[i] =
      position_to_hash(pos, arrays.min_coord, arrays.cell_size.x * ratio, arrays.hash_entries);
}
// Determines the ideal MLM resolution level r_i for each particle from its
// volume, as described in the submission: the particle's support radius is
// derived from the target neighbor count, and the level is the (clamped)
// number of halvings of the base cell size needed to match that support.
// Fix: removed the dead local `f_i` (computed but never used), the no-op
// `- 0` term, and the redundant `(1.f) *` factor.
basicFunctionType cudaMLMResolution(SPH::compactMLM::Memory arrays, float* volume) {
  checkedParticleIdx(idx);
  // Aim slightly below the full neighborhood size.
  float target_neighbors = Kernel<kernel_kind::spline4>::neighbor_number * 0.95f;
  // Support scale for a uniform particle distribution with that many neighbors.
  float kernel_epsilon =
      powf((target_neighbors) / ((4.f / 3.f) * CUDART_PI_F), 1.f / 3.f) / Kernel<kernel_kind::spline4>::kernel_size();
  auto particle_volume = volume[idx];
  auto actual_support = kernel_epsilon * powf(particle_volume, 1.f / 3.f);
  float h_i = actual_support * Kernel<kernel_kind::spline4>::kernel_size();
  // Level = floor(|log2(cell_size / h_i)|), clamped to the available schemes.
  int32_t r_i = (int32_t)math::clamp(math::floorf(math::abs(log2f(arrays.cell_size.x / h_i))), 0, arrays.mlm_schemes - 1);
  arrays.MLMResolution[idx] = r_i;
}
// This function is used to set the re-sorting indices for particles by calculating both the Morton Code for 32 and 64 bit
// Keys as well as the hash index. Invalid particles (marked with a .w entry of FLT_MAX or a volume of 0 or with NaN
// quantities are set to a re-sorting index that puts them at the end for removal).
// `ratio` scales the Morton-code cell size, `ratio2` the hash cell size, so
// the two indices can use different resolutions in the same pass.
cellFunctionType cudaHashParticles(SPH::compactMLM::Memory arrays, float ratio, float ratio2, float* volumes, float4* velocity) {
  checkedParticleIdx(i);
  auto x_i = arrays.position[i];
  auto x = x_i;
  // .w carries the particle support radius; currently read but unused here.
  auto h_i = (x_i.w);
  // Validity check: position not a removal marker (w == FLT_MAX) and not NaN,
  // volume finite/non-zero/not absurdly large (> 1e21f), velocity not NaN.
  // The self-inequality comparisons (v != v) are the NaN tests.
  if (!(x.w == FLT_MAX || x.x != x.x || x.y != x.y || x.z != x.z || x.w != x.w) &&
      !(volumes[i] != volumes[i] || volumes[i] == 0.f || volumes[i] > 1e21f)&&
      !(velocity[i].x != velocity[i].x || velocity[i].y != velocity[i].y || velocity[i].z != velocity[i].z)){
    // Valid particle: store both Morton code widths plus the hash index.
    arrays.ZOrder_64[i] = position_to_morton(x_i, arrays, ratio);
    arrays.ZOrder_32[i] = static_cast<int32_t>(position_to_morton_32(x_i, arrays, ratio));
    arrays.resortIndex[i] = position_to_hash(x_i, arrays, ratio2);
    arrays.particleparticleIndex[i] = i;
  }
  else {
    // Invalid particle: maximal keys push it to the end of every sort so it
    // can be trimmed off after re-sorting.
    arrays.ZOrder_64[i] = INT64_MAX;
    arrays.ZOrder_32[i] = INT_MAX;
    arrays.resortIndex[i] = INT_MAX;
    arrays.particleparticleIndex[i] = i;
  }
}
// This function is used as the pre processing step of sorting the cells by calculating Z_i for every particle
// if the particle is in a different cell than the prior cell and inserts an element marking the last cell.
// This is done as described in the submission.
cellFunctionType cudaIndexCells(SPH::compactMLM::Memory arrays, int32_t threads, int32_t *cell_indices) {
  checkedThreadIdx(i);
  // Thread 0 additionally seeds the leading boundary entry.
  if (i == 0)
    cell_indices[0] = 0;
  // Shift by one so entry i+1 describes the boundary *after* particle i;
  // cell_indices therefore must have room for threads + 1 entries.
  i++;
  // A cell boundary exists where the Z-order key changes or past the last
  // particle; interior particles write the -1 marker compacted away later.
  // The i == num_ptcls check short-circuits before reading ZOrder_*[i].
  if (hash_width == hash_length::bit_64)
    cell_indices[i] = i == arrays.num_ptcls || arrays.ZOrder_64[i - 1] != arrays.ZOrder_64[i] ? i : -1;
  else
    cell_indices[i] = i == arrays.num_ptcls || arrays.ZOrder_32[i - 1] != arrays.ZOrder_32[i] ? i : -1;
}
// Gathers `input` into `output` according to the sorted particle permutation
// stored in particleparticleIndex, one element per thread.
template <typename T> hostDeviceInline void cudaSortCompactmlm(SPH::compactMLM::Memory arrays, int32_t threads,
    T* input, T* output) {
  checkedThreadIdx(i);
  const auto sourceIdx = arrays.particleparticleIndex[i];
  output[i] = input[sourceIdx];
}
// Embeds the cell span directly into the hash map entry whenever a hash
// bucket contains exactly one cell (no collision); colliding buckets keep
// the indirection through the cell table and are flagged as uncompacted.
basicFunctionType compactHashMap(SPH::compactMLM::Memory arrays, int32_t threads) {
  checkedThreadIdx(i);
  const auto hashIdx = arrays.resortIndex[i];
  if (arrays.compactHashMap[hashIdx].length != 1) {
    // Collision: several cells share this bucket, mark as not compacted.
    arrays.compactHashMap[hashIdx].compacted = 0;
  } else {
    // Single cell: store its span in-place in the hash entry.
    const auto span = arrays.compactCellSpan[i];
    arrays.compactHashMap[hashIdx] = compactHashSpan{ 1, (uint32_t) span.beginning, span.length };
  }
}
// A set of wrapper functions that allow us to call the functions on either the GPU or CPU.
// The string argument is presumably a human-readable label (e.g. for
// profiling/logging) — TODO confirm; note some labels do not match the
// wrapped function (e.g. calculateResolution is labeled "generating cell table").
cellFunction(hashParticles, cudaHashParticles, "hashing particles", caches<float4>{});
basicFunction(calculateResolution, cudaMLMResolution, "generating cell table");
basicFunction(buildCellTable1, cudaCellTablea, "creating cell table I");
basicFunction(buildCellTable2, cudaCellTableb, "creating cell table II");
basicFunction(buildHashTable1, cudaHashTablea, "hashing cell table I");
basicFunction(buildHashTable2, cudaHashTableb, "hashing cell table II");
cellFunction(indexCells, cudaIndexCells, "indexing cells");
basicFunction(sort, cudaSortCompactmlm, "compact resorting cells");
basicFunction(compact, compactHashMap, "compact hashmap");
// A set of predicate/generator functors used for array compaction, counting
// and hash map initialization.
// Keeps compacted cell boundary entries, dropping the -1 interior markers.
struct is_valid {
  hostDeviceInline bool operator()(const int v) { return v != -1; }
};
// True for occupied hash entries holding more than one cell (collisions).
struct count_if {
  hostDeviceInline bool operator()(const compactHashSpan s) {
    const bool occupied = s.beginning != UINT31_MAX;
    return occupied && s.length > 1;
  }
};
// Flags positions that are removal markers (w == FLT_MAX) or contain NaNs
// (detected via self-inequality, which only NaN satisfies).
struct invalid_position {
  hostDeviceInline bool operator()(float4 p) {
    const bool anyNaN = p.x != p.x || p.y != p.y || p.z != p.z || p.w != p.w;
    return p.w == FLT_MAX || anyNaN;
  }
};
// Flags volumes that are markers (FLT_MAX), empty (0) or NaN.
struct invalid_volume {
  hostDeviceInline bool operator()(float v) { return v == FLT_MAX || v == 0.f || v != v; }
};
// Produces an empty, uncompacted hash entry, used to reset the hash map.
struct hash_spans {
  hostDeviceInline compactHashSpan operator()() { return compactHashSpan{ 0, UINT31_MAX, 0 }; }
};
} // namespace resort_mlm
} // namespace SPH
// This is the main function of the module and is called externally with a Memory structure pre filled
// The paper uses a reduction to determine the largest support radius, however for our simulation framework we instead
// rely on a user provided upper bound of the cell size stored in mem.cell_size. This could be replaced for a different
// implementation, but our adaptive method also has a lower bound of resolution that is fixed so we based the cell size
// on that resolution instead of recalcuating it every step.
void SPH::compactMLM::resortParticles(Memory mem) {
if (mem.num_ptcls > 0) {
// We initially start by calculating D_min and D_max as described in the submission by using reduction Operations
// Instead of directly using this we instead add a buffer on the outside of the bounding box of 3 cell widths
// as this can improve re-sorting performance slightly.
auto min_coord = math::to<float3>(algorithm::reduce_min(arrays::position::ptr, mem.num_ptcls));
min_coord -= 3.f * mem.cell_size;
get<parameters::min_coord>() = min_coord;
auto max_coord = math::to<float3>(algorithm::reduce_max(arrays::position::ptr, mem.num_ptcls));
max_coord += 3.f * mem.cell_size;
get<parameters::max_coord>() = max_coord;
// Next we determine the longest dimension of the simulation domain
float max_length = math::max_elem(max_coord - get<parameters::min_coord>());
cuda::sync();
// This step determines D
get<parameters::grid_size>() = math::to<int3>((max_coord - min_coord) / get<parameters::cell_size>().x);
// In order to calculate P2 we divide the max length by the level 0 cell size
int32_t cells = static_cast<int32_t>(max_length / get<parameters::cell_size>().x);
int32_t v = cells;
// This set of operations determines the result of rounding v to the next largest power of two
v--;
v |= v >> 1;
v |= v >> 2;
v |= v >> 4;
v |= v >> 8;
v |= v >> 16;
v++;
// Update entries in the Memory struct
mem.min_coord = get<parameters::min_coord>();
mem.grid_size = get<parameters::grid_size>();
// hashParticles determines the Z order indices of all particles
// sort_by_key could be based on thrusts sort index by key method.
if (parameters::hash_size{} == hash_length::bit_32) {
// This is the factor described in the paper to determine C_fine
float factor_morton = 1.f / ((float)(1024 / v));
get<parameters::zOrderScale>() = factor_morton;
cuda::sync();
launch<hashParticles>(mem.num_ptcls, mem, factor_morton, 1.f, arrays::volume::ptr, arrays::velocity::ptr);
cuda::sync();
algorithm::sort_by_key(mem.num_ptcls, mem.ZOrder_32, mem.particleparticleIndex);
}
else {
// This is the factor described in the paper to determine C_fine
float factor_morton = 1.f / ((float)(1048576 / v));
get<parameters::zOrderScale>() = factor_morton;
cuda::sync();
launch<hashParticles>(mem.num_ptcls, mem, factor_morton, 1.f, arrays::volume::ptr, arrays::velocity::ptr);
cuda::sync();
algorithm::sort_by_key(mem.num_ptcls, mem.ZOrder_64, mem.particleparticleIndex);
}
/// This block of code is done to call the actual sorting functions by using a temporary array for resizing 4B
/// and 16B values. This is highly implementation based and as such not important to our overall contribution
/// This marks the start of the actual re-sorting block for particles
{
void* original = arrays::resortArray::ptr;
void* original4 = arrays::resortArray4::ptr;
for_each(sorting_list, [&mem](auto x) {
using P = std::decay_t<decltype(x)>;
if (!P::valid()) return;
using T = typename P::type;
if (sizeof(T) == sizeof(float)) {
launch<sort>(mem.num_ptcls, mem, mem.num_ptcls, (float*)P::ptr, arrays::resortArray::ptr);
cuda::sync();
void* tmp = P::ptr;
P::ptr = (T*)arrays::resortArray::ptr;
arrays::resortArray::ptr = (float*)tmp;
}
else if (sizeof(T) == sizeof(float4)) {
launch<sort>(mem.num_ptcls, mem, mem.num_ptcls, (float4*)P::ptr, arrays::resortArray4::ptr);
cuda::sync();
void* tmp = P::ptr;
P::ptr = (T*)arrays::resortArray4::ptr;
arrays::resortArray4::ptr = (float4*)tmp;
}
else
LOG_ERROR << "Cannot sort array of data size " << sizeof(T) << std::endl;
});
for_each(sorting_list, [&mem, original, original4](auto x) {
using P = std::decay_t<decltype(x)>;
if (!P::valid()) return;
using T = typename P::type;
if (sizeof(T) == sizeof(float)) {
if (P::ptr == original) {
cuda::memcpy(arrays::resortArray::ptr, P::ptr, sizeof(T) * mem.num_ptcls, hipMemcpyDeviceToDevice);
cuda::sync();
P::ptr = (T*)arrays::resortArray::ptr;
arrays::resortArray::ptr = (float*)original;
}
}
else if (sizeof(T) == sizeof(float4)) {
if (P::ptr == original4) {
cuda::memcpy(arrays::resortArray4::ptr, P::ptr, sizeof(T) * mem.num_ptcls, hipMemcpyDeviceToDevice);
cuda::sync();
P::ptr = (T*)arrays::resortArray4::ptr;
arrays::resortArray4::ptr = (float4*)original4;
}
}
});
}
/// This marks the end of the actual re-sorting block for particles
// update the position pointer to the resorted pointer
mem.position = arrays::position::ptr;
// This step uses a predicate to remove potentially invalid particles from the simulation.
// This is done for particles with invalid positions, velocities or volumes in three steps.
// The check for invalid velocities and volumes is mainly for stability when developing new methods
// and could be removed for a normal implementation to avoid the slight performance cost
cuda::sync();
auto iter = algorithm::count_if(arrays::position::ptr, mem.num_ptcls, invalid_position());
cuda::sync();
cuda::sync();
if (iter != 0) {
auto diff = get<parameters::num_ptcls>() - iter;
get<parameters::num_ptcls>() = static_cast<int32_t>(diff);
mem.num_ptcls = static_cast<int32_t>(diff);
}
cuda::sync();
iter = algorithm::count_if(arrays::velocity::ptr, mem.num_ptcls, invalid_position());
cuda::sync();
if (iter != 0) {
auto diff = get<parameters::num_ptcls>() - iter;
get<parameters::num_ptcls>() = static_cast<int32_t>(diff);
mem.num_ptcls = static_cast<int32_t>(diff);
}
cuda::sync();
iter = algorithm::count_if(arrays::volume::ptr, mem.num_ptcls, invalid_volume());
cuda::sync();
if (iter != 0) {
auto diff = get<parameters::num_ptcls>() - iter;
get<parameters::num_ptcls>() = static_cast<int32_t>(diff);
mem.num_ptcls = static_cast<int32_t>(diff);
}
cuda::sync();
// After particle removal we determine L_i for all particles
launch<calculateResolution>(mem.num_ptcls, mem, arrays::volume::ptr);
cuda::sync();
cuda::sync();
// Initialize the set of values keeping track of occupied valid and collided hash and cell entries
get<parameters::valid_cells>() = 0;
get<parameters::collision_cells>() = 0;
get<parameters::occupiedCells>().clear();
// For every level
for (int32_t i = 0; i < (int32_t)parameters::mlm_schemes{}; ++i) {
// Determine the resolution factor 0.5^L iteratively
float factor = 1.f;
for (int ii = 0; ii < i; ++ii)
factor *= 0.5f;
// As described in the submission, we store all levels in a single continuous array
compactHashSpan *hashMap = arrays::compactHashMap::ptr + parameters::hash_entries{} *i;
compact_cellSpan *cellSpan = arrays::compactCellSpan::ptr + parameters::max_numptcls{} *i;
mem.compactCellSpan = cellSpan;
mem.compactHashMap = hashMap;
// Start by resetting all indices
cuda::arrayMemset<arrays::cellparticleIndex>(0xFFFFFFFF);
cuda::arrayMemset<arrays::ZOrder_64>(0xFFFFFFFF);
cuda::sync();
// Hash particles also calculates the hash index of all particles not just the Z order index.
// This is done again here as the hash index varies per resolution level
launch<hashParticles>(mem.num_ptcls, mem, factor, factor,arrays::volume::ptr, arrays::velocity::ptr);
cuda::sync();
// Reinitialize the hash map with all invalid entries
algorithm::generate(hashMap, mem.hash_entries, hash_spans());
cuda::sync();
// Index the particles into their cells
launch<indexCells>(mem.num_ptcls, mem, mem.num_ptcls, arrays::cellparticleIndex::ptr);
cuda::sync();
// And compact the array
int32_t diff = static_cast<int32_t>(algorithm::copy_if(arrays::cellparticleIndex::ptr, arrays::compactparticleIndex::ptr, mem.num_ptcls + 1, is_valid()));
cuda::sync();
// Determine C_length based on C_begin^compact
launch<buildCellTable1>(diff, mem, diff, arrays::compactparticleIndex::ptr, factor);
cuda::sync();
// Calculate the hash indices of every cell
launch<buildCellTable2>(diff, mem, diff, arrays::compactparticleIndex::ptr, factor);
cuda::sync();
// The compact operation before gave is the number of occupied cells + 1 thus we remove 1 entry before proceeding
diff--;
get<parameters::occupiedCells>().push_back(diff);
// Resort an index array based on the hashed indices
algorithm::sort_by_key(diff, mem.resortIndex, mem.particleparticleIndex);
cuda::sync();
// Resort the actual cells into a temporary array and copy the results back into the cell list
launch<sort>(diff, mem, diff, cellSpan, mem.compactCellSpanSwap);
cuda::sync();
cuda::memcpy(cellSpan, mem.compactCellSpanSwap, sizeof(cell_span) * diff, hipMemcpyDeviceToDevice);
cuda::sync();
// Calculate H_begin for every occupied hash cell
launch<buildHashTable1>(diff, mem, diff);
cuda::sync();
// Calculate H_length for every occupid hash cell
launch<buildHashTable2>(diff, mem, diff);
cuda::sync();
// Embedd C into H if there is no collision
launch<compact>(diff, mem, diff);
cuda::sync();
//int32_t collisionsg = algorithm::count_if((mem.hashMap), parameters::hash_entries{}, count_if());
//get<parameters::collision_cells>() += collisionsg;
//if(i == 0)
// get<parameters::valid_cells>() += diff;
}
}
}
| 0bd4130c03b3d87d1c1758cbaacadce75199d726.cu | /** This file contains the implementation of the embedded version of our multi level memory model. The purpose of this
* code is to start with an input of simulation data, e.g. from a prior timestep and to re-sort this data first into
* cells and then to construct the according hash maps for this data. Significant amounts of this code are external,
* e.g. the sorting method itself as this was not part of our contribution.*/
#include <SPH/compactMLMResort/compactMLM.cuh>
#include <utility/include_all.h>
#include "dummy.h"
// The SPH namespace contains all of our simulation methods to avoid global namespace collisions
namespace SPH {
// compactMLM refers to the embedded version of our MLM data structure where the cell table is embedded in the has table
namespace compactMLM {
// This function is the first step of the hash table construction
// It calculates the first cell in every hash map entry
basicFunctionType cudaHashTablea(SPH::compactMLM::Memory arrays, int32_t threads) {
checkedThreadIdx(i);
auto h = arrays.resortIndex[i];
if (i == 0 || h != arrays.resortIndex[i - 1]) {
arrays.compactHashMap[h].beginning = i;
}
}
// This function is the second step of the hash map construction
// It calculates the length of every hash map entry, where a length > 1 means a collision occured
basicFunctionType cudaHashTableb(SPH::compactMLM::Memory arrays, int32_t threads) {
checkedThreadIdx(i);
auto h = arrays.resortIndex[i];
if (i == threads - 1 || arrays.resortIndex[i + 1] != arrays.resortIndex[i])
arrays.compactHashMap[h].length = i - arrays.compactHashMap[h].beginning + 1;
}
// This function is the first step of the cell table construction
// It calculates the first entry and the cell length simultaneously
basicFunctionType cudaCellTablea(SPH::compactMLM::Memory arrays, int32_t threads, int32_t *compact, float) {
checkedThreadIdx(i);
arrays.compactCellSpan[i] = cell_span{ compact[i], compact[i + 1] - compact[i] };
}
// This function is the second step of the cell table construction
// It prepares the re-sort index by storing H_c in the index arrays by using the first particle in the cell
basicFunctionType cudaCellTableb(SPH::compactMLM::Memory arrays, int32_t threads, int32_t *compact, float ratio) {
checkedThreadIdx(i);
auto x_i = arrays.position[compact[i]];
arrays.resortIndex[i] = position_to_hash(x_i, arrays.min_coord, (arrays.cell_size.x) * ratio, arrays.hash_entries);
arrays.particleparticleIndex[i] = i;
}
// This function determines the ideal resolution r for each particle according to the submission
basicFunctionType cudaMLMResolution(SPH::compactMLM::Memory arrays, float* volume) {
checkedParticleIdx(idx);
float target_neighbors = Kernel<kernel_kind::spline4>::neighbor_number * 0.95f;
float kernel_epsilon =
(1.f) * powf((target_neighbors) / ((4.f / 3.f) * CUDART_PI_F), 1.f / 3.f) / Kernel<kernel_kind::spline4>::kernel_size();
auto particle_volume = volume[idx];
auto actual_support = kernel_epsilon * powf(particle_volume, 1.f / 3.f);
float h_i = actual_support * Kernel<kernel_kind::spline4>::kernel_size();
int32_t r_i = (int32_t)math::clamp(math::floorf(math::abs(log2f(arrays.cell_size.x / h_i))) - 0, 0, arrays.mlm_schemes - 1);
float f_i = powf(0.5f, ((float)r_i));
arrays.MLMResolution[idx] = r_i;
}
// This function is used to set the re-sorting indices for particles by calculating both the Morton Code for 32 and 64 bit
// Keys as well as the hash index. Invalid particles (marked with a .w entry of FLT_MAX or a volume of 0 or with NaN
// quantities are set to a re-sorting index that puts them at the end for removal).
cellFunctionType cudaHashParticles(SPH::compactMLM::Memory arrays, float ratio, float ratio2, float* volumes, float4* velocity) {
checkedParticleIdx(i);
auto x_i = arrays.position[i];
auto x = x_i;
auto h_i = (x_i.w);
if (!(x.w == FLT_MAX || x.x != x.x || x.y != x.y || x.z != x.z || x.w != x.w) &&
!(volumes[i] != volumes[i] || volumes[i] == 0.f || volumes[i] > 1e21f)&&
!(velocity[i].x != velocity[i].x || velocity[i].y != velocity[i].y || velocity[i].z != velocity[i].z)){
arrays.ZOrder_64[i] = position_to_morton(x_i, arrays, ratio);
arrays.ZOrder_32[i] = static_cast<int32_t>(position_to_morton_32(x_i, arrays, ratio));
arrays.resortIndex[i] = position_to_hash(x_i, arrays, ratio2);
arrays.particleparticleIndex[i] = i;
}
else {
arrays.ZOrder_64[i] = INT64_MAX;
arrays.ZOrder_32[i] = INT_MAX;
arrays.resortIndex[i] = INT_MAX;
arrays.particleparticleIndex[i] = i;
}
}
// This function is used as the pre processing step of sorting the cells by calculating Z_i for every particle
// if the particle is in a different cell than the prior cell and inserts an element marking the last cell.
// This is done as described in the submission.
cellFunctionType cudaIndexCells(SPH::compactMLM::Memory arrays, int32_t threads, int32_t *cell_indices) {
checkedThreadIdx(i);
if (i == 0)
cell_indices[0] = 0;
i++;
if (hash_width == hash_length::bit_64)
cell_indices[i] = i == arrays.num_ptcls || arrays.ZOrder_64[i - 1] != arrays.ZOrder_64[i] ? i : -1;
else
cell_indices[i] = i == arrays.num_ptcls || arrays.ZOrder_32[i - 1] != arrays.ZOrder_32[i] ? i : -1;
}
// This function is used to re-ort an input array into an output array based on the sorted indices in parallel
template <typename T> hostDeviceInline void cudaSortCompactmlm(SPH::compactMLM::Memory arrays, int32_t threads,
T* input, T* output) {
checkedThreadIdx(i);
output[i] = input[arrays.particleparticleIndex[i]];
}
// This function embeds the cell information into the hash map if there is no collision for some performance gains
basicFunctionType compactHashMap(SPH::compactMLM::Memory arrays, int32_t threads) {
checkedThreadIdx(i);
auto h = arrays.resortIndex[i];
auto hashEntry = arrays.compactHashMap[h];
auto cell = arrays.compactCellSpan[i];
if (hashEntry.length == 1)
arrays.compactHashMap[h] = compactHashSpan{ 1, (uint32_t) cell.beginning, cell.length };
else
arrays.compactHashMap[h].compacted = 0;
}
// A set of wrapper functions that allow us to call the functions on either the GPU or CPU.
cellFunction(hashParticles, cudaHashParticles, "hashing particles", caches<float4>{});
basicFunction(calculateResolution, cudaMLMResolution, "generating cell table");
basicFunction(buildCellTable1, cudaCellTablea, "creating cell table I");
basicFunction(buildCellTable2, cudaCellTableb, "creating cell table II");
basicFunction(buildHashTable1, cudaHashTablea, "hashing cell table I");
basicFunction(buildHashTable2, cudaHashTableb, "hashing cell table II");
cellFunction(indexCells, cudaIndexCells, "indexing cells");
basicFunction(sort, cudaSortCompactmlm, "compact resorting cells");
basicFunction(compact, compactHashMap, "compact hashmap");
// A set of predicate functions used for array compaction and counting
struct is_valid {
hostDeviceInline bool operator()(const int x) { return x != -1; }
};
struct count_if {
hostDeviceInline bool operator()(const compactHashSpan x) { return x.beginning != UINT31_MAX && x.length > 1; }
};
struct invalid_position {
hostDeviceInline bool operator()(float4 x) { return x.w == FLT_MAX || x.x != x.x || x.y != x.y || x.z != x.z || x.w != x.w; }
};
struct invalid_volume {
hostDeviceInline bool operator()(float x) { return x == FLT_MAX || x == 0.f || x !=x; }
};
struct hash_spans {
hostDeviceInline compactHashSpan operator()() { return compactHashSpan{0, UINT31_MAX,0 }; }
};
} // namespace resort_mlm
} // namespace SPH
// This is the main function of the module and is called externally with a Memory structure pre filled
// The paper uses a reduction to determine the largest support radius, however for our simulation framework we instead
// rely on a user provided upper bound of the cell size stored in mem.cell_size. This could be replaced for a different
// implementation, but our adaptive method also has a lower bound of resolution that is fixed so we based the cell size
// on that resolution instead of recalcuating it every step.
void SPH::compactMLM::resortParticles(Memory mem) {
if (mem.num_ptcls > 0) {
// We initially start by calculating D_min and D_max as described in the submission by using reduction Operations
// Instead of directly using this we instead add a buffer on the outside of the bounding box of 3 cell widths
// as this can improve re-sorting performance slightly.
auto min_coord = math::to<float3>(algorithm::reduce_min(arrays::position::ptr, mem.num_ptcls));
min_coord -= 3.f * mem.cell_size;
get<parameters::min_coord>() = min_coord;
auto max_coord = math::to<float3>(algorithm::reduce_max(arrays::position::ptr, mem.num_ptcls));
max_coord += 3.f * mem.cell_size;
get<parameters::max_coord>() = max_coord;
// Next we determine the longest dimension of the simulation domain
float max_length = math::max_elem(max_coord - get<parameters::min_coord>());
cuda::sync();
// This step determines D
get<parameters::grid_size>() = math::to<int3>((max_coord - min_coord) / get<parameters::cell_size>().x);
// In order to calculate P2 we divide the max length by the level 0 cell size
int32_t cells = static_cast<int32_t>(max_length / get<parameters::cell_size>().x);
int32_t v = cells;
// This set of operations determines the result of rounding v to the next largest power of two
v--;
v |= v >> 1;
v |= v >> 2;
v |= v >> 4;
v |= v >> 8;
v |= v >> 16;
v++;
// Update entries in the Memory struct
mem.min_coord = get<parameters::min_coord>();
mem.grid_size = get<parameters::grid_size>();
// hashParticles determines the Z order indices of all particles
// sort_by_key could be based on thrusts sort index by key method.
if (parameters::hash_size{} == hash_length::bit_32) {
// This is the factor described in the paper to determine C_fine
float factor_morton = 1.f / ((float)(1024 / v));
get<parameters::zOrderScale>() = factor_morton;
cuda::sync();
launch<hashParticles>(mem.num_ptcls, mem, factor_morton, 1.f, arrays::volume::ptr, arrays::velocity::ptr);
cuda::sync();
algorithm::sort_by_key(mem.num_ptcls, mem.ZOrder_32, mem.particleparticleIndex);
}
else {
// This is the factor described in the paper to determine C_fine
float factor_morton = 1.f / ((float)(1048576 / v));
get<parameters::zOrderScale>() = factor_morton;
cuda::sync();
launch<hashParticles>(mem.num_ptcls, mem, factor_morton, 1.f, arrays::volume::ptr, arrays::velocity::ptr);
cuda::sync();
algorithm::sort_by_key(mem.num_ptcls, mem.ZOrder_64, mem.particleparticleIndex);
}
/// This block of code is done to call the actual sorting functions by using a temporary array for resizing 4B
/// and 16B values. This is highly implementation based and as such not important to our overall contribution
/// This marks the start of the actual re-sorting block for particles
{
void* original = arrays::resortArray::ptr;
void* original4 = arrays::resortArray4::ptr;
for_each(sorting_list, [&mem](auto x) {
using P = std::decay_t<decltype(x)>;
if (!P::valid()) return;
using T = typename P::type;
if (sizeof(T) == sizeof(float)) {
launch<sort>(mem.num_ptcls, mem, mem.num_ptcls, (float*)P::ptr, arrays::resortArray::ptr);
cuda::sync();
void* tmp = P::ptr;
P::ptr = (T*)arrays::resortArray::ptr;
arrays::resortArray::ptr = (float*)tmp;
}
else if (sizeof(T) == sizeof(float4)) {
launch<sort>(mem.num_ptcls, mem, mem.num_ptcls, (float4*)P::ptr, arrays::resortArray4::ptr);
cuda::sync();
void* tmp = P::ptr;
P::ptr = (T*)arrays::resortArray4::ptr;
arrays::resortArray4::ptr = (float4*)tmp;
}
else
LOG_ERROR << "Cannot sort array of data size " << sizeof(T) << std::endl;
});
for_each(sorting_list, [&mem, original, original4](auto x) {
using P = std::decay_t<decltype(x)>;
if (!P::valid()) return;
using T = typename P::type;
if (sizeof(T) == sizeof(float)) {
if (P::ptr == original) {
cuda::memcpy(arrays::resortArray::ptr, P::ptr, sizeof(T) * mem.num_ptcls, cudaMemcpyDeviceToDevice);
cuda::sync();
P::ptr = (T*)arrays::resortArray::ptr;
arrays::resortArray::ptr = (float*)original;
}
}
else if (sizeof(T) == sizeof(float4)) {
if (P::ptr == original4) {
cuda::memcpy(arrays::resortArray4::ptr, P::ptr, sizeof(T) * mem.num_ptcls, cudaMemcpyDeviceToDevice);
cuda::sync();
P::ptr = (T*)arrays::resortArray4::ptr;
arrays::resortArray4::ptr = (float4*)original4;
}
}
});
}
/// This marks the end of the actual re-sorting block for particles
// update the position pointer to the resorted pointer
mem.position = arrays::position::ptr;
// This step uses a predicate to remove potentially invalid particles from the simulation.
// This is done for particles with invalid positions, velocities or volumes in three steps.
// The check for invalid velocities and volumes is mainly for stability when developing new methods
// and could be removed for a normal implementation to avoid the slight performance cost
cuda::sync();
auto iter = algorithm::count_if(arrays::position::ptr, mem.num_ptcls, invalid_position());
cuda::sync();
cuda::sync();
if (iter != 0) {
auto diff = get<parameters::num_ptcls>() - iter;
get<parameters::num_ptcls>() = static_cast<int32_t>(diff);
mem.num_ptcls = static_cast<int32_t>(diff);
}
cuda::sync();
iter = algorithm::count_if(arrays::velocity::ptr, mem.num_ptcls, invalid_position());
cuda::sync();
if (iter != 0) {
auto diff = get<parameters::num_ptcls>() - iter;
get<parameters::num_ptcls>() = static_cast<int32_t>(diff);
mem.num_ptcls = static_cast<int32_t>(diff);
}
cuda::sync();
iter = algorithm::count_if(arrays::volume::ptr, mem.num_ptcls, invalid_volume());
cuda::sync();
if (iter != 0) {
auto diff = get<parameters::num_ptcls>() - iter;
get<parameters::num_ptcls>() = static_cast<int32_t>(diff);
mem.num_ptcls = static_cast<int32_t>(diff);
}
cuda::sync();
// After particle removal we determine L_i for all particles
launch<calculateResolution>(mem.num_ptcls, mem, arrays::volume::ptr);
cuda::sync();
cuda::sync();
// Initialize the set of values keeping track of occupied valid and collided hash and cell entries
get<parameters::valid_cells>() = 0;
get<parameters::collision_cells>() = 0;
get<parameters::occupiedCells>().clear();
// For every level
for (int32_t i = 0; i < (int32_t)parameters::mlm_schemes{}; ++i) {
// Determine the resolution factor 0.5^L iteratively
float factor = 1.f;
for (int ii = 0; ii < i; ++ii)
factor *= 0.5f;
// As described in the submission, we store all levels in a single continuous array
compactHashSpan *hashMap = arrays::compactHashMap::ptr + parameters::hash_entries{} *i;
compact_cellSpan *cellSpan = arrays::compactCellSpan::ptr + parameters::max_numptcls{} *i;
mem.compactCellSpan = cellSpan;
mem.compactHashMap = hashMap;
// Start by resetting all indices
cuda::arrayMemset<arrays::cellparticleIndex>(0xFFFFFFFF);
cuda::arrayMemset<arrays::ZOrder_64>(0xFFFFFFFF);
cuda::sync();
// Hash particles also calculates the hash index of all particles not just the Z order index.
// This is done again here as the hash index varies per resolution level
launch<hashParticles>(mem.num_ptcls, mem, factor, factor,arrays::volume::ptr, arrays::velocity::ptr);
cuda::sync();
// Reinitialize the hash map with all invalid entries
algorithm::generate(hashMap, mem.hash_entries, hash_spans());
cuda::sync();
// Index the particles into their cells
launch<indexCells>(mem.num_ptcls, mem, mem.num_ptcls, arrays::cellparticleIndex::ptr);
cuda::sync();
// And compact the array
int32_t diff = static_cast<int32_t>(algorithm::copy_if(arrays::cellparticleIndex::ptr, arrays::compactparticleIndex::ptr, mem.num_ptcls + 1, is_valid()));
cuda::sync();
// Determine C_length based on C_begin^compact
launch<buildCellTable1>(diff, mem, diff, arrays::compactparticleIndex::ptr, factor);
cuda::sync();
// Calculate the hash indices of every cell
launch<buildCellTable2>(diff, mem, diff, arrays::compactparticleIndex::ptr, factor);
cuda::sync();
// The compact operation before gave is the number of occupied cells + 1 thus we remove 1 entry before proceeding
diff--;
get<parameters::occupiedCells>().push_back(diff);
// Resort an index array based on the hashed indices
algorithm::sort_by_key(diff, mem.resortIndex, mem.particleparticleIndex);
cuda::sync();
// Resort the actual cells into a temporary array and copy the results back into the cell list
launch<sort>(diff, mem, diff, cellSpan, mem.compactCellSpanSwap);
cuda::sync();
cuda::memcpy(cellSpan, mem.compactCellSpanSwap, sizeof(cell_span) * diff, cudaMemcpyDeviceToDevice);
cuda::sync();
// Calculate H_begin for every occupied hash cell
launch<buildHashTable1>(diff, mem, diff);
cuda::sync();
// Calculate H_length for every occupid hash cell
launch<buildHashTable2>(diff, mem, diff);
cuda::sync();
// Embedd C into H if there is no collision
launch<compact>(diff, mem, diff);
cuda::sync();
//int32_t collisionsg = algorithm::count_if((mem.hashMap), parameters::hash_entries{}, count_if());
//get<parameters::collision_cells>() += collisionsg;
//if(i == 0)
// get<parameters::valid_cells>() += diff;
}
}
}
|
c6b4090bad710b38c6506018cb70817f975f05a2.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
/**
* Copyright 2021 Huawei Technologies Co., Ltd
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
#include <algorithm>
#include "plugin/device/gpu/kernel/cuda_impl/cuda_ops/util.cuh"
#include "plugin/device/gpu/kernel/cuda_impl/cuda_ops/sync_batch_norm_impl.cuh"
const int kWarpSize = 32;
const int kNumWarps = 32;
__inline__ __device__ float HalfFloatInputConvert(const half val) { return __half2float(val); }
__inline__ __device__ float HalfFloatInputConvert(const float val) { return val; }
__inline__ __device__ void HalfFloatOutputAssign(const float val, float *arr, int idx) { arr[idx] = val; }
__inline__ __device__ void HalfFloatOutputAssign(const float val, half *arr, int idx) { arr[idx] = __float2half(val); }
template <typename T>
__global__ void SyncBatchNormPre(size_t N, size_t C, size_t H, size_t W, const T *input, int *output_n,
float *output_mean, float *output_invstd, float epsilon) {
// block level memory
__shared__ float shared_mean[kNumWarps];
__shared__ float shared_var[kNumWarps];
__shared__ int shared_n[kNumWarps];
int warpId = threadIdx.x / kWarpSize; // threads execute in warps of 32
int laneId = threadIdx.x % kWarpSize;
int plane = blockIdx.x;
int plane_size = N * H * W;
if (threadIdx.x < kNumWarps) {
shared_mean[threadIdx.x] = static_cast<float>(0);
shared_var[threadIdx.x] = static_cast<float>(0);
}
// ensure all 0 init complete across all values
__syncthreads();
// agg values
float avg = 0;
float var_n = 0;
int n = 0;
// individual thread level reduction
for (int x = threadIdx.x; x < plane_size; x += blockDim.x) {
int index = (x / (H * W) * C * H * W) + (plane * H * W) + (x % (H * W));
float input_val = HalfFloatInputConvert(input[index]);
float d1 = input_val - avg;
n++;
avg = avg + (d1 / n);
var_n = var_n + (d1 * (input_val - avg));
}
__syncthreads();
// Reduce every warp to a single value
for (int offset = kWarpSize / 2; offset > 0; offset /= 2) {
float other_avg = __shfl_down_sync(0xffffffff, avg, offset);
float other_n = __shfl_down_sync(0xffffffff, n, offset);
float div_factor = 1.0 / fmaxf(1.0, n + other_n);
float other_var_n = __shfl_down_sync(0xffffffff, var_n, offset);
var_n += other_var_n + (avg - other_avg) * (avg - other_avg) * n * other_n * div_factor;
avg = (n * avg + other_n * other_avg) * div_factor;
n += other_n;
}
__syncwarp();
if (laneId == 0) {
// lane 0 for every warp moves value
shared_n[warpId] = n;
shared_mean[warpId] = avg;
shared_var[warpId] = var_n;
// now one value per warp
}
// second reduction to reduce all warps into a single value
__syncthreads();
if (warpId == 0) {
n = shared_n[laneId];
avg = shared_mean[laneId];
var_n = shared_var[laneId];
__syncwarp();
for (int offset = kWarpSize / 2; offset > 0; offset /= 2) {
int other_n = __shfl_down_sync(0xffffffff, n, offset);
float other_avg = __shfl_down_sync(0xffffffff, avg, offset);
float div_factor = 1.0 / fmaxf(1.0, n + other_n);
float other_var_n = __shfl_down_sync(0xffffffff, var_n, offset);
var_n += other_var_n + (avg - other_avg) * (avg - other_avg) * n * other_n * div_factor;
avg = (n * avg + other_n * other_avg) * div_factor;
n += other_n;
}
__syncwarp();
}
if (threadIdx.x == 0) {
output_n[plane] = n;
output_mean[plane] = avg;
output_invstd[plane] = static_cast<float>(1) / sqrt((var_n / plane_size) + epsilon);
}
return;
}
template <typename T, typename G>
__global__ void SyncBatchNormGather(size_t N, size_t C, size_t H, size_t W, int *counts_global, float *means_global,
float *invstds_global, int *counts_local, float *means_local, float *invstds_local,
T *running_mean_output, T *running_var_output, G *running_mean_input,
G *running_var_input, float epsilon, float momentum, size_t group_rank,
size_t group_size) {
int feature_size = C;
int world_size = group_size;
for (size_t C_ix = blockIdx.x * blockDim.x + threadIdx.x; C_ix < C; C_ix += blockDim.x * gridDim.x) {
float avg = 0;
float var_n = 0;
float n = 0;
for (int N_ix = 0; N_ix < world_size; N_ix++) {
int count = counts_global[N_ix * feature_size + C_ix];
float mean_ = means_global[N_ix * feature_size + C_ix];
float std = static_cast<float>(1) / invstds_global[N_ix * feature_size + C_ix];
float var_n_ = (std * std - epsilon) * count;
float div_factor = 1.0 / fmaxf(1.0, n + count);
var_n += var_n_ + (avg - mean_) * (avg - mean_) * n * count * div_factor;
avg = n * div_factor * avg + count * div_factor * mean_;
n += count;
}
means_local[C_ix] = avg;
invstds_local[C_ix] = static_cast<float>(1) / sqrt((var_n / n) + epsilon);
HalfFloatOutputAssign(((1 - momentum) * HalfFloatInputConvert(running_mean_input[C_ix]) + momentum * avg),
running_mean_output, C_ix);
float unbiasedVar = 0.0;
if (n != 0) { // not strictly required since pipeline does not allow empty inputs
unbiasedVar = var_n / n;
}
HalfFloatOutputAssign(((1 - momentum) * HalfFloatInputConvert(running_var_input[C_ix]) + momentum * unbiasedVar),
running_var_output, C_ix);
}
return;
}
template <typename T, typename S>
__global__ void SyncBatchNormPost(size_t N, size_t C, size_t H, size_t W, const T *input, T *output, float *means_local,
float *invstds_local, S *scale, S *bias, float epsilon) {
int size = N * C * H * W;
for (size_t pos = blockIdx.x * blockDim.x + threadIdx.x; pos < size; pos += blockDim.x * gridDim.x) {
int block_num = (pos / W) / H; // which of N * C blocks
int plane = block_num % C;
float scale_plane = HalfFloatInputConvert(scale[plane]);
float bias_plane = HalfFloatInputConvert(bias[plane]);
float mean_plane = means_local[plane];
float invstd_plane = invstds_local[plane];
float input_val = HalfFloatInputConvert(input[pos]);
HalfFloatOutputAssign(scale_plane * (input_val - mean_plane) * invstd_plane + bias_plane, output, pos);
}
return;
}
template <typename S>
__global__ void SyncBatchNormPostBiasScale(size_t C, S *scale, S *bias, S *output_scale, S *output_bias) {
for (size_t pos = blockIdx.x * blockDim.x + threadIdx.x; pos < C; pos += blockDim.x * gridDim.x) {
output_bias[pos] = bias[pos];
output_scale[pos] = scale[pos];
}
return;
}
template <typename T>
void CalSyncBatchNormPre(size_t N, size_t C, size_t H, size_t W, const T *input, int *output_n, float *output_mean,
float *output_var, float epsilon, hipStream_t cuda_stream) {
hipLaunchKernelGGL(( SyncBatchNormPre), dim3(C), dim3(GET_THREADS), 0, cuda_stream, N, C, H, W, input, output_n, output_mean, output_var, epsilon);
return;
}
template <typename T, typename G>
void CalSyncBatchNormGather(size_t N, size_t C, size_t H, size_t W, int *counts_global, float *means_global,
float *invstds_global, int *counts_local, float *means_local, float *invstds_local,
T *running_mean_output, T *running_var_output, G *running_mean_input, G *running_var_input,
float epsilon, float momentum, size_t group_rank, size_t group_size,
hipStream_t cuda_stream) {
hipLaunchKernelGGL(( SyncBatchNormGather), dim3(GET_BLOCKS(C)), dim3(GET_THREADS), 0, cuda_stream,
N, C, H, W, counts_global, means_global, invstds_global, counts_local, means_local, invstds_local,
running_mean_output, running_var_output, running_mean_input, running_var_input, epsilon, momentum, group_rank,
group_size);
return;
}
template <typename T, typename S>
void CalSyncBatchNormPost(size_t N, size_t C, size_t H, size_t W, const T *input, T *output, float *means_local,
float *invstds_local, S *scale, S *bias, S *output_scale, S *output_bias, float epsilon,
hipStream_t cuda_stream) {
hipLaunchKernelGGL(( SyncBatchNormPost), dim3(GET_BLOCKS(N * C * H * W)), dim3(GET_THREADS), 0, cuda_stream, N, C, H, W, input, output, means_local,
invstds_local, scale, bias, epsilon);
hipLaunchKernelGGL(( SyncBatchNormPostBiasScale), dim3(1), dim3(::min(C, static_cast<size_t>(GET_THREADS))), 0, cuda_stream,
C, scale, bias, output_scale, output_bias);
return;
}
template CUDA_LIB_EXPORT void CalSyncBatchNormPre<float>(size_t N, size_t C, size_t H, size_t W, const float *input,
int *output_n, float *output_mean, float *output_var,
float epsilon, hipStream_t cuda_stream);
template CUDA_LIB_EXPORT void CalSyncBatchNormPre<half>(size_t N, size_t C, size_t H, size_t W, const half *input,
int *output_n, float *output_mean, float *output_var,
float epsilon, hipStream_t cuda_stream);
template CUDA_LIB_EXPORT void CalSyncBatchNormGather<float, float>(size_t N_, size_t C_, size_t H_, size_t W_,
int *counts_global, float *means_global,
float *invstds_global, int *counts_local,
float *means_local, float *invstds_local,
float *running_mean_output,
float *running_var_output, float *running_mean_input,
float *running_var_input, float epsilon,
float momentum, size_t group_rank, size_t group_size,
hipStream_t cuda_stream);
template CUDA_LIB_EXPORT void CalSyncBatchNormGather<float, half>(size_t N_, size_t C_, size_t H_, size_t W_,
int *counts_global, float *means_global,
float *invstds_global, int *counts_local,
float *means_local, float *invstds_local,
float *running_mean_output, float *running_var_output,
half *running_mean_input, half *running_var_input,
float epsilon, float momentum, size_t group_rank,
size_t group_size, hipStream_t cuda_stream);
template CUDA_LIB_EXPORT void CalSyncBatchNormGather<half, float>(size_t N_, size_t C_, size_t H_, size_t W_,
int *counts_global, float *means_global,
float *invstds_global, int *counts_local,
float *means_local, float *invstds_local,
half *running_mean_output, half *running_var_output,
float *running_mean_input, float *running_var_input,
float epsilon, float momentum, size_t group_rank,
size_t group_size, hipStream_t cuda_stream);
template CUDA_LIB_EXPORT void CalSyncBatchNormGather<half, half>(size_t N_, size_t C_, size_t H_, size_t W_,
int *counts_global, float *means_global,
float *invstds_global, int *counts_local,
float *means_local, float *invstds_local,
half *running_mean_output, half *running_var_output,
half *running_mean_input, half *running_var_input,
float epsilon, float momentum, size_t group_rank,
size_t group_size, hipStream_t cuda_stream);
template CUDA_LIB_EXPORT void CalSyncBatchNormPost<float, float>(size_t N, size_t C, size_t H, size_t W,
const float *input, float *output, float *means_local,
float *invstds_local, float *scale, float *bias,
float *output_scale, float *output_bias, float epsilon,
hipStream_t cuda_stream);
template CUDA_LIB_EXPORT void CalSyncBatchNormPost<float, half>(size_t N, size_t C, size_t H, size_t W,
const float *input, float *output, float *means_local,
float *invstds_local, half *scale, half *bias,
half *output_scale, half *output_bias, float epsilon,
hipStream_t cuda_stream);
template CUDA_LIB_EXPORT void CalSyncBatchNormPost<half, float>(size_t N, size_t C, size_t H, size_t W,
const half *input, half *output, float *means_local,
float *invstds_local, float *scale, float *bias,
float *output_scale, float *output_bias, float epsilon,
hipStream_t cuda_stream);
template CUDA_LIB_EXPORT void CalSyncBatchNormPost<half, half>(size_t N, size_t C, size_t H, size_t W,
const half *input, half *output, float *means_local,
float *invstds_local, half *scale, half *bias,
half *output_scale, half *output_bias, float epsilon,
hipStream_t cuda_stream);
| c6b4090bad710b38c6506018cb70817f975f05a2.cu | /**
* Copyright 2021 Huawei Technologies Co., Ltd
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
#include <algorithm>
#include "plugin/device/gpu/kernel/cuda_impl/cuda_ops/util.cuh"
#include "plugin/device/gpu/kernel/cuda_impl/cuda_ops/sync_batch_norm_impl.cuh"
const int kWarpSize = 32;
const int kNumWarps = 32;
__inline__ __device__ float HalfFloatInputConvert(const half val) { return __half2float(val); }
__inline__ __device__ float HalfFloatInputConvert(const float val) { return val; }
__inline__ __device__ void HalfFloatOutputAssign(const float val, float *arr, int idx) { arr[idx] = val; }
__inline__ __device__ void HalfFloatOutputAssign(const float val, half *arr, int idx) { arr[idx] = __float2half(val); }
template <typename T>
__global__ void SyncBatchNormPre(size_t N, size_t C, size_t H, size_t W, const T *input, int *output_n,
float *output_mean, float *output_invstd, float epsilon) {
// block level memory
__shared__ float shared_mean[kNumWarps];
__shared__ float shared_var[kNumWarps];
__shared__ int shared_n[kNumWarps];
int warpId = threadIdx.x / kWarpSize; // threads execute in warps of 32
int laneId = threadIdx.x % kWarpSize;
int plane = blockIdx.x;
int plane_size = N * H * W;
if (threadIdx.x < kNumWarps) {
shared_mean[threadIdx.x] = static_cast<float>(0);
shared_var[threadIdx.x] = static_cast<float>(0);
}
// ensure all 0 init complete across all values
__syncthreads();
// agg values
float avg = 0;
float var_n = 0;
int n = 0;
// individual thread level reduction
for (int x = threadIdx.x; x < plane_size; x += blockDim.x) {
int index = (x / (H * W) * C * H * W) + (plane * H * W) + (x % (H * W));
float input_val = HalfFloatInputConvert(input[index]);
float d1 = input_val - avg;
n++;
avg = avg + (d1 / n);
var_n = var_n + (d1 * (input_val - avg));
}
__syncthreads();
// Reduce every warp to a single value
for (int offset = kWarpSize / 2; offset > 0; offset /= 2) {
float other_avg = __shfl_down_sync(0xffffffff, avg, offset);
float other_n = __shfl_down_sync(0xffffffff, n, offset);
float div_factor = 1.0 / fmaxf(1.0, n + other_n);
float other_var_n = __shfl_down_sync(0xffffffff, var_n, offset);
var_n += other_var_n + (avg - other_avg) * (avg - other_avg) * n * other_n * div_factor;
avg = (n * avg + other_n * other_avg) * div_factor;
n += other_n;
}
__syncwarp();
if (laneId == 0) {
// lane 0 for every warp moves value
shared_n[warpId] = n;
shared_mean[warpId] = avg;
shared_var[warpId] = var_n;
// now one value per warp
}
// second reduction to reduce all warps into a single value
__syncthreads();
if (warpId == 0) {
n = shared_n[laneId];
avg = shared_mean[laneId];
var_n = shared_var[laneId];
__syncwarp();
for (int offset = kWarpSize / 2; offset > 0; offset /= 2) {
int other_n = __shfl_down_sync(0xffffffff, n, offset);
float other_avg = __shfl_down_sync(0xffffffff, avg, offset);
float div_factor = 1.0 / fmaxf(1.0, n + other_n);
float other_var_n = __shfl_down_sync(0xffffffff, var_n, offset);
var_n += other_var_n + (avg - other_avg) * (avg - other_avg) * n * other_n * div_factor;
avg = (n * avg + other_n * other_avg) * div_factor;
n += other_n;
}
__syncwarp();
}
if (threadIdx.x == 0) {
output_n[plane] = n;
output_mean[plane] = avg;
output_invstd[plane] = static_cast<float>(1) / sqrt((var_n / plane_size) + epsilon);
}
return;
}
template <typename T, typename G>
__global__ void SyncBatchNormGather(size_t N, size_t C, size_t H, size_t W, int *counts_global, float *means_global,
float *invstds_global, int *counts_local, float *means_local, float *invstds_local,
T *running_mean_output, T *running_var_output, G *running_mean_input,
G *running_var_input, float epsilon, float momentum, size_t group_rank,
size_t group_size) {
int feature_size = C;
int world_size = group_size;
for (size_t C_ix = blockIdx.x * blockDim.x + threadIdx.x; C_ix < C; C_ix += blockDim.x * gridDim.x) {
float avg = 0;
float var_n = 0;
float n = 0;
for (int N_ix = 0; N_ix < world_size; N_ix++) {
int count = counts_global[N_ix * feature_size + C_ix];
float mean_ = means_global[N_ix * feature_size + C_ix];
float std = static_cast<float>(1) / invstds_global[N_ix * feature_size + C_ix];
float var_n_ = (std * std - epsilon) * count;
float div_factor = 1.0 / fmaxf(1.0, n + count);
var_n += var_n_ + (avg - mean_) * (avg - mean_) * n * count * div_factor;
avg = n * div_factor * avg + count * div_factor * mean_;
n += count;
}
means_local[C_ix] = avg;
invstds_local[C_ix] = static_cast<float>(1) / sqrt((var_n / n) + epsilon);
HalfFloatOutputAssign(((1 - momentum) * HalfFloatInputConvert(running_mean_input[C_ix]) + momentum * avg),
running_mean_output, C_ix);
float unbiasedVar = 0.0;
if (n != 0) { // not strictly required since pipeline does not allow empty inputs
unbiasedVar = var_n / n;
}
HalfFloatOutputAssign(((1 - momentum) * HalfFloatInputConvert(running_var_input[C_ix]) + momentum * unbiasedVar),
running_var_output, C_ix);
}
return;
}
template <typename T, typename S>
__global__ void SyncBatchNormPost(size_t N, size_t C, size_t H, size_t W, const T *input, T *output, float *means_local,
float *invstds_local, S *scale, S *bias, float epsilon) {
int size = N * C * H * W;
for (size_t pos = blockIdx.x * blockDim.x + threadIdx.x; pos < size; pos += blockDim.x * gridDim.x) {
int block_num = (pos / W) / H; // which of N * C blocks
int plane = block_num % C;
float scale_plane = HalfFloatInputConvert(scale[plane]);
float bias_plane = HalfFloatInputConvert(bias[plane]);
float mean_plane = means_local[plane];
float invstd_plane = invstds_local[plane];
float input_val = HalfFloatInputConvert(input[pos]);
HalfFloatOutputAssign(scale_plane * (input_val - mean_plane) * invstd_plane + bias_plane, output, pos);
}
return;
}
template <typename S>
__global__ void SyncBatchNormPostBiasScale(size_t C, S *scale, S *bias, S *output_scale, S *output_bias) {
for (size_t pos = blockIdx.x * blockDim.x + threadIdx.x; pos < C; pos += blockDim.x * gridDim.x) {
output_bias[pos] = bias[pos];
output_scale[pos] = scale[pos];
}
return;
}
template <typename T>
void CalSyncBatchNormPre(size_t N, size_t C, size_t H, size_t W, const T *input, int *output_n, float *output_mean,
float *output_var, float epsilon, cudaStream_t cuda_stream) {
SyncBatchNormPre<<<C, GET_THREADS, 0, cuda_stream>>>(N, C, H, W, input, output_n, output_mean, output_var, epsilon);
return;
}
template <typename T, typename G>
void CalSyncBatchNormGather(size_t N, size_t C, size_t H, size_t W, int *counts_global, float *means_global,
float *invstds_global, int *counts_local, float *means_local, float *invstds_local,
T *running_mean_output, T *running_var_output, G *running_mean_input, G *running_var_input,
float epsilon, float momentum, size_t group_rank, size_t group_size,
cudaStream_t cuda_stream) {
SyncBatchNormGather<<<GET_BLOCKS(C), GET_THREADS, 0, cuda_stream>>>(
N, C, H, W, counts_global, means_global, invstds_global, counts_local, means_local, invstds_local,
running_mean_output, running_var_output, running_mean_input, running_var_input, epsilon, momentum, group_rank,
group_size);
return;
}
template <typename T, typename S>
void CalSyncBatchNormPost(size_t N, size_t C, size_t H, size_t W, const T *input, T *output, float *means_local,
float *invstds_local, S *scale, S *bias, S *output_scale, S *output_bias, float epsilon,
cudaStream_t cuda_stream) {
SyncBatchNormPost<<<GET_BLOCKS(N * C * H * W), GET_THREADS, 0, cuda_stream>>>(N, C, H, W, input, output, means_local,
invstds_local, scale, bias, epsilon);
SyncBatchNormPostBiasScale<<<1, std::min(C, static_cast<size_t>(GET_THREADS)), 0, cuda_stream>>>(
C, scale, bias, output_scale, output_bias);
return;
}
template CUDA_LIB_EXPORT void CalSyncBatchNormPre<float>(size_t N, size_t C, size_t H, size_t W, const float *input,
int *output_n, float *output_mean, float *output_var,
float epsilon, cudaStream_t cuda_stream);
template CUDA_LIB_EXPORT void CalSyncBatchNormPre<half>(size_t N, size_t C, size_t H, size_t W, const half *input,
int *output_n, float *output_mean, float *output_var,
float epsilon, cudaStream_t cuda_stream);
template CUDA_LIB_EXPORT void CalSyncBatchNormGather<float, float>(size_t N_, size_t C_, size_t H_, size_t W_,
int *counts_global, float *means_global,
float *invstds_global, int *counts_local,
float *means_local, float *invstds_local,
float *running_mean_output,
float *running_var_output, float *running_mean_input,
float *running_var_input, float epsilon,
float momentum, size_t group_rank, size_t group_size,
cudaStream_t cuda_stream);
template CUDA_LIB_EXPORT void CalSyncBatchNormGather<float, half>(size_t N_, size_t C_, size_t H_, size_t W_,
int *counts_global, float *means_global,
float *invstds_global, int *counts_local,
float *means_local, float *invstds_local,
float *running_mean_output, float *running_var_output,
half *running_mean_input, half *running_var_input,
float epsilon, float momentum, size_t group_rank,
size_t group_size, cudaStream_t cuda_stream);
template CUDA_LIB_EXPORT void CalSyncBatchNormGather<half, float>(size_t N_, size_t C_, size_t H_, size_t W_,
int *counts_global, float *means_global,
float *invstds_global, int *counts_local,
float *means_local, float *invstds_local,
half *running_mean_output, half *running_var_output,
float *running_mean_input, float *running_var_input,
float epsilon, float momentum, size_t group_rank,
size_t group_size, cudaStream_t cuda_stream);
template CUDA_LIB_EXPORT void CalSyncBatchNormGather<half, half>(size_t N_, size_t C_, size_t H_, size_t W_,
int *counts_global, float *means_global,
float *invstds_global, int *counts_local,
float *means_local, float *invstds_local,
half *running_mean_output, half *running_var_output,
half *running_mean_input, half *running_var_input,
float epsilon, float momentum, size_t group_rank,
size_t group_size, cudaStream_t cuda_stream);
template CUDA_LIB_EXPORT void CalSyncBatchNormPost<float, float>(size_t N, size_t C, size_t H, size_t W,
const float *input, float *output, float *means_local,
float *invstds_local, float *scale, float *bias,
float *output_scale, float *output_bias, float epsilon,
cudaStream_t cuda_stream);
template CUDA_LIB_EXPORT void CalSyncBatchNormPost<float, half>(size_t N, size_t C, size_t H, size_t W,
const float *input, float *output, float *means_local,
float *invstds_local, half *scale, half *bias,
half *output_scale, half *output_bias, float epsilon,
cudaStream_t cuda_stream);
template CUDA_LIB_EXPORT void CalSyncBatchNormPost<half, float>(size_t N, size_t C, size_t H, size_t W,
const half *input, half *output, float *means_local,
float *invstds_local, float *scale, float *bias,
float *output_scale, float *output_bias, float epsilon,
cudaStream_t cuda_stream);
template CUDA_LIB_EXPORT void CalSyncBatchNormPost<half, half>(size_t N, size_t C, size_t H, size_t W,
const half *input, half *output, float *means_local,
float *invstds_local, half *scale, half *bias,
half *output_scale, half *output_bias, float epsilon,
cudaStream_t cuda_stream);
|
be2313c11f5e310126c9e5efdd43ff381873e206.hip | // !!! This is a file automatically generated by hipify!!!
/*
count the number of match tuple in each partition and each thread
*/
#include <stdio.h>
#include <stdint.h>
#include <hip/hip_runtime.h>
#include <sys/time.h>
#include "tuple.h"
extern "C" {
__global__
void count_partitioning(
TUPLE *t,
int *L,
int p_num,
int t_num,
int rows_num,
int table_type
)
{
int rows_n = rows_num;
int p_n = p_num;
int t_n = t_num;
int PER_TH = (table_type==LEFT) ? LEFT_PER_TH:RIGHT_PER_TH;
int DEF = blockIdx.x * blockDim.x * PER_TH;
int x = blockIdx.x * blockDim.x + threadIdx.x;
//int Dim = (gridDim.x-1==blockIdx.x) ? (t_n-blockIdx.x*blockDim.x):blockDim.x;
// Matching phase
int hash = 0;
if(x < t_n){
for(int i = 0; i<PER_TH&&(DEF+threadIdx.x*PER_TH+i)<rows_n;i++){
hash = t[DEF + threadIdx.x*PER_TH + i].val % p_n;
L[hash*t_n + x]++;
}
}
}
__global__
void partitioning(
TUPLE *t,
TUPLE *pt,
int *L,
int p_num,
int t_num,
int rows_num,
int table_type
)
{
int p_n = p_num;
int t_n = t_num;
int rows_n = rows_num;
int PER_TH = (table_type==LEFT) ? LEFT_PER_TH:RIGHT_PER_TH;
int DEF = blockIdx.x * blockDim.x * PER_TH;
int x = blockIdx.x * blockDim.x + threadIdx.x;
//int Dim = (gridDim.x-1==blockIdx.x) ? (t_n-blockIdx.x*blockDim.x):blockDim.x;
// Matching phase
int hash = 0;
int temp = 0;
if(x < t_n){
for(int i = 0; i<PER_TH&&(DEF+threadIdx.x*PER_TH+i)<rows_n;i++){
hash = t[DEF+threadIdx.x*PER_TH+i].val%p_n;
temp = L[hash*t_n + x]++;
pt[temp] = t[DEF+threadIdx.x*PER_TH+i];
}
}
}
}
| be2313c11f5e310126c9e5efdd43ff381873e206.cu | /*
count the number of match tuple in each partition and each thread
*/
#include <stdio.h>
#include <stdint.h>
#include <cuda.h>
#include <sys/time.h>
#include "tuple.h"
extern "C" {
__global__
void count_partitioning(
TUPLE *t,
int *L,
int p_num,
int t_num,
int rows_num,
int table_type
)
{
int rows_n = rows_num;
int p_n = p_num;
int t_n = t_num;
int PER_TH = (table_type==LEFT) ? LEFT_PER_TH:RIGHT_PER_TH;
int DEF = blockIdx.x * blockDim.x * PER_TH;
int x = blockIdx.x * blockDim.x + threadIdx.x;
//int Dim = (gridDim.x-1==blockIdx.x) ? (t_n-blockIdx.x*blockDim.x):blockDim.x;
// Matching phase
int hash = 0;
if(x < t_n){
for(int i = 0; i<PER_TH&&(DEF+threadIdx.x*PER_TH+i)<rows_n;i++){
hash = t[DEF + threadIdx.x*PER_TH + i].val % p_n;
L[hash*t_n + x]++;
}
}
}
__global__
void partitioning(
TUPLE *t,
TUPLE *pt,
int *L,
int p_num,
int t_num,
int rows_num,
int table_type
)
{
int p_n = p_num;
int t_n = t_num;
int rows_n = rows_num;
int PER_TH = (table_type==LEFT) ? LEFT_PER_TH:RIGHT_PER_TH;
int DEF = blockIdx.x * blockDim.x * PER_TH;
int x = blockIdx.x * blockDim.x + threadIdx.x;
//int Dim = (gridDim.x-1==blockIdx.x) ? (t_n-blockIdx.x*blockDim.x):blockDim.x;
// Matching phase
int hash = 0;
int temp = 0;
if(x < t_n){
for(int i = 0; i<PER_TH&&(DEF+threadIdx.x*PER_TH+i)<rows_n;i++){
hash = t[DEF+threadIdx.x*PER_TH+i].val%p_n;
temp = L[hash*t_n + x]++;
pt[temp] = t[DEF+threadIdx.x*PER_TH+i];
}
}
}
}
|
9bd1fcecc4483e6a2667b2a0ea833ae25b2ec5e5.hip | // !!! This is a file automatically generated by hipify!!!
////////////////////////////////////////////////////////////////////////////////
// BSD 3-Clause License
//
// Copyright (c) 2021, NVIDIA Corporation
// All rights reserved.
//
// Redistribution and use in source and binary forms, with or without
// modification, are permitted provided that the following conditions are met:
//
// 1. Redistributions of source code must retain the above copyright notice, this
// list of conditions and the following disclaimer.
//
// 2. Redistributions in binary form must reproduce the above copyright notice,
// this list of conditions and the following disclaimer in the documentation
// and/or other materials provided with the distribution.
//
// 3. Neither the name of the copyright holder nor the names of its
// contributors may be used to endorse or promote products derived from
// this software without specific prior written permission.
//
// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
// AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
// IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
// DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE
// FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
// DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
// SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
// CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
// OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
/////////////////////////////////////////////////////////////////////////////////
#include <matx.h>
using namespace matx;
/**
* MatX training assignment 2. This training goes through tensor operations that
* were learned in the 02_operators notebook. Uncomment each verification block
* as you go to ensure your solutions are correct.
*/
int main() {
auto A = make_tensor<float>({2, 3});
auto B = make_tensor<float>({2, 3});
auto V = make_tensor<float>({3});
/****************************************************************************************************
* Initialize tensor A with increasing values from 0.5 to 3.0 in steps of 0.4,
*and tensor V from -1 to -3 in steps of -1.
****************************************************************************************************/
/*** End editing ***/
// Verify init is correct
float step = 0.5;
for (int row = 0; row < A.Size(0); row++) {
for (int col = 0; col < A.Size(1); col++) {
if (A(row, col) != step) {
printf("Mismatch in A init view! actual = %f, expected = %f\n",
A(row, col), step);
exit(-1);
}
step += 0.5;
}
}
for (int col = 0; col < V.Size(0); col++) {
if (V(col) != (-1 + col * -1)) {
printf("Mismatch in A init view! actual = %f, expected = %f\n", V(col),
(float)(-1 + col * -1));
exit(-1);
}
}
print(A);
print(V);
printf("Init verification passed!\n");
/****************************************************************************************************
* Add 5.0 to all elements of A and store the results back in A
****************************************************************************************************/
/*** End editing ***/
hipStreamSynchronize(0);
step = 0.5;
for (int row = 0; row < A.Size(0); row++) {
for (int col = 0; col < A.Size(1); col++) {
if (A(row, col) != (5.0 + step)) {
printf("Mismatch in A sum view! actual = %f, expected = %f\n",
A(row, col), 5.0 + step);
exit(-1);
}
step += 0.5;
}
}
print(A);
printf("Sum verification passed!\n");
/****************************************************************************************************
* Clone V to match the dimensions of A, and subtract V from A. The results
* should be stored in A
*
* https://devtech-compute.gitlab-master-pages.nvidia.com/matx/quickstart.html#increasing-dimensionality
* https://devtech-compute.gitlab-master-pages.nvidia.com/matx/api/tensorview.html#_CPPv4I0_iEN4matx12tensor_tE
*
****************************************************************************************************/
/// auto tvs = ;
/*** End editing. ***/
// hipStreamSynchronize(0);
// step = 0.5;
// for (int row = 0; row < A.Size(0); row++) {
// for (int col = 0; col < A.Size(1); col++) {
// if (A(row, col) != (5.0 + step - tvs(row, col))) {
// printf("Mismatch in A sub view! actual = %f, expected = %f\n", A(row,
// col), 5.0 + step - tvs(row, col)); exit(-1);
// }
// step += 0.5;
// }
// }
// print(A);
// print(tvs);
// printf("Clone verification passed!\n");
/****************************************************************************************************
* Raise the matrix A to the power of 2 and multiply the output by two. Next,
* subtract the vector V from each row. Store the result in tensor B.
*
* https://devtech-compute.gitlab-master-pages.nvidia.com/matx/api/tensorops.html#_CPPv4N4matx3powE2Op2Op
****************************************************************************************************/
/*** End editing ***/
hipStreamSynchronize(0);
for (int row = 0; row < B.Size(0); row++) {
for (int col = 0; col < B.Size(1); col++) {
if (B(row, col) != powf(A(row, col), 2) * 2 - V(col)) {
printf("Mismatch in B init view! actual = %f, expected = %f\n",
B(row, col), powf(A(row, col), 2) * 2 - V(col));
exit(-1);
}
}
}
print(B);
printf("Mixed verification passed!\n");
return 0;
}
| 9bd1fcecc4483e6a2667b2a0ea833ae25b2ec5e5.cu | ////////////////////////////////////////////////////////////////////////////////
// BSD 3-Clause License
//
// Copyright (c) 2021, NVIDIA Corporation
// All rights reserved.
//
// Redistribution and use in source and binary forms, with or without
// modification, are permitted provided that the following conditions are met:
//
// 1. Redistributions of source code must retain the above copyright notice, this
// list of conditions and the following disclaimer.
//
// 2. Redistributions in binary form must reproduce the above copyright notice,
// this list of conditions and the following disclaimer in the documentation
// and/or other materials provided with the distribution.
//
// 3. Neither the name of the copyright holder nor the names of its
// contributors may be used to endorse or promote products derived from
// this software without specific prior written permission.
//
// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
// AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
// IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
// DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE
// FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
// DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
// SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
// CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
// OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
/////////////////////////////////////////////////////////////////////////////////
#include <matx.h>
using namespace matx;
/**
* MatX training assignment 2. This training goes through tensor operations that
* were learned in the 02_operators notebook. Uncomment each verification block
* as you go to ensure your solutions are correct.
*/
int main() {
auto A = make_tensor<float>({2, 3});
auto B = make_tensor<float>({2, 3});
auto V = make_tensor<float>({3});
/****************************************************************************************************
* Initialize tensor A with increasing values from 0.5 to 3.0 in steps of 0.4,
*and tensor V from -1 to -3 in steps of -1.
****************************************************************************************************/
/*** End editing ***/
// Verify init is correct
float step = 0.5;
for (int row = 0; row < A.Size(0); row++) {
for (int col = 0; col < A.Size(1); col++) {
if (A(row, col) != step) {
printf("Mismatch in A init view! actual = %f, expected = %f\n",
A(row, col), step);
exit(-1);
}
step += 0.5;
}
}
for (int col = 0; col < V.Size(0); col++) {
if (V(col) != (-1 + col * -1)) {
printf("Mismatch in A init view! actual = %f, expected = %f\n", V(col),
(float)(-1 + col * -1));
exit(-1);
}
}
print(A);
print(V);
printf("Init verification passed!\n");
/****************************************************************************************************
* Add 5.0 to all elements of A and store the results back in A
****************************************************************************************************/
/*** End editing ***/
cudaStreamSynchronize(0);
step = 0.5;
for (int row = 0; row < A.Size(0); row++) {
for (int col = 0; col < A.Size(1); col++) {
if (A(row, col) != (5.0 + step)) {
printf("Mismatch in A sum view! actual = %f, expected = %f\n",
A(row, col), 5.0 + step);
exit(-1);
}
step += 0.5;
}
}
print(A);
printf("Sum verification passed!\n");
/****************************************************************************************************
* Clone V to match the dimensions of A, and subtract V from A. The results
* should be stored in A
*
* https://devtech-compute.gitlab-master-pages.nvidia.com/matx/quickstart.html#increasing-dimensionality
* https://devtech-compute.gitlab-master-pages.nvidia.com/matx/api/tensorview.html#_CPPv4I0_iEN4matx12tensor_tE
*
****************************************************************************************************/
/// auto tvs = ;
/*** End editing. ***/
// cudaStreamSynchronize(0);
// step = 0.5;
// for (int row = 0; row < A.Size(0); row++) {
// for (int col = 0; col < A.Size(1); col++) {
// if (A(row, col) != (5.0 + step - tvs(row, col))) {
// printf("Mismatch in A sub view! actual = %f, expected = %f\n", A(row,
// col), 5.0 + step - tvs(row, col)); exit(-1);
// }
// step += 0.5;
// }
// }
// print(A);
// print(tvs);
// printf("Clone verification passed!\n");
/****************************************************************************************************
* Raise the matrix A to the power of 2 and multiply the output by two. Next,
* subtract the vector V from each row. Store the result in tensor B.
*
* https://devtech-compute.gitlab-master-pages.nvidia.com/matx/api/tensorops.html#_CPPv4N4matx3powE2Op2Op
****************************************************************************************************/
/*** End editing ***/
cudaStreamSynchronize(0);
for (int row = 0; row < B.Size(0); row++) {
for (int col = 0; col < B.Size(1); col++) {
if (B(row, col) != powf(A(row, col), 2) * 2 - V(col)) {
printf("Mismatch in B init view! actual = %f, expected = %f\n",
B(row, col), powf(A(row, col), 2) * 2 - V(col));
exit(-1);
}
}
}
print(B);
printf("Mixed verification passed!\n");
return 0;
}
|
f075900ca140027ebc3a902392a6af9c7991c7fc.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
/*
-- MAGMA (version 2.1.0) --
Univ. of Tennessee, Knoxville
Univ. of California, Berkeley
Univ. of Colorado, Denver
@date August 2016
@author Mark Gates
@author Tingxing Dong
@author Azzam Haidar
@generated from magmablas/zgemv_fermi.cu, normal z -> d, Tue Aug 30 09:38:28 2016
*/
#include "magma_internal.h"
#include "commonblas_d.h"
#include "magma_templates.h"
#define PRECISION_d
#include "gemv_template_device.cuh"
#include "gemv_config/gemvn_param.h"
#include "gemv_config/gemvt_param.h"
#define version(s,v) s ## _V_ ## v
/******************************************************************************/
// NoTrans kernel
template<const int DIM_X, const int DIM_Y, const int TILE_SIZE>
__global__ void
dgemvn_template_kernel_fermi(
int m, int n, double alpha,
const double * __restrict__ A, int lda,
const double * __restrict__ x, int incx, double beta,
double * __restrict__ y, int incy)
{
#if (__CUDA_ARCH__ >= 200)
gemvn_template_device<double, DIM_X, DIM_Y, TILE_SIZE>
(m, n, alpha, A, lda, x, incx, beta, y, incy);
#endif /* (__CUDA_ARCH__ >= 200) */
}
/******************************************************************************/
// Trans/ConjTans kernel
template<const int DIM_X, const int DIM_Y, const int TILE_SIZE, magma_trans_t trans>
__global__ void
dgemvc_template_kernel_fermi(
int m, int n, double alpha,
const double * __restrict__ A, int lda,
const double * __restrict__ x, int incx, double beta,
double * __restrict__ y, int incy)
{
#if (__CUDA_ARCH__ >= 200)
gemvc_template_device< double, DIM_X, DIM_Y, TILE_SIZE, trans >
(m, n, alpha, A, lda, x, incx, beta, y, incy);
#endif /* (__CUDA_ARCH__ >= 200) */
}
/******************************************************************************/
// NoTrans CPU driver
template<const int DIM_X, const int DIM_Y, const int TILE_SIZE>
void
dgemvn_template_fermi(
magma_int_t m, magma_int_t n, double alpha,
const double * __restrict__ A, magma_int_t lda,
const double * __restrict__ x, magma_int_t incx, double beta,
double * __restrict__ y, magma_int_t incy,
magma_queue_t queue)
{
dim3 grid( magma_ceildiv(m, TILE_SIZE), 1 );
dim3 threads( DIM_X, DIM_Y );
hipLaunchKernelGGL(( dgemvn_template_kernel_fermi<DIM_X, DIM_Y, TILE_SIZE>)
, dim3(grid), dim3(threads), 0, queue->cuda_stream() ,
m, n, alpha, A, lda, x, incx, beta, y, incy);
}
/******************************************************************************/
// Trans/ConjTans CPU driver
template<const int DIM_X, const int DIM_Y, const int TILE_SIZE>
void
dgemvc_template_fermi(
magma_trans_t trans, magma_int_t m, magma_int_t n, double alpha,
const double * __restrict__ A, magma_int_t lda,
const double * __restrict__ x, magma_int_t incx, double beta,
double * __restrict__ y, magma_int_t incy,
magma_queue_t queue)
{
dim3 grid ( magma_ceildiv(n, TILE_SIZE), 1 );
dim3 threads ( DIM_X, DIM_Y );
if (trans == MagmaConjTrans) {
hipLaunchKernelGGL(( dgemvc_template_kernel_fermi< DIM_X, DIM_Y, TILE_SIZE, MagmaConjTrans >)
, dim3(grid), dim3(threads), 0, queue->cuda_stream() ,
m, n, alpha, A, lda, x, incx, beta, y, incy);
}
else {
hipLaunchKernelGGL(( dgemvc_template_kernel_fermi< DIM_X, DIM_Y, TILE_SIZE, MagmaTrans >)
, dim3(grid), dim3(threads), 0, queue->cuda_stream() ,
m, n, alpha, A, lda, x, incx, beta, y, incy);
}
}
/***************************************************************************//**
Purpose
-------
DGEMV performs one of the matrix-vector operations
y := alpha*A*x + beta*y, or
y := alpha*A**T*x + beta*y, or
y := alpha*A**H*x + beta*y,
where alpha and beta are scalars, x and y are vectors and A is an
m by n matrix.
Arguments
----------
@param[in]
trans magma_trans_t
On entry, TRANS specifies the operation to be performed as
follows:
- = MagmaNoTrans: y := alpha*A *x + beta*y
- = MagmaTrans: y := alpha*A^T*x + beta*y
- = MagmaConjTrans: y := alpha*A^H*x + beta*y
@param[in]
m INTEGER
On entry, m specifies the number of rows of the matrix A.
@param[in]
n INTEGER
On entry, n specifies the number of columns of the matrix A
@param[in]
alpha DOUBLE PRECISION
On entry, ALPHA specifies the scalar alpha.
@param[in]
dA DOUBLE PRECISION array of dimension ( LDDA, n ) on the GPU.
@param[in]
ldda INTEGER
LDDA specifies the leading dimension of A.
@param[in]
dx DOUBLE PRECISION array of dimension
n if trans == MagmaNoTrans
m if trans == MagmaTrans or MagmaConjTrans
@param[in]
incx Specifies the increment for the elements of X.
INCX must not be zero.
@param[in]
beta DOUBLE PRECISION
On entry, BETA specifies the scalar beta. When BETA is
supplied as zero then Y need not be set on input.
@param[out]
dy DOUBLE PRECISION array of dimension
m if trans == MagmaNoTrans
n if trans == MagmaTrans or MagmaConjTrans
@param[in]
incy Specifies the increment for the elements of Y.
INCY must not be zero.
@param[in]
queue magma_queue_t
Queue to execute in.
@ingroup magma_gemv
*******************************************************************************/
extern "C" void
magmablas_dgemv_q(
magma_trans_t trans, magma_int_t m, magma_int_t n,
double alpha,
magmaDouble_const_ptr dA, magma_int_t ldda,
magmaDouble_const_ptr dx, magma_int_t incx,
double beta,
magmaDouble_ptr dy, magma_int_t incy,
magma_queue_t queue)
{
magma_int_t info = 0;
if ( trans != MagmaNoTrans && trans != MagmaTrans && trans != MagmaConjTrans )
info = -1;
else if ( m < 0 )
info = -2;
else if ( n < 0 )
info = -3;
else if ( ldda < m )
info = -6;
else if ( incx == 0 )
info = -8;
else if ( incy == 0 )
info = -11;
if (info != 0) {
magma_xerbla( __func__, -(info) );
return; //info;
}
// --------------------
// CUDA ARCH 2.x (Fermi) version
if ( trans == MagmaNoTrans ) {
if (m <= 256) {
dgemvn_template_fermi<version(N, 137)>
( m, n, alpha, dA, ldda, dx, incx, beta, dy, incy, queue );
}
else {
dgemvn_template_fermi<version(N, 140)>
( m, n, alpha, dA, ldda, dx, incx, beta, dy, incy, queue );
}
}
else {
dgemvc_template_fermi<version(T, 189)>
( trans, m, n, alpha, dA, ldda, dx, incx, beta, dy, incy, queue );
}
}
| f075900ca140027ebc3a902392a6af9c7991c7fc.cu | /*
-- MAGMA (version 2.1.0) --
Univ. of Tennessee, Knoxville
Univ. of California, Berkeley
Univ. of Colorado, Denver
@date August 2016
@author Mark Gates
@author Tingxing Dong
@author Azzam Haidar
@generated from magmablas/zgemv_fermi.cu, normal z -> d, Tue Aug 30 09:38:28 2016
*/
#include "magma_internal.h"
#include "commonblas_d.h"
#include "magma_templates.h"
#define PRECISION_d
#include "gemv_template_device.cuh"
#include "gemv_config/gemvn_param.h"
#include "gemv_config/gemvt_param.h"
#define version(s,v) s ## _V_ ## v
/******************************************************************************/
// NoTrans kernel
template<const int DIM_X, const int DIM_Y, const int TILE_SIZE>
__global__ void
dgemvn_template_kernel_fermi(
int m, int n, double alpha,
const double * __restrict__ A, int lda,
const double * __restrict__ x, int incx, double beta,
double * __restrict__ y, int incy)
{
#if (__CUDA_ARCH__ >= 200)
gemvn_template_device<double, DIM_X, DIM_Y, TILE_SIZE>
(m, n, alpha, A, lda, x, incx, beta, y, incy);
#endif /* (__CUDA_ARCH__ >= 200) */
}
/******************************************************************************/
// Trans/ConjTans kernel
template<const int DIM_X, const int DIM_Y, const int TILE_SIZE, magma_trans_t trans>
__global__ void
dgemvc_template_kernel_fermi(
int m, int n, double alpha,
const double * __restrict__ A, int lda,
const double * __restrict__ x, int incx, double beta,
double * __restrict__ y, int incy)
{
#if (__CUDA_ARCH__ >= 200)
gemvc_template_device< double, DIM_X, DIM_Y, TILE_SIZE, trans >
(m, n, alpha, A, lda, x, incx, beta, y, incy);
#endif /* (__CUDA_ARCH__ >= 200) */
}
/******************************************************************************/
// NoTrans CPU driver
template<const int DIM_X, const int DIM_Y, const int TILE_SIZE>
void
dgemvn_template_fermi(
magma_int_t m, magma_int_t n, double alpha,
const double * __restrict__ A, magma_int_t lda,
const double * __restrict__ x, magma_int_t incx, double beta,
double * __restrict__ y, magma_int_t incy,
magma_queue_t queue)
{
dim3 grid( magma_ceildiv(m, TILE_SIZE), 1 );
dim3 threads( DIM_X, DIM_Y );
dgemvn_template_kernel_fermi<DIM_X, DIM_Y, TILE_SIZE>
<<< grid, threads, 0, queue->cuda_stream() >>>
(m, n, alpha, A, lda, x, incx, beta, y, incy);
}
/******************************************************************************/
// Trans/ConjTans CPU driver
template<const int DIM_X, const int DIM_Y, const int TILE_SIZE>
void
dgemvc_template_fermi(
magma_trans_t trans, magma_int_t m, magma_int_t n, double alpha,
const double * __restrict__ A, magma_int_t lda,
const double * __restrict__ x, magma_int_t incx, double beta,
double * __restrict__ y, magma_int_t incy,
magma_queue_t queue)
{
dim3 grid ( magma_ceildiv(n, TILE_SIZE), 1 );
dim3 threads ( DIM_X, DIM_Y );
if (trans == MagmaConjTrans) {
dgemvc_template_kernel_fermi< DIM_X, DIM_Y, TILE_SIZE, MagmaConjTrans >
<<< grid, threads, 0, queue->cuda_stream() >>>
(m, n, alpha, A, lda, x, incx, beta, y, incy);
}
else {
dgemvc_template_kernel_fermi< DIM_X, DIM_Y, TILE_SIZE, MagmaTrans >
<<< grid, threads, 0, queue->cuda_stream() >>>
(m, n, alpha, A, lda, x, incx, beta, y, incy);
}
}
/***************************************************************************//**
Purpose
-------
DGEMV performs one of the matrix-vector operations
y := alpha*A*x + beta*y, or
y := alpha*A**T*x + beta*y, or
y := alpha*A**H*x + beta*y,
where alpha and beta are scalars, x and y are vectors and A is an
m by n matrix.
Arguments
----------
@param[in]
trans magma_trans_t
On entry, TRANS specifies the operation to be performed as
follows:
- = MagmaNoTrans: y := alpha*A *x + beta*y
- = MagmaTrans: y := alpha*A^T*x + beta*y
- = MagmaConjTrans: y := alpha*A^H*x + beta*y
@param[in]
m INTEGER
On entry, m specifies the number of rows of the matrix A.
@param[in]
n INTEGER
On entry, n specifies the number of columns of the matrix A
@param[in]
alpha DOUBLE PRECISION
On entry, ALPHA specifies the scalar alpha.
@param[in]
dA DOUBLE PRECISION array of dimension ( LDDA, n ) on the GPU.
@param[in]
ldda INTEGER
LDDA specifies the leading dimension of A.
@param[in]
dx DOUBLE PRECISION array of dimension
n if trans == MagmaNoTrans
m if trans == MagmaTrans or MagmaConjTrans
@param[in]
incx Specifies the increment for the elements of X.
INCX must not be zero.
@param[in]
beta DOUBLE PRECISION
On entry, BETA specifies the scalar beta. When BETA is
supplied as zero then Y need not be set on input.
@param[out]
dy DOUBLE PRECISION array of dimension
m if trans == MagmaNoTrans
n if trans == MagmaTrans or MagmaConjTrans
@param[in]
incy Specifies the increment for the elements of Y.
INCY must not be zero.
@param[in]
queue magma_queue_t
Queue to execute in.
@ingroup magma_gemv
*******************************************************************************/
extern "C" void
magmablas_dgemv_q(
magma_trans_t trans, magma_int_t m, magma_int_t n,
double alpha,
magmaDouble_const_ptr dA, magma_int_t ldda,
magmaDouble_const_ptr dx, magma_int_t incx,
double beta,
magmaDouble_ptr dy, magma_int_t incy,
magma_queue_t queue)
{
magma_int_t info = 0;
if ( trans != MagmaNoTrans && trans != MagmaTrans && trans != MagmaConjTrans )
info = -1;
else if ( m < 0 )
info = -2;
else if ( n < 0 )
info = -3;
else if ( ldda < m )
info = -6;
else if ( incx == 0 )
info = -8;
else if ( incy == 0 )
info = -11;
if (info != 0) {
magma_xerbla( __func__, -(info) );
return; //info;
}
// --------------------
// CUDA ARCH 2.x (Fermi) version
if ( trans == MagmaNoTrans ) {
if (m <= 256) {
dgemvn_template_fermi<version(N, 137)>
( m, n, alpha, dA, ldda, dx, incx, beta, dy, incy, queue );
}
else {
dgemvn_template_fermi<version(N, 140)>
( m, n, alpha, dA, ldda, dx, incx, beta, dy, incy, queue );
}
}
else {
dgemvc_template_fermi<version(T, 189)>
( trans, m, n, alpha, dA, ldda, dx, incx, beta, dy, incy, queue );
}
}
|
51ef0213449d5cfc7c9191dd6f21759bf8a624a8.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
#include "../common/common.h"
#include <stdio.h>
/*
* A simple introduction to programming in CUDA. This program prints "Hello
* World from GPU! from 10 CUDA threads running on the GPU.
*/
__global__ void helloFromGPU()
{
printf("Hello World from GPU!\n");
}
int main(int argc, char **argv)
{
printf("Hello World from CPU!\n");
hipLaunchKernelGGL(( helloFromGPU) , dim3(1), dim3(1), 0, 0, );
CHECK(hipDeviceReset());
return 0;
}
| 51ef0213449d5cfc7c9191dd6f21759bf8a624a8.cu | #include "../common/common.h"
#include <stdio.h>
/*
* A simple introduction to programming in CUDA. This program prints "Hello
* World from GPU! from 10 CUDA threads running on the GPU.
*/
__global__ void helloFromGPU()
{
printf("Hello World from GPU!\n");
}
int main(int argc, char **argv)
{
printf("Hello World from CPU!\n");
helloFromGPU <<<1, 1>>>();
CHECK(cudaDeviceReset());
return 0;
}
|
58dd93df82e738ae1824f4ede348b48703b457b7.hip | // !!! This is a file automatically generated by hipify!!!
#include <algorithm>
#include <cassert>
#include "backend/cuda_backend.cuh"
#include "base/array.h"
#include "management/cuda_manager.h"
#include "management/volume_factory.h"
namespace vecgeom {
CudaManager::CudaManager() {
synchronized = true;
world_ = NULL;
world_gpu_ = NULL;
verbose = 0;
total_volumes = 0;
}
LogicalVolume const* CudaManager::world() const {
assert(world_ != NULL);
return world_;
}
LogicalVolume const* CudaManager::world_gpu() const {
assert(world_gpu_ != NULL);
return world_gpu_;
}
LogicalVolume const* CudaManager::Synchronize() {
if (verbose > 0) std::cerr << "Starting synchronization to GPU.\n";
// Will return null if no geometry is loaded
if (synchronized) return world_gpu_;
CleanGpu();
// Populate the memory map with GPU addresses
AllocateGeometry();
// Create new objects with pointers adjusted to point to GPU memory, then
// copy them to the allocated memory locations on the GPU.
if (verbose > 1) std::cerr << "Copying geometry to GPU...";
if (verbose > 2) std::cerr << "\nCopying logical volumes...";
for (std::set<LogicalVolume const*>::const_iterator i =
logical_volumes.begin(); i != logical_volumes.end(); ++i) {
(*i)->CopyToGpu(
LookupUnplaced((*i)->unplaced_volume()),
LookupDaughters((*i)->daughters_),
LookupLogical(*i)
);
}
if (verbose > 2) std::cerr << " OK\n";
if (verbose > 2) std::cerr << "Copying unplaced volumes...";
for (std::set<VUnplacedVolume const*>::const_iterator i =
unplaced_volumes.begin(); i != unplaced_volumes.end(); ++i) {
(*i)->CopyToGpu(LookupUnplaced(*i));
}
if (verbose > 2) std::cout << " OK\n";
if (verbose > 2) std::cout << "Copying placed volumes...";
for (std::set<VPlacedVolume const*>::const_iterator i =
placed_volumes.begin(); i != placed_volumes.end(); ++i) {
(*i)->CopyToGpu(
LookupLogical((*i)->logical_volume()),
LookupMatrix((*i)->matrix()),
LookupPlaced(*i)
);
}
if (verbose > 2) std::cout << " OK\n";
if (verbose > 2) std::cout << "Copying transformation matrices...";
for (std::set<TransformationMatrix const*>::const_iterator i =
matrices.begin(); i != matrices.end(); ++i) {
(*i)->CopyToGpu(LookupMatrix(*i));
}
if (verbose > 2) std::cout << " OK\n";
if (verbose > 2) std::cout << "Copying daughter arrays...";
for (std::set<Container<Daughter> *>::const_iterator i =
daughters.begin(); i != daughters.end(); ++i) {
// First handle C arrays that must now point to GPU locations
const int daughter_count = (*i)->size();
Daughter *const daughter_array = new Daughter[daughter_count];
int j = 0;
for (Iterator<Daughter> k = (*i)->begin(); k != (*i)->end(); ++k) {
daughter_array[j] = LookupPlaced(*k);
j++;
}
vecgeom::CopyToGpu(
daughter_array, LookupDaughterArray(*i), daughter_count*sizeof(Daughter)
);
// Create array object wrapping newly copied C arrays
(*i)->CopyToGpu(LookupDaughterArray(*i), LookupDaughters(*i));
}
if (verbose > 1) std::cout << " OK\n";
synchronized = true;
world_gpu_ = LookupLogical(world_);
if (verbose > 0) std::cout << "Geometry synchronized to GPU.\n";
return world_gpu_;
}
void CudaManager::LoadGeometry(LogicalVolume const *const volume) {
CleanGpu();
logical_volumes.clear();
unplaced_volumes.clear();
placed_volumes.clear();
matrices.clear();
daughters.clear();
world_ = volume;
ScanGeometry(volume);
// Already set by CleanGpu(), but keep it here for good measure
synchronized = false;
}
void CudaManager::CleanGpu() {
if (memory_map.size() == 0 && world_gpu_ == NULL) return;
if (verbose > 1) std::cout << "Cleaning GPU...";
for (MemoryMap::iterator i = memory_map.begin(); i != memory_map.end(); ++i) {
FreeFromGpu(i->second);
}
memory_map.clear();
world_gpu_ = NULL;
synchronized = false;
if (verbose > 1) std::cout << " OK\n";
}
void CudaManager::AllocateGeometry() {
if (verbose > 1) std::cout << "Allocating geometry on GPU...";
if (verbose > 2) {
size_t free_memory = 0, total_memory = 0;
CudaAssertError(hipMemGetInfo(&free_memory, &total_memory));
std::cout << "\nAvailable memory: " << free_memory << " / "
<< total_memory << std::endl;
}
{
if (verbose > 2) std::cout << "Allocating logical volumes...";
LogicalVolume *gpu_array =
AllocateOnGpu<LogicalVolume>(
logical_volumes.size()*sizeof(LogicalVolume)
);
for (std::set<LogicalVolume const*>::const_iterator i =
logical_volumes.begin(); i != logical_volumes.end(); ++i) {
memory_map[ToCpuAddress(*i)] = ToGpuAddress(gpu_array);
gpu_array++;
}
if (verbose > 2) std::cout << " OK\n";
}
{
if (verbose > 2) std::cout << "Allocating unplaced volumes...";
for (std::set<VUnplacedVolume const*>::const_iterator i =
unplaced_volumes.begin(); i != unplaced_volumes.end(); ++i) {
const GpuAddress gpu_address =
AllocateOnGpu<GpuAddress*>((*i)->memory_size());
memory_map[ToCpuAddress(*i)] = gpu_address;
}
if (verbose > 2) std::cout << " OK\n";
}
{
if (verbose > 2) std::cout << "Allocating placed volumes...";
for (std::set<VPlacedVolume const*>::const_iterator i =
placed_volumes.begin(); i != placed_volumes.end(); ++i) {
const GpuAddress gpu_address =
AllocateOnGpu<GpuAddress*>((*i)->memory_size());
memory_map[ToCpuAddress(*i)] = gpu_address;
}
if (verbose > 2) std::cout << " OK\n";
}
{
if (verbose > 2) std::cout << "Allocating transformation matrices...";
for (std::set<TransformationMatrix const*>::const_iterator i =
matrices.begin(); i != matrices.end(); ++i) {
const GpuAddress gpu_address =
AllocateOnGpu<TransformationMatrix>((*i)->memory_size());
memory_map[ToCpuAddress(*i)] = ToGpuAddress(gpu_address);
}
if (verbose > 2) std::cout << " OK\n";
}
{
if (verbose > 2) std::cout << "Allocating daughter lists...";
Array<Daughter> *gpu_array =
AllocateOnGpu<Array<Daughter> >(
daughters.size()*sizeof(Array<Daughter>)
);
Daughter *gpu_c_array =
AllocateOnGpu<Daughter>(total_volumes*sizeof(Daughter));
for (std::set<Container<Daughter> *>::const_iterator i =
daughters.begin(); i != daughters.end(); ++i) {
memory_map[ToCpuAddress(*i)] = ToGpuAddress(gpu_array);
memory_map[ToCpuAddress(gpu_array)] = ToGpuAddress(gpu_c_array);
gpu_array++;
gpu_c_array += (*i)->size();
}
if (verbose > 2) std::cout << " OK\n";
}
if (verbose == 2) std::cout << " OK\n";
}
void CudaManager::ScanGeometry(LogicalVolume const *const volume) {
if (logical_volumes.find(volume) == logical_volumes.end()) {
logical_volumes.insert(volume);
}
if (unplaced_volumes.find(volume->unplaced_volume_)
== unplaced_volumes.end()) {
unplaced_volumes.insert(volume->unplaced_volume_);
}
if (daughters.find(volume->daughters_) == daughters.end()) {
daughters.insert(volume->daughters_);
}
for (Iterator<Daughter> i = volume->daughters().begin();
i != volume->daughters().end(); ++i) {
if (placed_volumes.find(*i) == placed_volumes.end()) {
placed_volumes.insert(*i);
}
if (matrices.find((*i)->matrix_) == matrices.end()) {
matrices.insert((*i)->matrix_);
}
ScanGeometry((*i)->logical_volume());
}
total_volumes++;
}
void CudaManager::PrintContent() const {
std::cout << "-- Logical volumes with daughters:\n";
for (std::set<LogicalVolume const*>::const_iterator i =
logical_volumes.begin(); i != logical_volumes.end(); ++i) {
std::cout << (**i);
}
std::cout << "-- Unplaced volumes:\n";
for (std::set<VUnplacedVolume const*>::const_iterator i =
unplaced_volumes.begin(); i != unplaced_volumes.end(); ++i) {
std::cout << (**i) << std::endl;
}
std::cout << "-- Placed volumes:\n";
for (std::set<VPlacedVolume const*>::const_iterator i =
placed_volumes.begin(); i != placed_volumes.end(); ++i) {
std::cout << (**i) << std::endl;
}
std::cout << "-- Transformation matrices:\n";
for (std::set<TransformationMatrix const*>::const_iterator i =
matrices.begin(); i != matrices.end(); ++i) {
std::cout << (**i) << std::endl;
}
}
template <typename Type>
typename CudaManager::GpuAddress CudaManager::Lookup(
Type const *const key) {
const CpuAddress cpu_address = ToCpuAddress(key);
GpuAddress output = memory_map[cpu_address];
assert(output != NULL);
return output;
}
VUnplacedVolume* CudaManager::LookupUnplaced(
VUnplacedVolume const *const host_ptr) {
return static_cast<VUnplacedVolume*>(Lookup(host_ptr));
}
LogicalVolume* CudaManager::LookupLogical(
LogicalVolume const *const host_ptr) {
return static_cast<LogicalVolume*>(Lookup(host_ptr));
}
VPlacedVolume* CudaManager::LookupPlaced(
VPlacedVolume const *const host_ptr) {
return static_cast<VPlacedVolume*>(Lookup(host_ptr));
}
TransformationMatrix* CudaManager::LookupMatrix(
TransformationMatrix const *const host_ptr) {
return static_cast<TransformationMatrix*>(Lookup(host_ptr));
}
Array<Daughter>* CudaManager::LookupDaughters(
Container<Daughter> *const host_ptr) {
return static_cast<Array<Daughter>*>(Lookup(host_ptr));
}
Daughter* CudaManager::LookupDaughterArray(
Container<Daughter> *const host_ptr) {
Array<Daughter> const *const daughters = LookupDaughters(host_ptr);
return static_cast<Daughter*>(Lookup(daughters));
}
} // End namespace vecgeom | 58dd93df82e738ae1824f4ede348b48703b457b7.cu | #include <algorithm>
#include <cassert>
#include "backend/cuda_backend.cuh"
#include "base/array.h"
#include "management/cuda_manager.h"
#include "management/volume_factory.h"
namespace vecgeom {
CudaManager::CudaManager() {
synchronized = true;
world_ = NULL;
world_gpu_ = NULL;
verbose = 0;
total_volumes = 0;
}
LogicalVolume const* CudaManager::world() const {
assert(world_ != NULL);
return world_;
}
LogicalVolume const* CudaManager::world_gpu() const {
assert(world_gpu_ != NULL);
return world_gpu_;
}
LogicalVolume const* CudaManager::Synchronize() {
if (verbose > 0) std::cerr << "Starting synchronization to GPU.\n";
// Will return null if no geometry is loaded
if (synchronized) return world_gpu_;
CleanGpu();
// Populate the memory map with GPU addresses
AllocateGeometry();
// Create new objects with pointers adjusted to point to GPU memory, then
// copy them to the allocated memory locations on the GPU.
if (verbose > 1) std::cerr << "Copying geometry to GPU...";
if (verbose > 2) std::cerr << "\nCopying logical volumes...";
for (std::set<LogicalVolume const*>::const_iterator i =
logical_volumes.begin(); i != logical_volumes.end(); ++i) {
(*i)->CopyToGpu(
LookupUnplaced((*i)->unplaced_volume()),
LookupDaughters((*i)->daughters_),
LookupLogical(*i)
);
}
if (verbose > 2) std::cerr << " OK\n";
if (verbose > 2) std::cerr << "Copying unplaced volumes...";
for (std::set<VUnplacedVolume const*>::const_iterator i =
unplaced_volumes.begin(); i != unplaced_volumes.end(); ++i) {
(*i)->CopyToGpu(LookupUnplaced(*i));
}
if (verbose > 2) std::cout << " OK\n";
if (verbose > 2) std::cout << "Copying placed volumes...";
for (std::set<VPlacedVolume const*>::const_iterator i =
placed_volumes.begin(); i != placed_volumes.end(); ++i) {
(*i)->CopyToGpu(
LookupLogical((*i)->logical_volume()),
LookupMatrix((*i)->matrix()),
LookupPlaced(*i)
);
}
if (verbose > 2) std::cout << " OK\n";
if (verbose > 2) std::cout << "Copying transformation matrices...";
for (std::set<TransformationMatrix const*>::const_iterator i =
matrices.begin(); i != matrices.end(); ++i) {
(*i)->CopyToGpu(LookupMatrix(*i));
}
if (verbose > 2) std::cout << " OK\n";
if (verbose > 2) std::cout << "Copying daughter arrays...";
for (std::set<Container<Daughter> *>::const_iterator i =
daughters.begin(); i != daughters.end(); ++i) {
// First handle C arrays that must now point to GPU locations
const int daughter_count = (*i)->size();
Daughter *const daughter_array = new Daughter[daughter_count];
int j = 0;
for (Iterator<Daughter> k = (*i)->begin(); k != (*i)->end(); ++k) {
daughter_array[j] = LookupPlaced(*k);
j++;
}
vecgeom::CopyToGpu(
daughter_array, LookupDaughterArray(*i), daughter_count*sizeof(Daughter)
);
// Create array object wrapping newly copied C arrays
(*i)->CopyToGpu(LookupDaughterArray(*i), LookupDaughters(*i));
}
if (verbose > 1) std::cout << " OK\n";
synchronized = true;
world_gpu_ = LookupLogical(world_);
if (verbose > 0) std::cout << "Geometry synchronized to GPU.\n";
return world_gpu_;
}
void CudaManager::LoadGeometry(LogicalVolume const *const volume) {
CleanGpu();
logical_volumes.clear();
unplaced_volumes.clear();
placed_volumes.clear();
matrices.clear();
daughters.clear();
world_ = volume;
ScanGeometry(volume);
// Already set by CleanGpu(), but keep it here for good measure
synchronized = false;
}
void CudaManager::CleanGpu() {
if (memory_map.size() == 0 && world_gpu_ == NULL) return;
if (verbose > 1) std::cout << "Cleaning GPU...";
for (MemoryMap::iterator i = memory_map.begin(); i != memory_map.end(); ++i) {
FreeFromGpu(i->second);
}
memory_map.clear();
world_gpu_ = NULL;
synchronized = false;
if (verbose > 1) std::cout << " OK\n";
}
void CudaManager::AllocateGeometry() {
if (verbose > 1) std::cout << "Allocating geometry on GPU...";
if (verbose > 2) {
size_t free_memory = 0, total_memory = 0;
CudaAssertError(cudaMemGetInfo(&free_memory, &total_memory));
std::cout << "\nAvailable memory: " << free_memory << " / "
<< total_memory << std::endl;
}
{
if (verbose > 2) std::cout << "Allocating logical volumes...";
LogicalVolume *gpu_array =
AllocateOnGpu<LogicalVolume>(
logical_volumes.size()*sizeof(LogicalVolume)
);
for (std::set<LogicalVolume const*>::const_iterator i =
logical_volumes.begin(); i != logical_volumes.end(); ++i) {
memory_map[ToCpuAddress(*i)] = ToGpuAddress(gpu_array);
gpu_array++;
}
if (verbose > 2) std::cout << " OK\n";
}
{
if (verbose > 2) std::cout << "Allocating unplaced volumes...";
for (std::set<VUnplacedVolume const*>::const_iterator i =
unplaced_volumes.begin(); i != unplaced_volumes.end(); ++i) {
const GpuAddress gpu_address =
AllocateOnGpu<GpuAddress*>((*i)->memory_size());
memory_map[ToCpuAddress(*i)] = gpu_address;
}
if (verbose > 2) std::cout << " OK\n";
}
{
if (verbose > 2) std::cout << "Allocating placed volumes...";
for (std::set<VPlacedVolume const*>::const_iterator i =
placed_volumes.begin(); i != placed_volumes.end(); ++i) {
const GpuAddress gpu_address =
AllocateOnGpu<GpuAddress*>((*i)->memory_size());
memory_map[ToCpuAddress(*i)] = gpu_address;
}
if (verbose > 2) std::cout << " OK\n";
}
{
if (verbose > 2) std::cout << "Allocating transformation matrices...";
for (std::set<TransformationMatrix const*>::const_iterator i =
matrices.begin(); i != matrices.end(); ++i) {
const GpuAddress gpu_address =
AllocateOnGpu<TransformationMatrix>((*i)->memory_size());
memory_map[ToCpuAddress(*i)] = ToGpuAddress(gpu_address);
}
if (verbose > 2) std::cout << " OK\n";
}
{
if (verbose > 2) std::cout << "Allocating daughter lists...";
Array<Daughter> *gpu_array =
AllocateOnGpu<Array<Daughter> >(
daughters.size()*sizeof(Array<Daughter>)
);
Daughter *gpu_c_array =
AllocateOnGpu<Daughter>(total_volumes*sizeof(Daughter));
for (std::set<Container<Daughter> *>::const_iterator i =
daughters.begin(); i != daughters.end(); ++i) {
memory_map[ToCpuAddress(*i)] = ToGpuAddress(gpu_array);
memory_map[ToCpuAddress(gpu_array)] = ToGpuAddress(gpu_c_array);
gpu_array++;
gpu_c_array += (*i)->size();
}
if (verbose > 2) std::cout << " OK\n";
}
if (verbose == 2) std::cout << " OK\n";
}
void CudaManager::ScanGeometry(LogicalVolume const *const volume) {
if (logical_volumes.find(volume) == logical_volumes.end()) {
logical_volumes.insert(volume);
}
if (unplaced_volumes.find(volume->unplaced_volume_)
== unplaced_volumes.end()) {
unplaced_volumes.insert(volume->unplaced_volume_);
}
if (daughters.find(volume->daughters_) == daughters.end()) {
daughters.insert(volume->daughters_);
}
for (Iterator<Daughter> i = volume->daughters().begin();
i != volume->daughters().end(); ++i) {
if (placed_volumes.find(*i) == placed_volumes.end()) {
placed_volumes.insert(*i);
}
if (matrices.find((*i)->matrix_) == matrices.end()) {
matrices.insert((*i)->matrix_);
}
ScanGeometry((*i)->logical_volume());
}
total_volumes++;
}
void CudaManager::PrintContent() const {
std::cout << "-- Logical volumes with daughters:\n";
for (std::set<LogicalVolume const*>::const_iterator i =
logical_volumes.begin(); i != logical_volumes.end(); ++i) {
std::cout << (**i);
}
std::cout << "-- Unplaced volumes:\n";
for (std::set<VUnplacedVolume const*>::const_iterator i =
unplaced_volumes.begin(); i != unplaced_volumes.end(); ++i) {
std::cout << (**i) << std::endl;
}
std::cout << "-- Placed volumes:\n";
for (std::set<VPlacedVolume const*>::const_iterator i =
placed_volumes.begin(); i != placed_volumes.end(); ++i) {
std::cout << (**i) << std::endl;
}
std::cout << "-- Transformation matrices:\n";
for (std::set<TransformationMatrix const*>::const_iterator i =
matrices.begin(); i != matrices.end(); ++i) {
std::cout << (**i) << std::endl;
}
}
template <typename Type>
typename CudaManager::GpuAddress CudaManager::Lookup(
Type const *const key) {
const CpuAddress cpu_address = ToCpuAddress(key);
GpuAddress output = memory_map[cpu_address];
assert(output != NULL);
return output;
}
VUnplacedVolume* CudaManager::LookupUnplaced(
VUnplacedVolume const *const host_ptr) {
return static_cast<VUnplacedVolume*>(Lookup(host_ptr));
}
LogicalVolume* CudaManager::LookupLogical(
LogicalVolume const *const host_ptr) {
return static_cast<LogicalVolume*>(Lookup(host_ptr));
}
VPlacedVolume* CudaManager::LookupPlaced(
VPlacedVolume const *const host_ptr) {
return static_cast<VPlacedVolume*>(Lookup(host_ptr));
}
TransformationMatrix* CudaManager::LookupMatrix(
TransformationMatrix const *const host_ptr) {
return static_cast<TransformationMatrix*>(Lookup(host_ptr));
}
Array<Daughter>* CudaManager::LookupDaughters(
Container<Daughter> *const host_ptr) {
return static_cast<Array<Daughter>*>(Lookup(host_ptr));
}
Daughter* CudaManager::LookupDaughterArray(
Container<Daughter> *const host_ptr) {
Array<Daughter> const *const daughters = LookupDaughters(host_ptr);
return static_cast<Daughter*>(Lookup(daughters));
}
} // End namespace vecgeom |
ba2d39d8a0420560860fc69ea40b023e66505845.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
#include "im2Gray.h"
#define TILE_WITH 16
/*
Given an input image d_in, perform the grayscale operation
using the luminance formula i.e.
o[i] = 0.224f*r + 0.587f*g + 0.111*b;
Your kernel needs to check for boundary conditions
and write the output pixels in gray scale format.
you may vary the BLOCK parameter.
*/
__global__
void im2Gray_share(uchar4 *d_in, unsigned char *d_gray, int numRows, int numCols){
__shared__ uchar4 r[TILE_WITH][TILE_WITH];
//__shared__ int c[TILE_WIDTH];
int x = threadIdx.x+blockIdx.x*blockDim.x;
int y = threadIdx.y+blockIdx.y*blockDim.y;
int tx = threadIdx.x;
int ty = threadIdx.y;
if (y < numRows && x < numCols){
int w = x+y*numRows;
r[tx][ty] = d_in[w];
uchar4 imagePoint = r[tx][ty];
d_gray[w] = .299f*imagePoint.x + .587f*imagePoint.y + .114f*imagePoint.z;
__syncthreads();
}
}
__global__
void im2Gray(uchar4 *d_in, unsigned char *d_gray, int numRows, int numCols){
/*
Your kernel here: Make sure to check for boundary conditions
*/
int x = threadIdx.x + blockIdx.x*blockDim.x;
int y = threadIdx.y + blockIdx.y*blockDim.y;
if (y < numRows && x < numCols){
int pointIndex = y*numRows + x;
uchar4 imagePoint = d_in[pointIndex];
d_gray[pointIndex] = .299f*imagePoint.x + .587f*imagePoint.y + .114f*imagePoint.z;
}
}
void launch_im2gray(uchar4 *d_in, unsigned char* d_grey, size_t numRows, size_t numCols){
// configure launch params here
int x_thread = TILE_WITH;
int y_thread = TILE_WITH;
int grid_x = numCols/x_thread;
int grid_y = numRows/y_thread;
dim3 block(x_thread,y_thread,1);
dim3 grid(ceil(grid_x),ceil(grid_y), 1);
hipLaunchKernelGGL(( im2Gray), dim3(grid),dim3(block), 0, 0, d_in, d_grey, numRows, numCols);
hipLaunchKernelGGL(( im2Gray_share), dim3(grid),dim3(block), 0, 0, d_in, d_grey, numRows, numCols);
hipDeviceSynchronize();
checkCudaErrors(hipGetLastError());
}
| ba2d39d8a0420560860fc69ea40b023e66505845.cu | #include "im2Gray.h"
#define TILE_WITH 16
/*
Given an input image d_in, perform the grayscale operation
using the luminance formula i.e.
o[i] = 0.224f*r + 0.587f*g + 0.111*b;
Your kernel needs to check for boundary conditions
and write the output pixels in gray scale format.
you may vary the BLOCK parameter.
*/
__global__
void im2Gray_share(uchar4 *d_in, unsigned char *d_gray, int numRows, int numCols){
__shared__ uchar4 r[TILE_WITH][TILE_WITH];
//__shared__ int c[TILE_WIDTH];
int x = threadIdx.x+blockIdx.x*blockDim.x;
int y = threadIdx.y+blockIdx.y*blockDim.y;
int tx = threadIdx.x;
int ty = threadIdx.y;
if (y < numRows && x < numCols){
int w = x+y*numRows;
r[tx][ty] = d_in[w];
uchar4 imagePoint = r[tx][ty];
d_gray[w] = .299f*imagePoint.x + .587f*imagePoint.y + .114f*imagePoint.z;
__syncthreads();
}
}
__global__
void im2Gray(uchar4 *d_in, unsigned char *d_gray, int numRows, int numCols){
/*
Your kernel here: Make sure to check for boundary conditions
*/
int x = threadIdx.x + blockIdx.x*blockDim.x;
int y = threadIdx.y + blockIdx.y*blockDim.y;
if (y < numRows && x < numCols){
int pointIndex = y*numRows + x;
uchar4 imagePoint = d_in[pointIndex];
d_gray[pointIndex] = .299f*imagePoint.x + .587f*imagePoint.y + .114f*imagePoint.z;
}
}
void launch_im2gray(uchar4 *d_in, unsigned char* d_grey, size_t numRows, size_t numCols){
// configure launch params here
int x_thread = TILE_WITH;
int y_thread = TILE_WITH;
int grid_x = numCols/x_thread;
int grid_y = numRows/y_thread;
dim3 block(x_thread,y_thread,1);
dim3 grid(ceil(grid_x),ceil(grid_y), 1);
im2Gray<<<grid,block>>>(d_in, d_grey, numRows, numCols);
im2Gray_share<<<grid,block>>>(d_in, d_grey, numRows, numCols);
cudaDeviceSynchronize();
checkCudaErrors(cudaGetLastError());
}
|
3e6f05315341fba4dd2556e657f73293a3c7ccc6.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
#include <stdio.h>
#include <stdlib.h>
#include <Python.h>
#include <numpy/arrayobject.h>
#include "adder.h"
#define BLOCK_SIZE 128
matrix mat_from_pyarray(PyObject* arr)
{
matrix A;
A.mat = (float*)PyArray_DATA(arr);
A.mat_d = NULL;
A.dim[0] = (int)PyArray_DIM(arr,0);
A.dim[1] = (int)PyArray_DIM(arr,1);
return A;
}
static PyObject* Add(PyObject* self, PyObject* args)
{
PyObject *arg1=NULL, *arg2=NULL;
PyObject *npy_a=NULL, *npy_b=NULL, *npy_c=NULL;
if(!PyArg_ParseTuple(args, "OO", &arg1, &arg2))
return NULL;
if (arg1 == NULL) printf("arg1 NULL\n");
// convert to contiguous arrays
npy_a = PyArray_FROM_OTF(arg1, NPY_FLOAT, NPY_IN_FARRAY);
if (npy_a == NULL) goto fail;
npy_b = PyArray_FROM_OTF(arg2, NPY_FLOAT, NPY_IN_FARRAY);
if (npy_b == NULL) goto fail;
npy_c = PyArray_EMPTY(PyArray_NDIM(npy_b), PyArray_DIMS(npy_b), NPY_FLOAT, 1);
if (npy_c == NULL) goto fail;
matrix a, b, c;
a = mat_from_pyarray(npy_a);
b = mat_from_pyarray(npy_b);
c = mat_from_pyarray(npy_c);
copy_matrix_to_device(&a);
copy_matrix_to_device(&b);
copy_matrix_to_device(&c);
element_add(a,b,c);
copy_matrix_from_device(&c);
Py_DECREF(npy_a);
Py_DECREF(npy_b);
return npy_c;
fail:
fprintf(stderr,"failed to allocate numpy arrays\n");
return NULL;
}
PyMethodDef adder_methods[] =
{
{"add", Add, METH_VARARGS, "Add numbers"},
{NULL,NULL,0,NULL}
};
PyMODINIT_FUNC initadder(void)
{
Py_InitModule("adder", adder_methods);
import_array();
}
__global__ void vecAdd(float* a, float* b, float* c, const int N);
void element_add(matrix a, matrix b, matrix c)
{
if (a.dim[0] != b.dim[0] || a.dim[1] != b.dim[1] ||
a.dim[0] != c.dim[0] || a.dim[1] != c.dim[1])
{
fprintf(stderr,"element_add: dimension mismatch\n");
exit(1);
}
const int N = a.dim[0]*a.dim[1];
dim3 dimBlock(BLOCK_SIZE);
dim3 dimGrid((N/dimBlock.x) + (!(N%dimBlock.x)?0:1));
hipLaunchKernelGGL(( vecAdd), dim3(dimGrid),dim3(dimBlock), 0, 0, a.mat_d,b.mat_d,c.mat_d,N);
}
__global__ void vecAdd(float* a, float* b, float* c, const int N)
{
const int i = blockIdx.x*blockDim.x + threadIdx.x;
if(i<N)
c[i] = a[i] + b[i];
}
void copy_matrix_to_device(matrix* A)
{
const int N = A->dim[0]*A->dim[1];
hipError_t err;
if (A->mat == NULL){
fprintf(stderr,"copy_matrix_to_device: matrix not allocated on host\n");
exit(1);
}
if (A->mat_d == NULL){
err = hipMalloc((void**) &(A->mat_d), sizeof(float)*N);
if(err != hipSuccess){
fprintf(stderr,"copy_matrix_to_device: hipMalloc: FAIL\n");
exit(1);
}
}
err = hipMemcpy(A->mat_d,A->mat,sizeof(float)*N, hipMemcpyHostToDevice);
switch (err){
case hipErrorInvalidValue:
fprintf(stderr,"copy_matrix_to_device: hipMemcpy: InvalidValue\n");
exit(1);
break;
case hipErrorInvalidDevicePointer:
fprintf(stderr,"copy_matrix_to_device: hipMemcpy: InvalidDevicePointer\n");
exit(1);
break;
case hipErrorInvalidMemcpyDirection:
fprintf(stderr,"copy_matrix_to_device: hipMemcpy: InvalidMemcpyDirection\n");
exit(1);
break;
}
}
void copy_matrix_from_device(matrix* A)
{
const int N = A->dim[0]*A->dim[1];
if (A->mat_d == NULL){
fprintf(stderr,"copy_matrix_from_device: matrix not allocated on device\n");
exit(1);
}
if (A->mat == NULL)
hipHostMalloc((void**)&(A->mat),sizeof(float)*N);
//A->mat = (float*)malloc(sizeof(float)*N);
hipError_t err;
err = hipMemcpy(A->mat,A->mat_d,sizeof(float)*N, hipMemcpyDeviceToHost);
switch (err){
case hipErrorInvalidValue:
fprintf(stderr,"copy_matrix_to_device: hipMemcpy: InvalidValue\n");
exit(1);
break;
case hipErrorInvalidDevicePointer:
fprintf(stderr,"copy_matrix_to_device: hipMemcpy: InvalidDevicePointer\n");
exit(1);
break;
case hipErrorInvalidMemcpyDirection:
fprintf(stderr,"copy_matrix_to_device: hipMemcpy: InvalidMemcpyDirection\n");
exit(1);
break;
}
}
| 3e6f05315341fba4dd2556e657f73293a3c7ccc6.cu |
#include <stdio.h>
#include <stdlib.h>
#include <Python.h>
#include <numpy/arrayobject.h>
#include "adder.h"
#define BLOCK_SIZE 128
matrix mat_from_pyarray(PyObject* arr)
{
matrix A;
A.mat = (float*)PyArray_DATA(arr);
A.mat_d = NULL;
A.dim[0] = (int)PyArray_DIM(arr,0);
A.dim[1] = (int)PyArray_DIM(arr,1);
return A;
}
/*
 * adder.add(a, b) -> element-wise a + b computed on the GPU.
 * Converts both inputs to contiguous float arrays, uploads them,
 * runs the vector-add kernel, and returns a new numpy array.
 * Returns NULL (with a Python error set by numpy) on failure.
 */
static PyObject* Add(PyObject* self, PyObject* args)
{
    PyObject *arg1 = NULL, *arg2 = NULL;
    PyObject *npy_a = NULL, *npy_b = NULL, *npy_c = NULL;

    if (!PyArg_ParseTuple(args, "OO", &arg1, &arg2))
        return NULL;
    if (arg1 == NULL) printf("arg1 NULL\n");

    /* convert to contiguous (Fortran-order) float arrays */
    npy_a = PyArray_FROM_OTF(arg1, NPY_FLOAT, NPY_IN_FARRAY);
    if (npy_a == NULL) goto fail;
    npy_b = PyArray_FROM_OTF(arg2, NPY_FLOAT, NPY_IN_FARRAY);
    if (npy_b == NULL) goto fail;
    npy_c = PyArray_EMPTY(PyArray_NDIM(npy_b), PyArray_DIMS(npy_b), NPY_FLOAT, 1);
    if (npy_c == NULL) goto fail;

    matrix a, b, c;
    a = mat_from_pyarray(npy_a);
    b = mat_from_pyarray(npy_b);
    c = mat_from_pyarray(npy_c);

    copy_matrix_to_device(&a);
    copy_matrix_to_device(&b);
    copy_matrix_to_device(&c);
    element_add(a, b, c);
    copy_matrix_from_device(&c);

    /* release the device buffers: they were leaked on every call before */
    cudaFree(a.mat_d);
    cudaFree(b.mat_d);
    cudaFree(c.mat_d);

    Py_DECREF(npy_a);
    Py_DECREF(npy_b);
    return npy_c;

fail:
    /* drop whichever conversions succeeded before the failure */
    Py_XDECREF(npy_a);
    Py_XDECREF(npy_b);
    fprintf(stderr,"failed to allocate numpy arrays\n");
    return NULL;
}
// Method table for the "adder" extension module; the all-NULL entry is
// the sentinel the Python C API requires to terminate the table.
PyMethodDef adder_methods[] =
{
{"add", Add, METH_VARARGS, "Add numbers"},
{NULL,NULL,0,NULL}
};
// Module init entry point for the "adder" extension: registers the
// method table and initializes numpy's C API.
PyMODINIT_FUNC initadder(void)
{
Py_InitModule("adder", adder_methods);
// import_array() must run before any PyArray_* call in this module
import_array();
}
__global__ void vecAdd(float* a, float* b, float* c, const int N);
/* Launches vecAdd so that c = a + b element-wise on the device.
 * All three matrices must have identical dimensions and already hold
 * device buffers (mat_d); a mismatch aborts the process. */
void element_add(matrix a, matrix b, matrix c)
{
    const bool same_shape =
        a.dim[0] == b.dim[0] && a.dim[1] == b.dim[1] &&
        a.dim[0] == c.dim[0] && a.dim[1] == c.dim[1];
    if (!same_shape) {
        fprintf(stderr,"element_add: dimension mismatch\n");
        exit(1);
    }
    const int n = a.dim[0] * a.dim[1];
    /* ceil-division so a trailing partial block is still launched */
    const int blocks = (n + BLOCK_SIZE - 1) / BLOCK_SIZE;
    vecAdd<<<blocks, BLOCK_SIZE>>>(a.mat_d, b.mat_d, c.mat_d, n);
}
/* One thread per element: c[idx] = a[idx] + b[idx] for idx in [0, N). */
__global__ void vecAdd(float* a, float* b, float* c, const int N)
{
    const int idx = blockDim.x * blockIdx.x + threadIdx.x;
    if (idx >= N)
        return;              /* tail guard: the grid may overshoot N */
    c[idx] = a[idx] + b[idx];
}
/*
 * Copies A->mat (host) into A->mat_d (device), allocating the device
 * buffer on first use. Any CUDA failure is fatal.
 */
void copy_matrix_to_device(matrix* A)
{
    const int N = A->dim[0]*A->dim[1];
    cudaError_t err;
    if (A->mat == NULL){
        fprintf(stderr,"copy_matrix_to_device: matrix not allocated on host\n");
        exit(1);
    }
    if (A->mat_d == NULL){
        err = cudaMalloc((void**) &(A->mat_d), sizeof(float)*N);
        if(err != cudaSuccess){
            fprintf(stderr,"copy_matrix_to_device: cudaMalloc: FAIL\n");
            exit(1);
        }
    }
    err = cudaMemcpy(A->mat_d, A->mat, sizeof(float)*N, cudaMemcpyHostToDevice);
    switch (err){
    case cudaSuccess:
        break;
    case cudaErrorInvalidValue:
        fprintf(stderr,"copy_matrix_to_device: cudaMemcpy: InvalidValue\n");
        exit(1);
    case cudaErrorInvalidDevicePointer:
        fprintf(stderr,"copy_matrix_to_device: cudaMemcpy: InvalidDevicePointer\n");
        exit(1);
    case cudaErrorInvalidMemcpyDirection:
        fprintf(stderr,"copy_matrix_to_device: cudaMemcpy: InvalidMemcpyDirection\n");
        exit(1);
    default:
        /* previously any other failure was silently ignored */
        fprintf(stderr,"copy_matrix_to_device: cudaMemcpy: %s\n", cudaGetErrorString(err));
        exit(1);
    }
}
/*
 * Copies A's device buffer (A->mat_d) back into host memory (A->mat),
 * allocating pinned host storage on first use. Any CUDA failure is fatal.
 */
void copy_matrix_from_device(matrix* A)
{
    const int N = A->dim[0]*A->dim[1];
    if (A->mat_d == NULL){
        fprintf(stderr,"copy_matrix_from_device: matrix not allocated on device\n");
        exit(1);
    }
    if (A->mat == NULL){
        /* pinned allocation; previously the return code was ignored */
        if (cudaMallocHost((void**)&(A->mat), sizeof(float)*N) != cudaSuccess){
            fprintf(stderr,"copy_matrix_from_device: cudaMallocHost: FAIL\n");
            exit(1);
        }
    }
    cudaError_t err = cudaMemcpy(A->mat, A->mat_d, sizeof(float)*N, cudaMemcpyDeviceToHost);
    switch (err){
    case cudaSuccess:
        break;
    case cudaErrorInvalidValue:
        /* message prefix fixed: previously reported "copy_matrix_to_device" */
        fprintf(stderr,"copy_matrix_from_device: cudaMemcpy: InvalidValue\n");
        exit(1);
    case cudaErrorInvalidDevicePointer:
        fprintf(stderr,"copy_matrix_from_device: cudaMemcpy: InvalidDevicePointer\n");
        exit(1);
    case cudaErrorInvalidMemcpyDirection:
        fprintf(stderr,"copy_matrix_from_device: cudaMemcpy: InvalidMemcpyDirection\n");
        exit(1);
    default:
        /* previously any other failure was silently ignored */
        fprintf(stderr,"copy_matrix_from_device: cudaMemcpy: %s\n", cudaGetErrorString(err));
        exit(1);
    }
}
|
24e326416287e6b8f7f729c0d41d2199720ebff4.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
/*
* Copyright 1993-2006 NVIDIA Corporation. All rights reserved.
*
* NOTICE TO USER:
*
* This source code is subject to NVIDIA ownership rights under U.S. and
* international Copyright laws.
*
* This software and the information contained herein is PROPRIETARY and
* CONFIDENTIAL to NVIDIA and is being provided under the terms and
* conditions of a Non-Disclosure Agreement. Any reproduction or
* disclosure to any third party without the express written consent of
* NVIDIA is prohibited.
*
* NVIDIA MAKES NO REPRESENTATION ABOUT THE SUITABILITY OF THIS SOURCE
* CODE FOR ANY PURPOSE. IT IS PROVIDED "AS IS" WITHOUT EXPRESS OR
* IMPLIED WARRANTY OF ANY KIND. NVIDIA DISCLAIMS ALL WARRANTIES WITH
* REGARD TO THIS SOURCE CODE, INCLUDING ALL IMPLIED WARRANTIES OF
* MERCHANTABILITY, NONINFRINGEMENT, AND FITNESS FOR A PARTICULAR PURPOSE.
* IN NO EVENT SHALL NVIDIA BE LIABLE FOR ANY SPECIAL, INDIRECT, INCIDENTAL,
* OR CONSEQUENTIAL DAMAGES, OR ANY DAMAGES WHATSOEVER RESULTING FROM LOSS
* OF USE, DATA OR PROFITS, WHETHER IN AN ACTION OF CONTRACT, NEGLIGENCE
* OR OTHER TORTIOUS ACTION, ARISING OUT OF OR IN CONNECTION WITH THE USE
* OR PERFORMANCE OF THIS SOURCE CODE.
*
* U.S. Government End Users. This source code is a "commercial item" as
* that term is defined at 48 C.F.R. 2.101 (OCT 1995), consisting of
* "commercial computer software" and "commercial computer software
* documentation" as such terms are used in 48 C.F.R. 12.212 (SEPT 1995)
* and is provided to the U.S. Government only as a commercial end item.
* Consistent with 48 C.F.R.12.212 and 48 C.F.R. 227.7202-1 through
* 227.7202-4 (JUNE 1995), all U.S. Government End Users acquire the
* source code with only those rights set forth herein.
*/
/* Matrix multiplication: C = A * B.
* Host code.
*/
// includes, system
#include <stdlib.h>
#include <stdio.h>
#include <string.h>
#include <math.h>
// includes, project
#include <cutil.h>
// includes, kernels
#include <matrixmul_kernel.cu>
////////////////////////////////////////////////////////////////////////////////
// declarations, forward
extern "C"
void computeGold(float*, const float*, const float*, unsigned int, unsigned int, unsigned int);
Matrix AllocateDeviceMatrix(const Matrix M);
Matrix AllocateMatrix(int height, int width, int init);
void CopyToDeviceMatrix(Matrix Mdevice, const Matrix Mhost);
void CopyFromDeviceMatrix(Matrix Mhost, const Matrix Mdevice);
int ReadFile(Matrix* M, char* file_name);
void WriteFile(Matrix M, char* file_name);
void FreeDeviceMatrix(Matrix* M);
void FreeMatrix(Matrix* M);
void MatrixMulOnDevice(const Matrix M, const Matrix N, Matrix P);
////////////////////////////////////////////////////////////////////////////////
// Program main
////////////////////////////////////////////////////////////////////////////////
// Entry point. With no file arguments, multiplies randomly-sized,
// randomly-filled matrices; with "<params> <M.raw> <N.raw> [out.raw]"
// (argc == 4 or 5) the sizes and data are read from disk.
// P = M * N is computed on the GPU and verified against a CPU
// reference (computeGold) with a 1e-3 tolerance.
// Returns 0 on success, 1 on any input-file error.
int main(int argc, char** argv) {
    Matrix M;
    Matrix N;
    Matrix P;
    int errorM = 0, errorN = 0;

    srand(52);  // fixed seed keeps the random-size path reproducible

    if(argc != 5 && argc != 4)
    {
        // Allocate and initialize the matrices with random sizes/content
        M = AllocateMatrix(rand() % 1024, rand() % 1024, 1);
        N = AllocateMatrix(M.width, rand() % 1024, 1);
        P = AllocateMatrix(M.height, N.width, 0);
    }
    else
    {
        // Allocate and read in matrices from disk
        int* params = NULL;
        unsigned int data_read = 3;
        // fixed: the "&params" argument had been corrupted by an HTML
        // entity in the original source
        cutReadFilei(argv[1], &params, &data_read, true);
        if(data_read != 3){
            printf("Error reading parameter file\n");
            return 1;
        }
        M = AllocateMatrix(params[0], params[1], 0);
        N = AllocateMatrix(params[1], params[2], 0);
        P = AllocateMatrix(params[0], params[2], 0);
        errorM = ReadFile(&M, argv[2]);
        errorN = ReadFile(&N, argv[3]);
        if(errorM || errorN)
        {
            printf("Error reading input files %d, %d\n", errorM, errorN);
            return 1;
        }
    }

    // M * N on the device
    MatrixMulOnDevice(M, N, P);
    printf("GPU computation complete\n");

    // compute the matrix multiplication on the CPU for comparison
    Matrix reference = AllocateMatrix(P.height, P.width, 0);
    computeGold(reference.elements, M.elements, N.elements, M.height, M.width, N.width);
    printf("CPU computation complete\n");

    // check whether the result matches the expected solution
    CUTBoolean res = cutComparefe(reference.elements, P.elements, P.height*P.width, 0.001f);
    printf("Test %s\n", (1 == res) ? "PASSED" : "FAILED");

    if(argc == 5)
    {
        WriteFile(P, argv[4]);
    }
    else if(argc == 2)
    {
        // NOTE(review): unreachable — only argc == 4 or 5 reach the
        // file-based path above; kept to preserve the original shape.
        WriteFile(P, argv[1]);
    }

    // Free matrices (the reference matrix was previously leaked)
    FreeMatrix(&reference);
    FreeMatrix(&M);
    FreeMatrix(&N);
    FreeMatrix(&P);
    return 0;
}
////////////////////////////////////////////////////////////////////////////////
//! Run a simple test for CUDA
////////////////////////////////////////////////////////////////////////////////
// Computes P = M * N on the GPU: uploads both inputs, launches
// MatrixMulKernel on a 32x32-thread grid tiling P, synchronizes, and
// downloads the result into P.elements.
void MatrixMulOnDevice(const Matrix M, const Matrix N, Matrix P)
{
// Load M and N to the device
Matrix Md = AllocateDeviceMatrix(M);
CopyToDeviceMatrix(Md, M);
Matrix Nd = AllocateDeviceMatrix(N);
CopyToDeviceMatrix(Nd, N);
// Allocate P on the device
Matrix Pd = AllocateDeviceMatrix(P);
CopyToDeviceMatrix(Pd, P); // Clear memory
// Setup the execution configuration
// ceil-division: one 32x32 block per 32x32 tile of P
// (N.width columns by M.height rows)
dim3 dimBlock(32, 32);
dim3 dimGrid( (N.width-1) / dimBlock.x + 1, (M.height-1) / dimBlock.y + 1);
// Launch the device computation threads!
hipLaunchKernelGGL(( MatrixMulKernel), dim3(dimGrid), dim3(dimBlock), 0, 0, Md, Nd, Pd);
// block until the kernel finishes before copying results back
hipDeviceSynchronize();
// Read P from the device
CopyFromDeviceMatrix(P, Pd);
/*
for(int i = 0; i<P.height; i++){
for(int j = 0; j <P.width; j++){
printf("%f ", P.elements[i*P.width + j]);
}
printf("\n");
}
*/
// Free device matrices
FreeDeviceMatrix(&Md);
FreeDeviceMatrix(&Nd);
FreeDeviceMatrix(&Pd);
}
// Returns a Matrix whose descriptor fields are copied from M and whose
// elements pointer is a freshly hipMalloc'ed device buffer of the same
// width*height extent.
Matrix AllocateDeviceMatrix(const Matrix M)
{
    Matrix onDevice = M;
    const int bytes = M.width * M.height * sizeof(float);
    hipMalloc((void**)&onDevice.elements, bytes);
    return onDevice;
}
// Allocate a host matrix of dimensions height*width.
// If init == 0, initialize to all zeroes.
// If init == 1, fill with random values in [0, 3).
// If init == 2, initialize matrix parameters, but do not allocate memory.
Matrix AllocateMatrix(int height, int width, int init)
{
    Matrix M;
    M.width = M.pitch = width;
    M.height = height;
    int size = M.width * M.height;
    M.elements = NULL;

    // don't allocate memory on option 2
    if(init == 2)
        return M;

    M.elements = (float*) malloc(size*sizeof(float));
    for(unsigned int i = 0; i < M.height * M.width; i++)
    {
        // Multiply in float: the original "rand()*3" overflowed int on
        // platforms where RAND_MAX == INT_MAX, yielding negative values.
        M.elements[i] = (init == 0) ? (0.0f) : (3.0f * rand() / (float)RAND_MAX);
    }
    return M;
}
// Copy a host matrix to a device matrix.
// NOTE(review): Mdevice is passed by value, so the three descriptor
// assignments below mutate a local copy only and are invisible to the
// caller; the hipMemcpy is the only lasting effect.
void CopyToDeviceMatrix(Matrix Mdevice, const Matrix Mhost)
{
int size = Mhost.width * Mhost.height * sizeof(float);
Mdevice.height = Mhost.height;
Mdevice.width = Mhost.width;
Mdevice.pitch = Mhost.pitch;
hipMemcpy(Mdevice.elements, Mhost.elements, size,
hipMemcpyHostToDevice);
}
// Copy a device matrix to a host matrix: downloads Mdevice's element
// buffer into Mhost's host-side storage.
void CopyFromDeviceMatrix(Matrix Mhost, const Matrix Mdevice)
{
    const int bytes = Mdevice.width * Mdevice.height * sizeof(float);
    hipMemcpy(Mhost.elements, Mdevice.elements, bytes, hipMemcpyDeviceToHost);
}
// Free a device matrix.
// Releases the GPU buffer and nulls the pointer to prevent dangling use.
void FreeDeviceMatrix(Matrix* M)
{
hipFree(M->elements);
M->elements = NULL;
}
// Free a host Matrix.
// Releases the heap buffer and nulls the pointer to prevent dangling use.
void FreeMatrix(Matrix* M)
{
free(M->elements);
M->elements = NULL;
}
// Read a floating point matrix in from file.
// Returns zero if the number of elements read
// equals M.height * M.width, and 1 otherwise.
int ReadFile(Matrix* M, char* file_name)
{
unsigned int data_read = M->height*M->width;
// cutReadFilef fills M->elements; presumably it updates data_read with
// the count actually read (see cutil) — the check below relies on that
cutReadFilef(file_name, &(M->elements), &data_read, true);
return (data_read != (M->height * M->width));
}
// Write an M.width x M.height floating point matrix to file
// (the "16x16" in the original comment was stale).
void WriteFile(Matrix M, char* file_name)
{
// 0.0001f is presumably cutWriteFilef's precision/epsilon argument —
// confirm against cutil documentation
cutWriteFilef(file_name, M.elements, M.width*M.height,
0.0001f);
}
| 24e326416287e6b8f7f729c0d41d2199720ebff4.cu | /*
* Copyright 1993-2006 NVIDIA Corporation. All rights reserved.
*
* NOTICE TO USER:
*
* This source code is subject to NVIDIA ownership rights under U.S. and
* international Copyright laws.
*
* This software and the information contained herein is PROPRIETARY and
* CONFIDENTIAL to NVIDIA and is being provided under the terms and
* conditions of a Non-Disclosure Agreement. Any reproduction or
* disclosure to any third party without the express written consent of
* NVIDIA is prohibited.
*
* NVIDIA MAKES NO REPRESENTATION ABOUT THE SUITABILITY OF THIS SOURCE
* CODE FOR ANY PURPOSE. IT IS PROVIDED "AS IS" WITHOUT EXPRESS OR
* IMPLIED WARRANTY OF ANY KIND. NVIDIA DISCLAIMS ALL WARRANTIES WITH
* REGARD TO THIS SOURCE CODE, INCLUDING ALL IMPLIED WARRANTIES OF
* MERCHANTABILITY, NONINFRINGEMENT, AND FITNESS FOR A PARTICULAR PURPOSE.
* IN NO EVENT SHALL NVIDIA BE LIABLE FOR ANY SPECIAL, INDIRECT, INCIDENTAL,
* OR CONSEQUENTIAL DAMAGES, OR ANY DAMAGES WHATSOEVER RESULTING FROM LOSS
* OF USE, DATA OR PROFITS, WHETHER IN AN ACTION OF CONTRACT, NEGLIGENCE
* OR OTHER TORTIOUS ACTION, ARISING OUT OF OR IN CONNECTION WITH THE USE
* OR PERFORMANCE OF THIS SOURCE CODE.
*
* U.S. Government End Users. This source code is a "commercial item" as
* that term is defined at 48 C.F.R. 2.101 (OCT 1995), consisting of
* "commercial computer software" and "commercial computer software
* documentation" as such terms are used in 48 C.F.R. 12.212 (SEPT 1995)
* and is provided to the U.S. Government only as a commercial end item.
* Consistent with 48 C.F.R.12.212 and 48 C.F.R. 227.7202-1 through
* 227.7202-4 (JUNE 1995), all U.S. Government End Users acquire the
* source code with only those rights set forth herein.
*/
/* Matrix multiplication: C = A * B.
* Host code.
*/
// includes, system
#include <stdlib.h>
#include <stdio.h>
#include <string.h>
#include <math.h>
// includes, project
#include <cutil.h>
// includes, kernels
#include <matrixmul_kernel.cu>
////////////////////////////////////////////////////////////////////////////////
// declarations, forward
extern "C"
void computeGold(float*, const float*, const float*, unsigned int, unsigned int, unsigned int);
Matrix AllocateDeviceMatrix(const Matrix M);
Matrix AllocateMatrix(int height, int width, int init);
void CopyToDeviceMatrix(Matrix Mdevice, const Matrix Mhost);
void CopyFromDeviceMatrix(Matrix Mhost, const Matrix Mdevice);
int ReadFile(Matrix* M, char* file_name);
void WriteFile(Matrix M, char* file_name);
void FreeDeviceMatrix(Matrix* M);
void FreeMatrix(Matrix* M);
void MatrixMulOnDevice(const Matrix M, const Matrix N, Matrix P);
////////////////////////////////////////////////////////////////////////////////
// Program main
////////////////////////////////////////////////////////////////////////////////
// Entry point. With no file arguments, multiplies randomly-sized,
// randomly-filled matrices; with "<params> <M.raw> <N.raw> [out.raw]"
// (argc == 4 or 5) the sizes and data are read from disk.
// P = M * N is computed on the GPU and verified against a CPU
// reference (computeGold) with a 1e-3 tolerance.
// Returns 0 on success, 1 on any input-file error.
int main(int argc, char** argv) {
    Matrix M;
    Matrix N;
    Matrix P;
    int errorM = 0, errorN = 0;

    srand(52);  // fixed seed keeps the random-size path reproducible

    if(argc != 5 && argc != 4)
    {
        // Allocate and initialize the matrices with random sizes/content
        M = AllocateMatrix(rand() % 1024, rand() % 1024, 1);
        N = AllocateMatrix(M.width, rand() % 1024, 1);
        P = AllocateMatrix(M.height, N.width, 0);
    }
    else
    {
        // Allocate and read in matrices from disk
        int* params = NULL;
        unsigned int data_read = 3;
        // fixed: the "&params" argument had been corrupted by an HTML
        // entity in the original source
        cutReadFilei(argv[1], &params, &data_read, true);
        if(data_read != 3){
            printf("Error reading parameter file\n");
            return 1;
        }
        M = AllocateMatrix(params[0], params[1], 0);
        N = AllocateMatrix(params[1], params[2], 0);
        P = AllocateMatrix(params[0], params[2], 0);
        errorM = ReadFile(&M, argv[2]);
        errorN = ReadFile(&N, argv[3]);
        if(errorM || errorN)
        {
            printf("Error reading input files %d, %d\n", errorM, errorN);
            return 1;
        }
    }

    // M * N on the device
    MatrixMulOnDevice(M, N, P);
    printf("GPU computation complete\n");

    // compute the matrix multiplication on the CPU for comparison
    Matrix reference = AllocateMatrix(P.height, P.width, 0);
    computeGold(reference.elements, M.elements, N.elements, M.height, M.width, N.width);
    printf("CPU computation complete\n");

    // check whether the result matches the expected solution
    CUTBoolean res = cutComparefe(reference.elements, P.elements, P.height*P.width, 0.001f);
    printf("Test %s\n", (1 == res) ? "PASSED" : "FAILED");

    if(argc == 5)
    {
        WriteFile(P, argv[4]);
    }
    else if(argc == 2)
    {
        // NOTE(review): unreachable — only argc == 4 or 5 reach the
        // file-based path above; kept to preserve the original shape.
        WriteFile(P, argv[1]);
    }

    // Free matrices (the reference matrix was previously leaked)
    FreeMatrix(&reference);
    FreeMatrix(&M);
    FreeMatrix(&N);
    FreeMatrix(&P);
    return 0;
}
////////////////////////////////////////////////////////////////////////////////
//! Computes P = M * N on the device.
////////////////////////////////////////////////////////////////////////////////
// Uploads both inputs, launches MatrixMulKernel on a 32x32-thread grid
// tiling P, synchronizes, and downloads the result into P.elements.
void MatrixMulOnDevice(const Matrix M, const Matrix N, Matrix P)
{
    // Load M and N to the device
    Matrix Md = AllocateDeviceMatrix(M);
    CopyToDeviceMatrix(Md, M);
    Matrix Nd = AllocateDeviceMatrix(N);
    CopyToDeviceMatrix(Nd, N);

    // Allocate P on the device (the copy initializes the buffer)
    Matrix Pd = AllocateDeviceMatrix(P);
    CopyToDeviceMatrix(Pd, P);

    // ceil-division: one 32x32 block per 32x32 tile of P
    dim3 dimBlock(32, 32);
    dim3 dimGrid( (N.width-1) / dimBlock.x + 1, (M.height-1) / dimBlock.y + 1);

    // Launch the device computation threads!
    MatrixMulKernel<<<dimGrid, dimBlock>>>(Md, Nd, Pd);
    // cudaThreadSynchronize() is deprecated; cudaDeviceSynchronize() is
    // the supported equivalent and blocks until the kernel finishes.
    cudaDeviceSynchronize();

    // Read P from the device
    CopyFromDeviceMatrix(P, Pd);

    // Free device matrices
    FreeDeviceMatrix(&Md);
    FreeDeviceMatrix(&Nd);
    FreeDeviceMatrix(&Pd);
}
// Returns a Matrix whose descriptor fields are copied from M and whose
// elements pointer is a freshly cudaMalloc'ed device buffer of the same
// width*height extent.
Matrix AllocateDeviceMatrix(const Matrix M)
{
    Matrix onDevice = M;
    const int bytes = M.width * M.height * sizeof(float);
    cudaMalloc((void**)&onDevice.elements, bytes);
    return onDevice;
}
// Allocate a host matrix of dimensions height*width.
// If init == 0, initialize to all zeroes.
// If init == 1, fill with random values in [0, 3).
// If init == 2, initialize matrix parameters, but do not allocate memory.
Matrix AllocateMatrix(int height, int width, int init)
{
    Matrix M;
    M.width = M.pitch = width;
    M.height = height;
    int size = M.width * M.height;
    M.elements = NULL;

    // don't allocate memory on option 2
    if(init == 2)
        return M;

    M.elements = (float*) malloc(size*sizeof(float));
    for(unsigned int i = 0; i < M.height * M.width; i++)
    {
        // Multiply in float: the original "rand()*3" overflowed int on
        // platforms where RAND_MAX == INT_MAX, yielding negative values.
        M.elements[i] = (init == 0) ? (0.0f) : (3.0f * rand() / (float)RAND_MAX);
    }
    return M;
}
// Copy a host matrix to a device matrix.
// NOTE(review): Mdevice is passed by value, so the three descriptor
// assignments below mutate a local copy only and are invisible to the
// caller; the cudaMemcpy is the only lasting effect.
void CopyToDeviceMatrix(Matrix Mdevice, const Matrix Mhost)
{
int size = Mhost.width * Mhost.height * sizeof(float);
Mdevice.height = Mhost.height;
Mdevice.width = Mhost.width;
Mdevice.pitch = Mhost.pitch;
cudaMemcpy(Mdevice.elements, Mhost.elements, size,
cudaMemcpyHostToDevice);
}
// Copy a device matrix to a host matrix: downloads Mdevice's element
// buffer into Mhost's host-side storage.
void CopyFromDeviceMatrix(Matrix Mhost, const Matrix Mdevice)
{
    const int bytes = Mdevice.width * Mdevice.height * sizeof(float);
    cudaMemcpy(Mhost.elements, Mdevice.elements, bytes, cudaMemcpyDeviceToHost);
}
// Free a device matrix.
// Releases the GPU buffer and nulls the pointer to prevent dangling use.
void FreeDeviceMatrix(Matrix* M)
{
cudaFree(M->elements);
M->elements = NULL;
}
// Free a host Matrix.
// Releases the heap buffer and nulls the pointer to prevent dangling use.
void FreeMatrix(Matrix* M)
{
free(M->elements);
M->elements = NULL;
}
// Read a floating point matrix in from file.
// Returns zero if the number of elements read
// equals M.height * M.width, and 1 otherwise.
int ReadFile(Matrix* M, char* file_name)
{
unsigned int data_read = M->height*M->width;
// cutReadFilef fills M->elements; presumably it updates data_read with
// the count actually read (see cutil) — the check below relies on that
cutReadFilef(file_name, &(M->elements), &data_read, true);
return (data_read != (M->height * M->width));
}
// Write an M.width x M.height floating point matrix to file
// (the "16x16" in the original comment was stale).
void WriteFile(Matrix M, char* file_name)
{
// 0.0001f is presumably cutWriteFilef's precision/epsilon argument —
// confirm against cutil documentation
cutWriteFilef(file_name, M.elements, M.width*M.height,
0.0001f);
}
|
963b2461c0043796954e7cf638cc146411e15ff8.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
#include <cmath>
// Stores copysign(*v, s) into *v, where s is +1.0 for thread 0 and
// -1.0 for every other thread. All threads write the same location,
// so the result is well-defined only for single-thread launches.
__global__ void my_copysign(double* v)
{
    const double s = (threadIdx.x == 0) ? 1.0 : -1.0;
    *v = std::copysign(*v, s);
}
| 963b2461c0043796954e7cf638cc146411e15ff8.cu | #include <cmath>
// Stores copysign(*v, s) into *v, where s is +1.0 for thread 0 and
// -1.0 for every other thread. All threads write the same location,
// so the result is well-defined only for single-thread launches.
__global__ void my_copysign(double* v)
{
    const double s = (threadIdx.x == 0) ? 1.0 : -1.0;
    *v = std::copysign(*v, s);
}
|
d1f3cd40dba65ee8a269f3790932be59bf4de684.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
#include <stdio.h>
#include <stdlib.h>
#include <stdbool.h>
#include <math.h>
#include "timer.h"
// Number of threads
#define BLOCK_SIZE 16
#define GRID_SIZE 256
//Useful to read Error from CUDA Calls
#define CUDA_CALL(x) {if((x) != hipSuccess){ \
printf("CUDA error at %s:%d\n",__FILE__,__LINE__); \
printf(" %s\n", hipGetErrorString(hipGetLastError())); \
exit(EXIT_FAILURE);}}
// nCentroids and size on device
__constant__ int dev_nCentroids;
__constant__ int dev_size;
// global variables
int PALETTE_BYTES = 0; // nCentroids * sizeof(int)
int IMAGE_BYTES = 0; // width * height * sizeof(int)
//**********************************
//R,G,B Centroid's triple on device
// nCentroids on GPU is HARDCODED remind to update it manually!
__constant__ int dev_RedCentroid[20];
__constant__ int dev_GreenCentroid[20];
__constant__ int dev_BlueCentroid[20];
//**********************************
/*
 * Reads nCentroids R,G,B byte triples (RAW format) from `filename`
 * into the three centroid channel arrays and echoes them to stdout.
 * Returns false if the file cannot be opened, true otherwise.
 */
bool loadPalette(char* filename, int nCentroids, int* redCentroid, int* greenCentroid, int* blueCentroid) {
    FILE *imageFile;
    // "rb": the palette is raw binary data; text mode would translate
    // bytes like 0x0D/0x1A on platforms that rewrite line endings.
    imageFile = fopen(filename, "rb");
    if (imageFile == NULL) {
        return false;
    } else {
        for (int i = 0; i < nCentroids; i++) {
            // one R,G,B byte triple per centroid
            redCentroid[i] = fgetc(imageFile);
            greenCentroid[i] = fgetc(imageFile);
            blueCentroid[i] = fgetc(imageFile);
            printf("%d, %d, %d\n",redCentroid[i], greenCentroid[i], blueCentroid[i] );
        }
        fclose(imageFile);
        printf("\n");
        return true;
    }
}
/*
 * Image file loader (RAW format).
 * Reads `size` interleaved R,G,B byte triples from `filename` into the
 * three per-channel arrays (each `size` long).
 * Returns false if the file cannot be opened, true otherwise.
 */
bool loadRawImage(char* filename, int* r, int* g, int* b, int size) {
    // "rb": raw pixel bytes must not pass through text-mode translation
    FILE *imageFile = fopen(filename, "rb");
    if (imageFile == NULL) {
        return false;
    } else {
        for (int i = 0; i < size; i++) {
            r[i] = fgetc(imageFile);
            g[i] = fgetc(imageFile);
            b[i] = fgetc(imageFile);
        }
        fclose(imageFile);
        return true;
    }
}
/*
 * Image file writer (RAW format).
 * For each of the `size` pixels, writes the R,G,B triple of the
 * centroid selected by labelArray[pixel].
 * Returns false when the output file cannot be created.
 */
bool writeRawImage(char* filename, int* labelArray, int* redCentroid, int* greenCentroid, int* blueCentroid, int size){
    FILE *out = fopen(filename, "wb");
    if (out == NULL)
        return false;
    for (int px = 0; px < size; px++) {
        const int label = labelArray[px];
        fputc((char) redCentroid[label], out);
        fputc((char) greenCentroid[label], out);
        fputc((char) blueCentroid[label], out);
    }
    fclose(out);
    return true;
}
/*******************************************************************/
/*
 * Zeroes every per-cluster accumulator before a k-means iteration.
 * Launched as a single 2-D block: thread t clears slot t of each
 * dev_nCentroids-long array.
 */
__global__ void clearPaletteArrays(int *dev_sumRed,int *dev_sumGreen,int *dev_sumBlue, int* dev_pixelClusterCounter, int* dev_tempRedCentroid, int* dev_tempGreenCentroid, int* dev_tempBlueCentroid ) {
    const int slot = threadIdx.y * blockDim.x + threadIdx.x;
    if (slot >= dev_nCentroids)
        return;
    dev_sumRed[slot] = 0;
    dev_sumGreen[slot] = 0;
    dev_sumBlue[slot] = 0;
    dev_pixelClusterCounter[slot] = 0;
    dev_tempRedCentroid[slot] = 0;
    dev_tempGreenCentroid[slot] = 0;
    dev_tempBlueCentroid[slot] = 0;
}
/*
 * Resets every pixel label to 0 before a getClusterLabel pass.
 * One thread per pixel across the 2-D grid; dev_labelArray holds
 * dev_size entries.
 */
__global__ void clearLabelArray(int *dev_labelArray){
    const int gid = (blockIdx.x * blockDim.x + threadIdx.x)
                  + (blockIdx.y * blockDim.y + threadIdx.y) * blockDim.x * gridDim.x;
    if (gid < dev_size)
        dev_labelArray[gid] = 0;
}
/*
 * Assigns each pixel the index of its nearest centroid (Euclidean
 * distance in RGB space) and stores it in dev_labelArray.
 * One thread per pixel over the 2-D grid; dev_labelArray is
 * width*height (dev_size) long.
 */
__global__ void getClusterLabel(int *dev_Red,int *dev_Green,int *dev_Blue,int *dev_labelArray) {
    // Global thread index
    int threadID = (threadIdx.x + blockIdx.x * blockDim.x) + (threadIdx.y + blockIdx.y * blockDim.y) * blockDim.x * gridDim.x;
    if(threadID < dev_size) {
        // Load the pixel once instead of re-reading global memory for
        // every centroid in the loop below.
        const int r = dev_Red[threadID];
        const int g = dev_Green[threadID];
        const int b = dev_Blue[threadID];
        // Compare squared distances: sqrt is monotonic, so the argmin
        // (and thus the label) is unchanged while a sqrtf per centroid
        // is saved. 250000.0f exceeds the largest possible squared RGB
        // distance (3 * 256^2 = 196608), so the first centroid always
        // replaces the sentinel — same effect as the original 500.0
        // (a double literal in a float kernel, now a float).
        float best = 250000.0f;
        int index = 0;
        for(int i = 0; i < dev_nCentroids; i++) {
            const float dr = (float)(r - dev_RedCentroid[i]);
            const float dg = (float)(g - dev_GreenCentroid[i]);
            const float db = (float)(b - dev_BlueCentroid[i]);
            const float dist2 = dr * dr + dg * dg + db * db;
            if(dist2 < best){
                // new nearest centroid and its index
                best = dist2;
                index = i;
            }
        }// end for
        // Writing to global memory the index of the nearest centroid
        dev_labelArray[threadID] = index;
    }// end if
}// end getClusterLabel
/*
 * Accumulates, per cluster, the sum of every color channel and the
 * number of member pixels. The accumulators live in global memory and
 * are shared by all threads, hence the atomics.
 */
__global__ void sumCluster(int *dev_Red,int *dev_Green,int *dev_Blue,int *dev_sumRed,int *dev_sumGreen,int *dev_sumBlue,int *dev_labelArray,int *dev_pixelClusterCounter) {
    const int gid = (threadIdx.x + blockIdx.x * blockDim.x)
                  + (threadIdx.y + blockIdx.y * blockDim.y) * blockDim.x * gridDim.x;
    if (gid >= dev_size)
        return;
    const int cluster = dev_labelArray[gid];
    // many threads hit the same cluster slot, so the writes serialize
    atomicAdd(&dev_sumRed[cluster], dev_Red[gid]);
    atomicAdd(&dev_sumGreen[cluster], dev_Green[gid]);
    atomicAdd(&dev_sumBlue[cluster], dev_Blue[gid]);
    atomicAdd(&dev_pixelClusterCounter[cluster], 1);
}
/*
 * Derives each cluster's new RGB centroid as the mean of its member
 * pixels. Results go to global memory, since the current centroids
 * live in read-only __constant__ memory and are swapped in by the host.
 * Launched as a single block with at least dev_nCentroids threads.
 */
__global__ void newCentroids(int *dev_tempRedCentroid, int *dev_tempGreenCentroid, int *dev_tempBlueCentroid,int* dev_sumRed, int *dev_sumGreen,int *dev_sumBlue, int* dev_pixelClusterCounter) {
    // 1 block, 16*16 threads
    int threadID = threadIdx.x + threadIdx.y * blockDim.x;
    if(threadID < dev_nCentroids) {
        const int count = dev_pixelClusterCounter[threadID];
        if (count > 0) {
            // mean of each channel over the cluster's pixels
            dev_tempRedCentroid[threadID] = dev_sumRed[threadID] / count;
            dev_tempGreenCentroid[threadID] = dev_sumGreen[threadID] / count;
            dev_tempBlueCentroid[threadID] = dev_sumBlue[threadID] / count;
        } else {
            // Empty cluster: the original code divided by zero here.
            // Keep the previous centroid so it can attract pixels in a
            // later iteration instead of producing garbage.
            dev_tempRedCentroid[threadID] = dev_RedCentroid[threadID];
            dev_tempGreenCentroid[threadID] = dev_GreenCentroid[threadID];
            dev_tempBlueCentroid[threadID] = dev_BlueCentroid[threadID];
        }
    }
}// end newCentroids
/*******************************************************************/
int main(int argc, char *argv[]) {
// init device
hipSetDevice(0);
hipDeviceSynchronize();
hipDeviceSynchronize();
//input raw file, output raw file, input palette raw file containing RGB values of initial centroids
char *inputFile, *outputFile, *palette;
//Pixels' r,g,b values. Centroid's r,g,b values
int *red, *green, *blue, *redCentroid, *greenCentroid, *blueCentroid;
// ref to GPU Pixels'RGB values, Centroids' RGB values
int *dev_Red, *dev_Green, *dev_Blue, *dev_tempRedCentroid, *dev_tempGreenCentroid, *dev_tempBlueCentroid;
// array containing ref to GPU label array variable
int *labelArray, *dev_labelArray;
// local variables for storing image width, height
// number of cluster, number of iterations, linear size of the image ( = width * height)
int width, height, nCentroids, nIterations,size;
//int IMAGE_BYTES, PALETTE_BYTES;
// ref to array where pixels' count are stored
int *pixelClusterCounter, *dev_pixelClusterCounter;
// ref to arrays where sum of RGB values for each cluster are stored
int *sumRed, *sumGreen, *sumBlue;
int *dev_sumRed, *dev_sumGreen, *dev_sumBlue;
// checking arguments
if (argc > 7) {
inputFile = argv[1];
outputFile = argv[2];
width = atoi(argv[3]);
height = atoi(argv[4]);
palette = argv[5];
nCentroids = atoi(argv[6]); // remind to update hardcoded nCentroids above
if(nCentroids > 256)
nCentroids = 256;
nIterations = atoi(argv[7]);
if(nIterations > 15)
nIterations = 15;
} else {
printf(" USAGE: kmeans.cu <inputfile.raw> <outputfile.raw> nRows nCols palette nCentroids nItarations \n");
printf(" <inputfile.raw>: input .raw file (sequence of bytes)\n");
printf(" <outputfile.raw>: output .raw file\n");
printf(" nRows: the number of rows of the image\n");
printf(" nCols: the number of columns of the image\n");
printf(" palette: RGB initial Centroids");
printf(" nCentroids: number of clusters");
printf(" nIterations: number of iterations of K-Means");
return 0;
}
// Setting image and palette size in bytes
IMAGE_BYTES = width * height * sizeof(int);
PALETTE_BYTES = nCentroids * sizeof(int);
size = width * height;
/* TODO useful for Texture implementation
int ra[width][height];
int ga[width][height];
int ba[width][height];
*/
printf("Image: %s\n",inputFile);
printf("Width: %d, Height: %d\n", width, height);
printf("#Clusters: %d, #Iterations: %d\n", nCentroids, nIterations);
// allocate memory on CPU
red = static_cast<int *>(malloc(IMAGE_BYTES));
green = static_cast<int *>(malloc(IMAGE_BYTES));
blue = static_cast<int *>(malloc(IMAGE_BYTES));
redCentroid = static_cast<int *>(malloc(PALETTE_BYTES));
greenCentroid = static_cast<int *>(malloc(PALETTE_BYTES));
blueCentroid = static_cast<int *>(malloc(PALETTE_BYTES));
labelArray = static_cast<int *>(malloc(IMAGE_BYTES));
sumRed = static_cast<int*>(malloc(PALETTE_BYTES));
sumGreen = static_cast<int*>(malloc(PALETTE_BYTES));
sumBlue = static_cast<int*>(malloc(PALETTE_BYTES));
pixelClusterCounter = static_cast<int*>(malloc(PALETTE_BYTES));
// Setting initial centroids
printf("Initial Centroids: \n");
if(loadPalette(palette, nCentroids, redCentroid, greenCentroid, blueCentroid)) {
} else {
printf("Unable to set Initial Centroids.\n");
}
// Loading image in r, g, b arrays
printf("Image loading...\n");
if (loadRawImage(inputFile, red, green, blue, size)) {
printf("Image loaded!\n");
} else {
printf("NOT loaded!\n");
return -1;
}
printf("\n");
/* TODO Useful for Texture implementation
int ri = 0;
int co = 0;
for( ri = 0; ri < width; ri++){
for( co = 0; co < height; co++) {
int x = ri+co*width;
ra[ri][co] = red[x];
ga[ri][co] = green[x];
ba[ri][co] = blue[x];
}
}
*/
if(IMAGE_BYTES == 0 || PALETTE_BYTES == 0) {
return -1;
}
// allocate memory on GPU
CUDA_CALL(hipMalloc((void**) &dev_Red, IMAGE_BYTES));
CUDA_CALL(hipMalloc((void**) &dev_Green, IMAGE_BYTES));
CUDA_CALL(hipMalloc((void**) &dev_Blue, IMAGE_BYTES));
CUDA_CALL(hipMalloc((void**) &dev_tempRedCentroid, PALETTE_BYTES));
CUDA_CALL(hipMalloc((void**) &dev_tempGreenCentroid, PALETTE_BYTES));
CUDA_CALL(hipMalloc((void**) &dev_tempBlueCentroid, PALETTE_BYTES));
CUDA_CALL(hipMalloc((void**) &dev_labelArray, IMAGE_BYTES));
CUDA_CALL(hipMalloc((void**) &dev_sumRed, PALETTE_BYTES));
CUDA_CALL(hipMalloc((void**) &dev_sumGreen, PALETTE_BYTES));
CUDA_CALL(hipMalloc((void**) &dev_sumBlue, PALETTE_BYTES));
CUDA_CALL(hipMalloc((void**) &dev_pixelClusterCounter, PALETTE_BYTES));
// copy host CPU memory to GPU
CUDA_CALL(hipMemcpy(dev_Red, red, IMAGE_BYTES, hipMemcpyHostToDevice));
CUDA_CALL(hipMemcpy(dev_Green, green, IMAGE_BYTES, hipMemcpyHostToDevice));
CUDA_CALL(hipMemcpy(dev_Blue, blue, IMAGE_BYTES, hipMemcpyHostToDevice));
CUDA_CALL(hipMemcpy(dev_tempRedCentroid, redCentroid,PALETTE_BYTES,hipMemcpyHostToDevice ));
CUDA_CALL(hipMemcpy(dev_tempGreenCentroid, greenCentroid,PALETTE_BYTES,hipMemcpyHostToDevice ));
CUDA_CALL(hipMemcpy(dev_tempBlueCentroid, blueCentroid,PALETTE_BYTES,hipMemcpyHostToDevice ));
CUDA_CALL(hipMemcpy(dev_labelArray, labelArray, IMAGE_BYTES, hipMemcpyHostToDevice));
CUDA_CALL(hipMemcpy(dev_sumRed, sumRed, PALETTE_BYTES, hipMemcpyHostToDevice));
CUDA_CALL(hipMemcpy(dev_sumGreen, sumGreen, PALETTE_BYTES, hipMemcpyHostToDevice));
CUDA_CALL(hipMemcpy(dev_sumBlue, sumBlue, PALETTE_BYTES, hipMemcpyHostToDevice));
CUDA_CALL(hipMemcpy(dev_pixelClusterCounter, pixelClusterCounter, PALETTE_BYTES, hipMemcpyHostToDevice));
CUDA_CALL(hipMemcpyToSymbol(dev_RedCentroid, redCentroid, PALETTE_BYTES));
CUDA_CALL(hipMemcpyToSymbol(dev_GreenCentroid, greenCentroid, PALETTE_BYTES));
CUDA_CALL(hipMemcpyToSymbol(dev_BlueCentroid, blueCentroid, PALETTE_BYTES));
CUDA_CALL(hipMemcpyToSymbol(dev_nCentroids,&nCentroids, sizeof(int)));
CUDA_CALL(hipMemcpyToSymbol(dev_size, &size, sizeof(int)));
// Clearing centroids on host
for(int i = 0; i < nCentroids; i++) {
redCentroid[i] = 0;
greenCentroid[i] = 0;
blueCentroid[i] = 0;
}
// Defining grid size
int BLOCK_X, BLOCK_Y;
BLOCK_X = ceil(width/BLOCK_SIZE);
BLOCK_Y = ceil(height/BLOCK_SIZE);
if(BLOCK_X > GRID_SIZE)
BLOCK_X = GRID_SIZE;
if(BLOCK_Y > GRID_SIZE)
BLOCK_Y = GRID_SIZE;
//2D Grid
//Minimum number of threads that can handle widthheight pixels
dim3 dimGRID(BLOCK_X,BLOCK_Y);
//2D Block
//Each dimension is fixed
dim3 dimBLOCK(BLOCK_SIZE,BLOCK_SIZE);
//Starting timer
GpuTimer timer;
timer.Start();
printf("Launching K-Means Kernels.. \n");
//Iteration of kmeans algorithm
for(int i = 0; i < nIterations; i++) {
// Passing image RGB components, palette RGB components, label Array, number of Clusters
// Init arrays' values to 0
// Kernel needs only 1 block since nClusters
hipLaunchKernelGGL(( clearPaletteArrays), dim3(1), dim3(dimBLOCK), 0, 0, dev_sumRed, dev_sumGreen, dev_sumBlue, dev_pixelClusterCounter, dev_tempRedCentroid, dev_tempGreenCentroid, dev_tempBlueCentroid);
// Init labelarray values to 0
hipLaunchKernelGGL(( clearLabelArray), dim3(dimGRID), dim3(dimBLOCK), 0, 0, dev_labelArray);
// Calculates the distance from each pixel and all centroids
// Then saves the equivalent label in dev_labelArray
hipLaunchKernelGGL(( getClusterLabel), dim3(dimGRID), dim3(dimBLOCK) , 0, 0, dev_Red, dev_Green, dev_Blue,dev_labelArray);
//Sums RGB values in each Cluster
hipLaunchKernelGGL(( sumCluster), dim3(dimGRID), dim3(dimBLOCK), 0, 0, dev_Red, dev_Green, dev_Blue, dev_sumRed, dev_sumGreen, dev_sumBlue, dev_labelArray,dev_pixelClusterCounter);
//Finds new RGB Centroids' values
hipLaunchKernelGGL(( newCentroids), dim3(1),dim3(dimBLOCK) , 0, 0, dev_tempRedCentroid, dev_tempGreenCentroid, dev_tempBlueCentroid, dev_sumRed, dev_sumGreen, dev_sumBlue, dev_pixelClusterCounter);
//Old RGB Centroids' values are in constant memory
//Updated RGB Centroids' values are in global memory
//We need a swap
CUDA_CALL(hipMemcpy(redCentroid, dev_tempRedCentroid, PALETTE_BYTES,hipMemcpyDeviceToHost));
CUDA_CALL(hipMemcpy(greenCentroid, dev_tempGreenCentroid, PALETTE_BYTES,hipMemcpyDeviceToHost));
CUDA_CALL(hipMemcpy(blueCentroid, dev_tempBlueCentroid, PALETTE_BYTES,hipMemcpyDeviceToHost));
//Uploading in constant memory updated RGB Centroids' values
CUDA_CALL(hipMemcpyToSymbol(dev_RedCentroid, redCentroid, PALETTE_BYTES));
CUDA_CALL(hipMemcpyToSymbol(dev_GreenCentroid, greenCentroid, PALETTE_BYTES));
CUDA_CALL(hipMemcpyToSymbol(dev_BlueCentroid, blueCentroid, PALETTE_BYTES));
timer.Stop();
}
// DEBUG
CUDA_CALL(hipMemcpy(labelArray, dev_labelArray, IMAGE_BYTES, hipMemcpyDeviceToHost));
CUDA_CALL(hipMemcpy(sumRed, dev_sumRed, PALETTE_BYTES, hipMemcpyDeviceToHost));
CUDA_CALL(hipMemcpy(sumGreen, dev_sumGreen, PALETTE_BYTES, hipMemcpyDeviceToHost));
CUDA_CALL(hipMemcpy(sumBlue, dev_sumBlue, PALETTE_BYTES, hipMemcpyDeviceToHost));
CUDA_CALL(hipMemcpy(pixelClusterCounter, dev_pixelClusterCounter, PALETTE_BYTES, hipMemcpyDeviceToHost));
printf("Kmeans code ran in: %f msecs.\n", timer.Elapsed());
printf("\n");
// labelArray DEBUG
int counter = 0;
printf("Label Array:\n");
for(int i = 0; i < (size); i++) {
//printf("%d\n", labelArray[i]);
counter++;
}
printf("printing counter %d\n", counter);
counter = 0;
printf("Sum Arrays:\n");
for(int j = 0; j < nCentroids; j++) {
printf("r: %u g: %u b: %u \n", sumRed[j], sumGreen[j], sumBlue[j]);
counter++;
}
printf("\n");
printf("Pixels per centroids:\n");
for(int k = 0; k < nCentroids; k++){
printf("%d centroid: %d pixels\n", k, pixelClusterCounter[k]);
}
printf("\n");
printf("New centroids:\n");
for(int i = 0; i < nCentroids; i++) {
printf("%d, %d, %d \n", redCentroid[i], greenCentroid[i], blueCentroid[i]);
}
// writing...
printf("Image writing...\n");
if (writeRawImage(outputFile,labelArray, redCentroid, greenCentroid, blueCentroid, size)) {
printf("Image written!\n");
} else {
printf("NOT written!\n");
return -1;
}
free(red);
free(green);
free(blue);
free(redCentroid);
free(greenCentroid);
free(blueCentroid);
free(labelArray);
free(sumRed);
free(sumGreen);
free(sumBlue);
free(pixelClusterCounter);
CUDA_CALL(hipFree(dev_Red));
CUDA_CALL(hipFree(dev_Green));
CUDA_CALL(hipFree(dev_Blue));
CUDA_CALL(hipFree(dev_tempRedCentroid));
CUDA_CALL(hipFree(dev_tempGreenCentroid));
CUDA_CALL(hipFree(dev_tempBlueCentroid));
CUDA_CALL(hipFree(dev_labelArray));
CUDA_CALL(hipFree(dev_sumRed));
CUDA_CALL(hipFree(dev_sumGreen));
CUDA_CALL(hipFree(dev_sumBlue));
CUDA_CALL(hipFree(dev_pixelClusterCounter));
printf("That's the end.\n");
return 0;
}
| d1f3cd40dba65ee8a269f3790932be59bf4de684.cu | #include <stdio.h>
#include <stdlib.h>
#include <stdbool.h>
#include <math.h>
#include "timer.h"
// Number of threads
#define BLOCK_SIZE 16
#define GRID_SIZE 256
//Useful to read Error from CUDA Calls
#define CUDA_CALL(x) {if((x) != cudaSuccess){ \
printf("CUDA error at %s:%d\n",__FILE__,__LINE__); \
printf(" %s\n", cudaGetErrorString(cudaGetLastError())); \
exit(EXIT_FAILURE);}}
// nCentroids and size on device
__constant__ int dev_nCentroids;
__constant__ int dev_size;
// global variables
int PALETTE_BYTES = 0; // nCentroids * sizeof(int)
int IMAGE_BYTES = 0; // width * height * sizeof(int)
//**********************************
//R,G,B Centroid's triple on device
// nCentroids on GPU is HARDCODED remind to update it manually!
__constant__ int dev_RedCentroid[20];
__constant__ int dev_GreenCentroid[20];
__constant__ int dev_BlueCentroid[20];
//**********************************
/*
* get RGB values from Initial Centroids (RAW format)
*
*/
/*
 * Reads the initial centroid palette from a RAW file.
 * The file must contain nCentroids consecutive R,G,B byte triples; they are
 * split into the three output arrays (each nCentroids entries long).
 * Returns false if the file cannot be opened or ends prematurely.
 * Fixes vs. original: opened in binary mode ("rb") so bytes are not mangled
 * by text-mode translation on platforms that distinguish the two, and a
 * truncated file now fails instead of silently storing EOF (-1) values.
 */
bool loadPalette(char* filename, int nCentroids, int* redCentroid, int* greenCentroid, int* blueCentroid) {
	FILE *imageFile = fopen(filename, "rb");
	if (imageFile == NULL) {
		return false;
	}
	for (int i = 0; i < nCentroids; i++) {
		// R,G,B centroid triple, nCentroids long
		int r = fgetc(imageFile);
		int g = fgetc(imageFile);
		int b = fgetc(imageFile);
		if (r == EOF || g == EOF || b == EOF) {
			// File shorter than expected: report failure.
			fclose(imageFile);
			return false;
		}
		redCentroid[i] = r;
		greenCentroid[i] = g;
		blueCentroid[i] = b;
		printf("%d, %d, %d\n",redCentroid[i], greenCentroid[i], blueCentroid[i] );
	}
	fclose(imageFile);
	printf("\n");
	return true;
}
/*
* Image file loader (RAW format)
* Each pixel is identified by a triple r, g, b
* These values are taken from a raw file and put into three monodimensional arrays whose length is width*height
*/
/*
 * Loads a RAW image: `size` consecutive R,G,B byte triples are split into
 * the three channel arrays r, g, b (each `size` entries long).
 * Returns false if the file cannot be opened or ends prematurely.
 * Fixes vs. original: binary mode ("rb") instead of text mode, and an
 * explicit EOF check so a truncated file fails instead of filling the
 * arrays with -1.
 */
bool loadRawImage(char* filename, int* r, int* g, int* b, int size) {
	FILE *imageFile = fopen(filename, "rb");
	if (imageFile == NULL) {
		return false;
	}
	for (int i = 0; i < size; i++) {
		r[i] = fgetc(imageFile);
		g[i] = fgetc(imageFile);
		b[i] = fgetc(imageFile);
		if (r[i] == EOF || g[i] == EOF || b[i] == EOF) {
			// Truncated input file.
			fclose(imageFile);
			return false;
		}
	}
	fclose(imageFile);
	return true;
}
/*
* Image file writer (RAW format)
*/
/*
 * Serializes the quantized image back to a RAW file: for every pixel the
 * R,G,B bytes of its assigned centroid are written in sequence.
 * Returns false when the output file cannot be opened for writing.
 */
bool writeRawImage(char* filename, int* labelArray, int* redCentroid, int* greenCentroid, int* blueCentroid, int size){
	FILE *out = fopen(filename, "wb");
	if (out == NULL) {
		return false;
	}
	for (int px = 0; px < size; px++) {
		// Look up the centroid this pixel was assigned to and emit its color.
		int label = labelArray[px];
		fputc((char) redCentroid[label], out);
		fputc((char) greenCentroid[label], out);
		fputc((char) blueCentroid[label], out);
	}
	fclose(out);
	return true;
}
/*******************************************************************/
/*
* Clears arrays before each kernel getClusterLabel iteration
*
*/
/*
 * Resets every per-cluster accumulator before a k-means iteration:
 * channel sums, pixel counters, and the temporary centroid buffers.
 * Launched with a single block; the flattened 2D thread index addresses one
 * cluster slot, so blockDim.x * blockDim.y must be >= dev_nCentroids.
 */
__global__ void clearPaletteArrays(int *dev_sumRed,int *dev_sumGreen,int *dev_sumBlue, int* dev_pixelClusterCounter, int* dev_tempRedCentroid, int* dev_tempGreenCentroid, int* dev_tempBlueCentroid ) {
	// Flatten the 2D thread coordinates of the single block.
	const int tid = threadIdx.y * blockDim.x + threadIdx.x;
	if (tid >= dev_nCentroids)
		return;
	// One thread zeroes one cluster slot in each array (all nCentroids long).
	dev_sumRed[tid] = 0;
	dev_sumGreen[tid] = 0;
	dev_sumBlue[tid] = 0;
	dev_pixelClusterCounter[tid] = 0;
	dev_tempRedCentroid[tid] = 0;
	dev_tempGreenCentroid[tid] = 0;
	dev_tempBlueCentroid[tid] = 0;
}// end clearPaletteArrays
/*
* Clear label array before each kernel getClusterLabel iteration
*/
/*
 * Zeroes the per-pixel label array (dev_size = width*height entries)
 * before each k-means iteration. Launched on the full 2D grid.
 */
__global__ void clearLabelArray(int *dev_labelArray){
	// Global flattened index over the 2D grid of 2D blocks.
	const int col = threadIdx.x + blockIdx.x * blockDim.x;
	const int row = threadIdx.y + blockIdx.y * blockDim.y;
	const int gid = col + row * blockDim.x * gridDim.x;
	if (gid < dev_size)
		dev_labelArray[gid] = 0;
}// end clearLabelArray
/*
* Finds the minimum distance between each triple dev_Red[i] dev_Green[i] dev_Blue[i] and all centroids.
* Then saves the equivalent centroid label in dev_labelArray.
* labelArray is "width*height" long, monodimensional array
*
* INPUT : pixel triple arrays dev_Red, dev_Green, dev_Blue. labelArray that will contains the label for each pixel triple
*/
/*
 * Finds, for every pixel, the centroid with minimum Euclidean distance in
 * RGB space and stores that centroid's index in dev_labelArray
 * (dev_size = width*height entries, one label per pixel).
 * Centroid colors are read from constant memory (dev_*Centroid).
 * Fix vs. original: float literals (500.0f / 2.0f) instead of double ones,
 * avoiding implicit double promotion inside a float kernel.
 */
__global__ void getClusterLabel(int *dev_Red,int *dev_Green,int *dev_Blue,int *dev_labelArray) {
	// Global flattened thread index over the 2D grid.
	int threadID = (threadIdx.x + blockIdx.x * blockDim.x) + (threadIdx.y + blockIdx.y * blockDim.y) * blockDim.x * gridDim.x;
	// 500.0f exceeds the largest possible RGB distance sqrt(3*255^2) ~= 441.7,
	// so the first computed distance always replaces it.
	float min = 500.0f, value;
	// Index of the nearest centroid found so far (becomes the label).
	int index = 0;
	if(threadID < dev_size) {
		// Finding the nearest centroid to current triple identified by threadID thread
		for(int i = 0; i < dev_nCentroids; i++) {
			// Euclidean distance between this pixel and centroid i.
			value = sqrtf(powf((dev_Red[threadID]-dev_RedCentroid[i]),2.0f) + powf((dev_Green[threadID]-dev_GreenCentroid[i]),2.0f) + powf((dev_Blue[threadID]-dev_BlueCentroid[i]),2.0f));
			if(value < min){
				// Saving new nearest centroid and its index.
				min = value;
				index = i;
			}
		}// end for
		// Publish the nearest centroid's index for this pixel.
		dev_labelArray[threadID] = index;
	}// end if
}// end getClusterLabel
/*
* Summing Red, Green, Blue values per cluster
* Counting how many pixels there are in each cluster
*
*/
/*
 * Accumulates, per cluster, the sum of the R, G and B values of all member
 * pixels and counts how many pixels belong to each cluster.
 * Atomics are required: many threads update the same per-cluster slot
 * concurrently, so plain read-modify-write would lose updates.
 */
__global__ void sumCluster(int *dev_Red,int *dev_Green,int *dev_Blue,int *dev_sumRed,int *dev_sumGreen,int *dev_sumBlue,int *dev_labelArray,int *dev_pixelClusterCounter) {
	// Global flattened thread index over the 2D grid.
	const int gid = (threadIdx.x + blockIdx.x * blockDim.x) + (threadIdx.y + blockIdx.y * blockDim.y) * blockDim.x * gridDim.x;
	if (gid >= dev_size)
		return;
	// This pixel's cluster and channel values.
	const int label = dev_labelArray[gid];
	atomicAdd(&dev_sumRed[label], dev_Red[gid]);
	atomicAdd(&dev_sumGreen[label], dev_Green[gid]);
	atomicAdd(&dev_sumBlue[label], dev_Blue[gid]);
	atomicAdd(&dev_pixelClusterCounter[label], 1);
}// end sumCluster
/*
* Calculates the new R,G,B values of the centroids dividing the sum of color (for each channel) by the number of pixels in that cluster
* New values are stored in global memory since the current R,G,B values of the centroids are in read-only constant memory.
*/
/*
 * Computes each cluster's new centroid as the per-channel integer mean of
 * its member pixels. Results go to global memory (dev_temp*Centroid)
 * because the current centroids live in read-only constant memory.
 * Launched with a single block; blockDim.x * blockDim.y must be
 * >= dev_nCentroids.
 * Fix vs. original: empty clusters (pixel count 0) are skipped — the
 * original performed an integer division by zero, which is undefined on the
 * GPU. An empty cluster keeps the zero value written by clearPaletteArrays.
 */
__global__ void newCentroids(int *dev_tempRedCentroid, int *dev_tempGreenCentroid, int *dev_tempBlueCentroid,int* dev_sumRed, int *dev_sumGreen,int *dev_sumBlue, int* dev_pixelClusterCounter) {
	// 1 block, flattened 2D thread index selects the cluster slot.
	int threadID = threadIdx.x + threadIdx.y * blockDim.x;
	if(threadID < dev_nCentroids) {
		int currentPixelCounter = dev_pixelClusterCounter[threadID];
		if (currentPixelCounter > 0) {
			// New RGB centroid = channel sum / member count (integer mean),
			// written to global memory.
			dev_tempRedCentroid[threadID] = dev_sumRed[threadID] / currentPixelCounter;
			dev_tempGreenCentroid[threadID] = dev_sumGreen[threadID] / currentPixelCounter;
			dev_tempBlueCentroid[threadID] = dev_sumBlue[threadID] / currentPixelCounter;
		}
	}
}// end newCentroids
/*******************************************************************/
/*
 * K-means color quantization driver:
 *   1. parse CLI args, load initial centroids (palette) and the RAW image;
 *   2. allocate/copy host and device buffers (centroids in constant memory);
 *   3. run nIterations of clear / label / sum / new-centroid kernels,
 *      swapping updated centroids back into constant memory each pass;
 *   4. copy results back, print debug info, write the quantized RAW image.
 */
int main(int argc, char *argv[]) {
// init device
cudaSetDevice(0);
cudaDeviceSynchronize();
// NOTE(review): cudaThreadSynchronize() is deprecated; cudaDeviceSynchronize()
// above already provides the same synchronization.
cudaThreadSynchronize();
//input raw file, output raw file, input palette raw file containing RGB values of initial centroids
char *inputFile, *outputFile, *palette;
//Pixels' r,g,b values. Centroid's r,g,b values
int *red, *green, *blue, *redCentroid, *greenCentroid, *blueCentroid;
// ref to GPU Pixels'RGB values, Centroids' RGB values
int *dev_Red, *dev_Green, *dev_Blue, *dev_tempRedCentroid, *dev_tempGreenCentroid, *dev_tempBlueCentroid;
// array containing ref to GPU label array variable
int *labelArray, *dev_labelArray;
// local variables for storing image width, height
// number of cluster, number of iterations, linear size of the image ( = width * height)
int width, height, nCentroids, nIterations,size;
//int IMAGE_BYTES, PALETTE_BYTES;
// ref to array where pixels' count are stored
int *pixelClusterCounter, *dev_pixelClusterCounter;
// ref to arrays where sum of RGB values for each cluster are stored
int *sumRed, *sumGreen, *sumBlue;
int *dev_sumRed, *dev_sumGreen, *dev_sumBlue;
// checking arguments (all 7 positional arguments are required)
if (argc > 7) {
inputFile = argv[1];
outputFile = argv[2];
width = atoi(argv[3]);
height = atoi(argv[4]);
palette = argv[5];
nCentroids = atoi(argv[6]); // remind to update hardcoded nCentroids above
if(nCentroids > 256)
nCentroids = 256;
nIterations = atoi(argv[7]);
if(nIterations > 15)
nIterations = 15;
} else {
printf(" USAGE: kmeans.cu <inputfile.raw> <outputfile.raw> nRows nCols palette nCentroids nItarations \n");
printf(" <inputfile.raw>: input .raw file (sequence of bytes)\n");
printf(" <outputfile.raw>: output .raw file\n");
printf(" nRows: the number of rows of the image\n");
printf(" nCols: the number of columns of the image\n");
printf(" palette: RGB initial Centroids");
printf(" nCentroids: number of clusters");
printf(" nIterations: number of iterations of K-Means");
return 0;
}
// Setting image and palette size in bytes
IMAGE_BYTES = width * height * sizeof(int);
PALETTE_BYTES = nCentroids * sizeof(int);
size = width * height;
/* TODO useful for Texture implementation
int ra[width][height];
int ga[width][height];
int ba[width][height];
*/
printf("Image: %s\n",inputFile);
printf("Width: %d, Height: %d\n", width, height);
printf("#Clusters: %d, #Iterations: %d\n", nCentroids, nIterations);
// allocate memory on CPU
red = static_cast<int *>(malloc(IMAGE_BYTES));
green = static_cast<int *>(malloc(IMAGE_BYTES));
blue = static_cast<int *>(malloc(IMAGE_BYTES));
redCentroid = static_cast<int *>(malloc(PALETTE_BYTES));
greenCentroid = static_cast<int *>(malloc(PALETTE_BYTES));
blueCentroid = static_cast<int *>(malloc(PALETTE_BYTES));
labelArray = static_cast<int *>(malloc(IMAGE_BYTES));
sumRed = static_cast<int*>(malloc(PALETTE_BYTES));
sumGreen = static_cast<int*>(malloc(PALETTE_BYTES));
sumBlue = static_cast<int*>(malloc(PALETTE_BYTES));
pixelClusterCounter = static_cast<int*>(malloc(PALETTE_BYTES));
// Setting initial centroids
printf("Initial Centroids: \n");
if(loadPalette(palette, nCentroids, redCentroid, greenCentroid, blueCentroid)) {
} else {
printf("Unable to set Initial Centroids.\n");
}
// Loading image in r, g, b arrays
printf("Image loading...\n");
if (loadRawImage(inputFile, red, green, blue, size)) {
printf("Image loaded!\n");
} else {
printf("NOT loaded!\n");
// NOTE(review): early returns below leak the host allocations above;
// harmless at process exit but worth consolidating into a cleanup path.
return -1;
}
printf("\n");
/* TODO Useful for Texture implementation
int ri = 0;
int co = 0;
for( ri = 0; ri < width; ri++){
for( co = 0; co < height; co++) {
int x = ri+co*width;
ra[ri][co] = red[x];
ga[ri][co] = green[x];
ba[ri][co] = blue[x];
}
}
*/
if(IMAGE_BYTES == 0 || PALETTE_BYTES == 0) {
return -1;
}
// allocate memory on GPU
CUDA_CALL(cudaMalloc((void**) &dev_Red, IMAGE_BYTES));
CUDA_CALL(cudaMalloc((void**) &dev_Green, IMAGE_BYTES));
CUDA_CALL(cudaMalloc((void**) &dev_Blue, IMAGE_BYTES));
CUDA_CALL(cudaMalloc((void**) &dev_tempRedCentroid, PALETTE_BYTES));
CUDA_CALL(cudaMalloc((void**) &dev_tempGreenCentroid, PALETTE_BYTES));
CUDA_CALL(cudaMalloc((void**) &dev_tempBlueCentroid, PALETTE_BYTES));
CUDA_CALL(cudaMalloc((void**) &dev_labelArray, IMAGE_BYTES));
CUDA_CALL(cudaMalloc((void**) &dev_sumRed, PALETTE_BYTES));
CUDA_CALL(cudaMalloc((void**) &dev_sumGreen, PALETTE_BYTES));
CUDA_CALL(cudaMalloc((void**) &dev_sumBlue, PALETTE_BYTES));
CUDA_CALL(cudaMalloc((void**) &dev_pixelClusterCounter, PALETTE_BYTES));
// copy host CPU memory to GPU
CUDA_CALL(cudaMemcpy(dev_Red, red, IMAGE_BYTES, cudaMemcpyHostToDevice));
CUDA_CALL(cudaMemcpy(dev_Green, green, IMAGE_BYTES, cudaMemcpyHostToDevice));
CUDA_CALL(cudaMemcpy(dev_Blue, blue, IMAGE_BYTES, cudaMemcpyHostToDevice));
CUDA_CALL(cudaMemcpy(dev_tempRedCentroid, redCentroid,PALETTE_BYTES,cudaMemcpyHostToDevice ));
CUDA_CALL(cudaMemcpy(dev_tempGreenCentroid, greenCentroid,PALETTE_BYTES,cudaMemcpyHostToDevice ));
CUDA_CALL(cudaMemcpy(dev_tempBlueCentroid, blueCentroid,PALETTE_BYTES,cudaMemcpyHostToDevice ));
CUDA_CALL(cudaMemcpy(dev_labelArray, labelArray, IMAGE_BYTES, cudaMemcpyHostToDevice));
CUDA_CALL(cudaMemcpy(dev_sumRed, sumRed, PALETTE_BYTES, cudaMemcpyHostToDevice));
CUDA_CALL(cudaMemcpy(dev_sumGreen, sumGreen, PALETTE_BYTES, cudaMemcpyHostToDevice));
CUDA_CALL(cudaMemcpy(dev_sumBlue, sumBlue, PALETTE_BYTES, cudaMemcpyHostToDevice));
CUDA_CALL(cudaMemcpy(dev_pixelClusterCounter, pixelClusterCounter, PALETTE_BYTES, cudaMemcpyHostToDevice));
// Initial centroids and problem sizes go to constant memory (read by kernels).
CUDA_CALL(cudaMemcpyToSymbol(dev_RedCentroid, redCentroid, PALETTE_BYTES));
CUDA_CALL(cudaMemcpyToSymbol(dev_GreenCentroid, greenCentroid, PALETTE_BYTES));
CUDA_CALL(cudaMemcpyToSymbol(dev_BlueCentroid, blueCentroid, PALETTE_BYTES));
CUDA_CALL(cudaMemcpyToSymbol(dev_nCentroids,&nCentroids, sizeof(int)));
CUDA_CALL(cudaMemcpyToSymbol(dev_size, &size, sizeof(int)));
// Clearing centroids on host
for(int i = 0; i < nCentroids; i++) {
redCentroid[i] = 0;
greenCentroid[i] = 0;
blueCentroid[i] = 0;
}
// Defining grid size
// NOTE(review): width/BLOCK_SIZE is *integer* division, so ceil() receives an
// already-truncated value and never rounds up; the intended ceiling is
// (width + BLOCK_SIZE - 1) / BLOCK_SIZE. With the current code, images whose
// dimensions are not multiples of BLOCK_SIZE launch too few blocks and the
// trailing pixels are never processed. Confirm and fix upstream.
int BLOCK_X, BLOCK_Y;
BLOCK_X = ceil(width/BLOCK_SIZE);
BLOCK_Y = ceil(height/BLOCK_SIZE);
if(BLOCK_X > GRID_SIZE)
BLOCK_X = GRID_SIZE;
if(BLOCK_Y > GRID_SIZE)
BLOCK_Y = GRID_SIZE;
//2D Grid
//Minimum number of threads that can handle width*height pixels
dim3 dimGRID(BLOCK_X,BLOCK_Y);
//2D Block
//Each dimension is fixed
dim3 dimBLOCK(BLOCK_SIZE,BLOCK_SIZE);
//Starting timer
GpuTimer timer;
timer.Start();
printf("Launching K-Means Kernels.. \n");
//Iteration of kmeans algorithm
for(int i = 0; i < nIterations; i++) {
// Passing image RGB components, palette RGB components, label Array, number of Clusters
// Init arrays' values to 0
// Kernel needs only 1 block since nClusters
clearPaletteArrays<<<1, dimBLOCK>>>(dev_sumRed, dev_sumGreen, dev_sumBlue, dev_pixelClusterCounter, dev_tempRedCentroid, dev_tempGreenCentroid, dev_tempBlueCentroid);
// Init labelarray values to 0
clearLabelArray<<<dimGRID, dimBLOCK>>>(dev_labelArray);
// Calculates the distance from each pixel and all centroids
// Then saves the equivalent label in dev_labelArray
getClusterLabel<<< dimGRID, dimBLOCK >>> (dev_Red, dev_Green, dev_Blue,dev_labelArray);
//Sums RGB values in each Cluster
sumCluster<<<dimGRID, dimBLOCK>>> (dev_Red, dev_Green, dev_Blue, dev_sumRed, dev_sumGreen, dev_sumBlue, dev_labelArray,dev_pixelClusterCounter);
//Finds new RGB Centroids' values
newCentroids<<<1,dimBLOCK >>>(dev_tempRedCentroid, dev_tempGreenCentroid, dev_tempBlueCentroid, dev_sumRed, dev_sumGreen, dev_sumBlue, dev_pixelClusterCounter);
//Old RGB Centroids' values are in constant memory
//Updated RGB Centroids' values are in global memory
//We need a swap
CUDA_CALL(cudaMemcpy(redCentroid, dev_tempRedCentroid, PALETTE_BYTES,cudaMemcpyDeviceToHost));
CUDA_CALL(cudaMemcpy(greenCentroid, dev_tempGreenCentroid, PALETTE_BYTES,cudaMemcpyDeviceToHost));
CUDA_CALL(cudaMemcpy(blueCentroid, dev_tempBlueCentroid, PALETTE_BYTES,cudaMemcpyDeviceToHost));
//Uploading in constant memory updated RGB Centroids' values
CUDA_CALL(cudaMemcpyToSymbol(dev_RedCentroid, redCentroid, PALETTE_BYTES));
CUDA_CALL(cudaMemcpyToSymbol(dev_GreenCentroid, greenCentroid, PALETTE_BYTES));
CUDA_CALL(cudaMemcpyToSymbol(dev_BlueCentroid, blueCentroid, PALETTE_BYTES));
// NOTE(review): timer.Stop() is called inside the loop; only the final call
// matters for the Elapsed() report, so it would be clearer after the loop.
timer.Stop();
}
// DEBUG
CUDA_CALL(cudaMemcpy(labelArray, dev_labelArray, IMAGE_BYTES, cudaMemcpyDeviceToHost));
CUDA_CALL(cudaMemcpy(sumRed, dev_sumRed, PALETTE_BYTES, cudaMemcpyDeviceToHost));
CUDA_CALL(cudaMemcpy(sumGreen, dev_sumGreen, PALETTE_BYTES, cudaMemcpyDeviceToHost));
CUDA_CALL(cudaMemcpy(sumBlue, dev_sumBlue, PALETTE_BYTES, cudaMemcpyDeviceToHost));
CUDA_CALL(cudaMemcpy(pixelClusterCounter, dev_pixelClusterCounter, PALETTE_BYTES, cudaMemcpyDeviceToHost));
printf("Kmeans code ran in: %f msecs.\n", timer.Elapsed());
printf("\n");
// labelArray DEBUG
int counter = 0;
printf("Label Array:\n");
for(int i = 0; i < (size); i++) {
//printf("%d\n", labelArray[i]);
counter++;
}
printf("printing counter %d\n", counter);
counter = 0;
printf("Sum Arrays:\n");
for(int j = 0; j < nCentroids; j++) {
printf("r: %u g: %u b: %u \n", sumRed[j], sumGreen[j], sumBlue[j]);
counter++;
}
printf("\n");
printf("Pixels per centroids:\n");
for(int k = 0; k < nCentroids; k++){
printf("%d centroid: %d pixels\n", k, pixelClusterCounter[k]);
}
printf("\n");
printf("New centroids:\n");
for(int i = 0; i < nCentroids; i++) {
printf("%d, %d, %d \n", redCentroid[i], greenCentroid[i], blueCentroid[i]);
}
// writing...
printf("Image writing...\n");
if (writeRawImage(outputFile,labelArray, redCentroid, greenCentroid, blueCentroid, size)) {
printf("Image written!\n");
} else {
printf("NOT written!\n");
return -1;
}
// Release host and device resources.
free(red);
free(green);
free(blue);
free(redCentroid);
free(greenCentroid);
free(blueCentroid);
free(labelArray);
free(sumRed);
free(sumGreen);
free(sumBlue);
free(pixelClusterCounter);
CUDA_CALL(cudaFree(dev_Red));
CUDA_CALL(cudaFree(dev_Green));
CUDA_CALL(cudaFree(dev_Blue));
CUDA_CALL(cudaFree(dev_tempRedCentroid));
CUDA_CALL(cudaFree(dev_tempGreenCentroid));
CUDA_CALL(cudaFree(dev_tempBlueCentroid));
CUDA_CALL(cudaFree(dev_labelArray));
CUDA_CALL(cudaFree(dev_sumRed));
CUDA_CALL(cudaFree(dev_sumGreen));
CUDA_CALL(cudaFree(dev_sumBlue));
CUDA_CALL(cudaFree(dev_pixelClusterCounter));
printf("That's the end.\n");
return 0;
}
|
988b91f5dca362e9fac73ac835d2a36468ace566.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
#include <float.h>
#include <cstdlib>
#include "../device/device.cu"
// __global__ void
// reduce0(float* g_idata,float* g_odata, unsigned int n) {
// extern __shared__ float temp[];
// int thid = threadIdx.x;
// temp[thid] = g_idata[thid];
// __syncthreads();
// for(int offset = 1;offset < n; offset *= 2) {
// if(thid >= offset)
// temp[thid] += temp[thid - offset];
// __syncthreads();
// }
// g_odata[thid] = temp[thid];
// }
// cluster assignment using randomization
// Assigns each data point to a cluster (deterministically, by thread index
// modulo k — not actually random despite the original variable name) and
// counts the resulting cluster sizes.
// Fixes vs. original: cluster_size increments are now atomic (many threads
// update the same counter concurrently, so the plain `+= 1` lost updates),
// and k <= 0 no longer triggers a modulo-by-zero.
// NOTE(review): the zeroing loop is still racy when launched with more than
// one block — every thread clears cluster_size while others may already be
// incrementing; for multi-block launches the counters should be cleared by a
// prior kernel/memset instead.
__global__
void init_cluster_assignment(int k, int size, int * cluster_size, int * cluster_assignment){
    if (k <= 0)
        return;
    for (int i = 0; i < k; i++)
        cluster_size[i] = 0;
    int index = blockIdx.x * blockDim.x + threadIdx.x;
    int stride = blockDim.x * gridDim.x;
    // Every element handled by this thread goes to the same cluster.
    int group = index % k;
    for (int i = index; i < size; i += stride){
        cluster_assignment[i] = group;
        atomicAdd(&cluster_size[group], 1);
    }
}
// Recomputes each cluster centroid as the mean of its members' feature
// vectors and sets response[0] = 1 if any centroid coordinate changed
// (0 otherwise).
// Fix vs. original: the per-thread scratch matrix allocated with `new` was
// never freed, leaking device-heap memory on every launch; it is now
// released before the kernel returns.
// NOTE(review): every thread redundantly accumulates over the whole data
// set, and response[0] is written by multiple threads without
// synchronization — all writers agree (0 then possibly 1), but the kernel
// would benefit from a cooperative restructuring.
__global__
void update_clusters(int k, float ** cluster, const int * cluster_assignment, int data_size, \
    int dimensions, float ** feature_vector,const int * cluster_size, int * response){
    response[0] = 0;
    int index = blockIdx.x * blockDim.x + threadIdx.x;
    int stride = blockDim.x * gridDim.x;
    // temp[c][d] = sum of feature component d over all points in cluster c.
    float ** temp = new float* [k];
    for (int i = 0; i < k; i++){
        temp[i] = new float[dimensions];
        for (int j = 0; j < dimensions; j++){
            temp[i][j] = 0.0f;
        }
    }
    for (int i = 0; i < data_size; i++){
        for (int j = 0; j < dimensions; j++){
            temp[cluster_assignment[i]][j] += feature_vector[i][j];
        }
    }
    for (int i = index; i < k; i += stride){
        if (cluster_size[i] == 0){
            // Empty cluster: keep its previous centroid.
            continue;
        }
        for (int j = 0; j < dimensions; j++){
            if (cluster[i][j] != temp[i][j] / cluster_size[i]){
                response[0] = 1; // at least one centroid coordinate moved
            }
            cluster[i][j] = temp[i][j] / cluster_size[i];
        }
    }
    // Release the scratch matrix (was leaked in the original).
    for (int i = 0; i < k; i++)
        delete[] temp[i];
    delete[] temp;
}
// Reassigns every data point to its nearest current centroid.
// Grid-stride loop, so any launch configuration covers all `size` points.
__global__
void update_cluster_assignment(int k, int * cluster_assignment, float ** cluster, int size, int dimension, float ** features){
// Global thread index and total thread count (stride) over the 1D grid.
int index = blockIdx.x * blockDim.x + threadIdx.x;
int stride = blockDim.x * gridDim.x;
for (int i=index; i<size; i+=stride){
// find_nearest_center is defined in ../device/device.cu (not visible here).
cluster_assignment[i] = find_nearest_center(k, features[i], dimension, cluster);
}
} | 988b91f5dca362e9fac73ac835d2a36468ace566.cu | #include <float.h>
#include <cstdlib>
#include "../device/device.cu"
// __global__ void
// reduce0(float* g_idata,float* g_odata, unsigned int n) {
// extern __shared__ float temp[];
// int thid = threadIdx.x;
// temp[thid] = g_idata[thid];
// __syncthreads();
// for(int offset = 1;offset < n; offset *= 2) {
// if(thid >= offset)
// temp[thid] += temp[thid - offset];
// __syncthreads();
// }
// g_odata[thid] = temp[thid];
// }
// cluster assignment using randomization
// Assigns each data point to a cluster (deterministically, by thread index
// modulo k — not actually random despite the original variable name) and
// counts the resulting cluster sizes.
// Fixes vs. original: cluster_size increments are now atomic (many threads
// update the same counter concurrently, so the plain `+= 1` lost updates),
// and k <= 0 no longer triggers a modulo-by-zero.
// NOTE(review): the zeroing loop is still racy when launched with more than
// one block — every thread clears cluster_size while others may already be
// incrementing; for multi-block launches the counters should be cleared by a
// prior kernel/memset instead.
__global__
void init_cluster_assignment(int k, int size, int * cluster_size, int * cluster_assignment){
    if (k <= 0)
        return;
    for (int i = 0; i < k; i++)
        cluster_size[i] = 0;
    int index = blockIdx.x * blockDim.x + threadIdx.x;
    int stride = blockDim.x * gridDim.x;
    // Every element handled by this thread goes to the same cluster.
    int group = index % k;
    for (int i = index; i < size; i += stride){
        cluster_assignment[i] = group;
        atomicAdd(&cluster_size[group], 1);
    }
}
// Recomputes each cluster centroid as the mean of its members' feature
// vectors and sets response[0] = 1 if any centroid coordinate changed
// (0 otherwise).
// Fix vs. original: the per-thread scratch matrix allocated with `new` was
// never freed, leaking device-heap memory on every launch; it is now
// released before the kernel returns.
// NOTE(review): every thread redundantly accumulates over the whole data
// set, and response[0] is written by multiple threads without
// synchronization — all writers agree (0 then possibly 1), but the kernel
// would benefit from a cooperative restructuring.
__global__
void update_clusters(int k, float ** cluster, const int * cluster_assignment, int data_size, \
    int dimensions, float ** feature_vector,const int * cluster_size, int * response){
    response[0] = 0;
    int index = blockIdx.x * blockDim.x + threadIdx.x;
    int stride = blockDim.x * gridDim.x;
    // temp[c][d] = sum of feature component d over all points in cluster c.
    float ** temp = new float* [k];
    for (int i = 0; i < k; i++){
        temp[i] = new float[dimensions];
        for (int j = 0; j < dimensions; j++){
            temp[i][j] = 0.0f;
        }
    }
    for (int i = 0; i < data_size; i++){
        for (int j = 0; j < dimensions; j++){
            temp[cluster_assignment[i]][j] += feature_vector[i][j];
        }
    }
    for (int i = index; i < k; i += stride){
        if (cluster_size[i] == 0){
            // Empty cluster: keep its previous centroid.
            continue;
        }
        for (int j = 0; j < dimensions; j++){
            if (cluster[i][j] != temp[i][j] / cluster_size[i]){
                response[0] = 1; // at least one centroid coordinate moved
            }
            cluster[i][j] = temp[i][j] / cluster_size[i];
        }
    }
    // Release the scratch matrix (was leaked in the original).
    for (int i = 0; i < k; i++)
        delete[] temp[i];
    delete[] temp;
}
// Reassigns every data point to its nearest current centroid.
// Grid-stride loop, so any launch configuration covers all `size` points.
__global__
void update_cluster_assignment(int k, int * cluster_assignment, float ** cluster, int size, int dimension, float ** features){
// Global thread index and total thread count (stride) over the 1D grid.
int index = blockIdx.x * blockDim.x + threadIdx.x;
int stride = blockDim.x * gridDim.x;
for (int i=index; i<size; i+=stride){
// find_nearest_center is defined in ../device/device.cu (not visible here).
cluster_assignment[i] = find_nearest_center(k, features[i], dimension, cluster);
}
} |
71af820f087b058c9a6b5e47b57d55cbfac22d6a.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
#include "des_kernel_encrypt.h"
#include "des_kernel_salt_instances.h"
#ifdef DESGPU_COMPILE_ALL_SALTS
// Auto-generated per-salt instantiations of the bitsliced DES kernel.
// Each wrapper launches des_25_encrypt with a template-argument list that
// hard-codes the salt-dependent bit routing for one salt value (the decimal
// function suffix). Presumably emitted by a code generator — TODO confirm;
// do not hand-edit individual argument lists, regenerate instead.
void des_25_encrypt_salt1792(const size_t num_blocks, const size_t threads_per_block, vtype* const unchecked_hashes, const vtype* const bitsplitted_keys)
{
hipLaunchKernelGGL(( des_25_encrypt<31, 0, 1, 2, 3, 4, 3, 4, 21, 22, 23, 8, 15, 16, 17, 18, 19, 20, 19, 20, 5, 6, 7, 24, 63, 32, 33, 34, 35, 36, 35, 36, 53, 54, 55, 40, 47, 48, 49, 50, 51, 52, 51, 52, 37, 38, 39, 56>), dim3(num_blocks), dim3(threads_per_block), 0, 0, unchecked_hashes, bitsplitted_keys);
}
void des_25_encrypt_salt1793(const size_t num_blocks, const size_t threads_per_block, vtype* const unchecked_hashes, const vtype* const bitsplitted_keys)
{
hipLaunchKernelGGL(( des_25_encrypt<15, 0, 1, 2, 3, 4, 3, 4, 21, 22, 23, 8, 31, 16, 17, 18, 19, 20, 19, 20, 5, 6, 7, 24, 47, 32, 33, 34, 35, 36, 35, 36, 53, 54, 55, 40, 63, 48, 49, 50, 51, 52, 51, 52, 37, 38, 39, 56>), dim3(num_blocks), dim3(threads_per_block), 0, 0, unchecked_hashes, bitsplitted_keys);
}
void des_25_encrypt_salt1794(const size_t num_blocks, const size_t threads_per_block, vtype* const unchecked_hashes, const vtype* const bitsplitted_keys)
{
hipLaunchKernelGGL(( des_25_encrypt<31, 16, 1, 2, 3, 4, 3, 4, 21, 22, 23, 8, 15, 0, 17, 18, 19, 20, 19, 20, 5, 6, 7, 24, 63, 48, 33, 34, 35, 36, 35, 36, 53, 54, 55, 40, 47, 32, 49, 50, 51, 52, 51, 52, 37, 38, 39, 56>), dim3(num_blocks), dim3(threads_per_block), 0, 0, unchecked_hashes, bitsplitted_keys);
}
void des_25_encrypt_salt1795(const size_t num_blocks, const size_t threads_per_block, vtype* const unchecked_hashes, const vtype* const bitsplitted_keys)
{
hipLaunchKernelGGL(( des_25_encrypt<15, 16, 1, 2, 3, 4, 3, 4, 21, 22, 23, 8, 31, 0, 17, 18, 19, 20, 19, 20, 5, 6, 7, 24, 47, 48, 33, 34, 35, 36, 35, 36, 53, 54, 55, 40, 63, 32, 49, 50, 51, 52, 51, 52, 37, 38, 39, 56>), dim3(num_blocks), dim3(threads_per_block), 0, 0, unchecked_hashes, bitsplitted_keys);
}
void des_25_encrypt_salt1796(const size_t num_blocks, const size_t threads_per_block, vtype* const unchecked_hashes, const vtype* const bitsplitted_keys)
{
hipLaunchKernelGGL(( des_25_encrypt<31, 0, 17, 2, 3, 4, 3, 4, 21, 22, 23, 8, 15, 16, 1, 18, 19, 20, 19, 20, 5, 6, 7, 24, 63, 32, 49, 34, 35, 36, 35, 36, 53, 54, 55, 40, 47, 48, 33, 50, 51, 52, 51, 52, 37, 38, 39, 56>), dim3(num_blocks), dim3(threads_per_block), 0, 0, unchecked_hashes, bitsplitted_keys);
}
void des_25_encrypt_salt1797(const size_t num_blocks, const size_t threads_per_block, vtype* const unchecked_hashes, const vtype* const bitsplitted_keys)
{
hipLaunchKernelGGL(( des_25_encrypt<15, 0, 17, 2, 3, 4, 3, 4, 21, 22, 23, 8, 31, 16, 1, 18, 19, 20, 19, 20, 5, 6, 7, 24, 47, 32, 49, 34, 35, 36, 35, 36, 53, 54, 55, 40, 63, 48, 33, 50, 51, 52, 51, 52, 37, 38, 39, 56>), dim3(num_blocks), dim3(threads_per_block), 0, 0, unchecked_hashes, bitsplitted_keys);
}
void des_25_encrypt_salt1798(const size_t num_blocks, const size_t threads_per_block, vtype* const unchecked_hashes, const vtype* const bitsplitted_keys)
{
hipLaunchKernelGGL(( des_25_encrypt<31, 16, 17, 2, 3, 4, 3, 4, 21, 22, 23, 8, 15, 0, 1, 18, 19, 20, 19, 20, 5, 6, 7, 24, 63, 48, 49, 34, 35, 36, 35, 36, 53, 54, 55, 40, 47, 32, 33, 50, 51, 52, 51, 52, 37, 38, 39, 56>), dim3(num_blocks), dim3(threads_per_block), 0, 0, unchecked_hashes, bitsplitted_keys);
}
void des_25_encrypt_salt1799(const size_t num_blocks, const size_t threads_per_block, vtype* const unchecked_hashes, const vtype* const bitsplitted_keys)
{
hipLaunchKernelGGL(( des_25_encrypt<15, 16, 17, 2, 3, 4, 3, 4, 21, 22, 23, 8, 31, 0, 1, 18, 19, 20, 19, 20, 5, 6, 7, 24, 47, 48, 49, 34, 35, 36, 35, 36, 53, 54, 55, 40, 63, 32, 33, 50, 51, 52, 51, 52, 37, 38, 39, 56>), dim3(num_blocks), dim3(threads_per_block), 0, 0, unchecked_hashes, bitsplitted_keys);
}
void des_25_encrypt_salt1800(const size_t num_blocks, const size_t threads_per_block, vtype* const unchecked_hashes, const vtype* const bitsplitted_keys)
{
hipLaunchKernelGGL(( des_25_encrypt<31, 0, 1, 18, 3, 4, 3, 4, 21, 22, 23, 8, 15, 16, 17, 2, 19, 20, 19, 20, 5, 6, 7, 24, 63, 32, 33, 50, 35, 36, 35, 36, 53, 54, 55, 40, 47, 48, 49, 34, 51, 52, 51, 52, 37, 38, 39, 56>), dim3(num_blocks), dim3(threads_per_block), 0, 0, unchecked_hashes, bitsplitted_keys);
}
void des_25_encrypt_salt1801(const size_t num_blocks, const size_t threads_per_block, vtype* const unchecked_hashes, const vtype* const bitsplitted_keys)
{
hipLaunchKernelGGL(( des_25_encrypt<15, 0, 1, 18, 3, 4, 3, 4, 21, 22, 23, 8, 31, 16, 17, 2, 19, 20, 19, 20, 5, 6, 7, 24, 47, 32, 33, 50, 35, 36, 35, 36, 53, 54, 55, 40, 63, 48, 49, 34, 51, 52, 51, 52, 37, 38, 39, 56>), dim3(num_blocks), dim3(threads_per_block), 0, 0, unchecked_hashes, bitsplitted_keys);
}
void des_25_encrypt_salt1802(const size_t num_blocks, const size_t threads_per_block, vtype* const unchecked_hashes, const vtype* const bitsplitted_keys)
{
hipLaunchKernelGGL(( des_25_encrypt<31, 16, 1, 18, 3, 4, 3, 4, 21, 22, 23, 8, 15, 0, 17, 2, 19, 20, 19, 20, 5, 6, 7, 24, 63, 48, 33, 50, 35, 36, 35, 36, 53, 54, 55, 40, 47, 32, 49, 34, 51, 52, 51, 52, 37, 38, 39, 56>), dim3(num_blocks), dim3(threads_per_block), 0, 0, unchecked_hashes, bitsplitted_keys);
}
void des_25_encrypt_salt1803(const size_t num_blocks, const size_t threads_per_block, vtype* const unchecked_hashes, const vtype* const bitsplitted_keys)
{
hipLaunchKernelGGL(( des_25_encrypt<15, 16, 1, 18, 3, 4, 3, 4, 21, 22, 23, 8, 31, 0, 17, 2, 19, 20, 19, 20, 5, 6, 7, 24, 47, 48, 33, 50, 35, 36, 35, 36, 53, 54, 55, 40, 63, 32, 49, 34, 51, 52, 51, 52, 37, 38, 39, 56>), dim3(num_blocks), dim3(threads_per_block), 0, 0, unchecked_hashes, bitsplitted_keys);
}
void des_25_encrypt_salt1804(const size_t num_blocks, const size_t threads_per_block, vtype* const unchecked_hashes, const vtype* const bitsplitted_keys)
{
hipLaunchKernelGGL(( des_25_encrypt<31, 0, 17, 18, 3, 4, 3, 4, 21, 22, 23, 8, 15, 16, 1, 2, 19, 20, 19, 20, 5, 6, 7, 24, 63, 32, 49, 50, 35, 36, 35, 36, 53, 54, 55, 40, 47, 48, 33, 34, 51, 52, 51, 52, 37, 38, 39, 56>), dim3(num_blocks), dim3(threads_per_block), 0, 0, unchecked_hashes, bitsplitted_keys);
}
void des_25_encrypt_salt1805(const size_t num_blocks, const size_t threads_per_block, vtype* const unchecked_hashes, const vtype* const bitsplitted_keys)
{
hipLaunchKernelGGL(( des_25_encrypt<15, 0, 17, 18, 3, 4, 3, 4, 21, 22, 23, 8, 31, 16, 1, 2, 19, 20, 19, 20, 5, 6, 7, 24, 47, 32, 49, 50, 35, 36, 35, 36, 53, 54, 55, 40, 63, 48, 33, 34, 51, 52, 51, 52, 37, 38, 39, 56>), dim3(num_blocks), dim3(threads_per_block), 0, 0, unchecked_hashes, bitsplitted_keys);
}
void des_25_encrypt_salt1806(const size_t num_blocks, const size_t threads_per_block, vtype* const unchecked_hashes, const vtype* const bitsplitted_keys)
{
hipLaunchKernelGGL(( des_25_encrypt<31, 16, 17, 18, 3, 4, 3, 4, 21, 22, 23, 8, 15, 0, 1, 2, 19, 20, 19, 20, 5, 6, 7, 24, 63, 48, 49, 50, 35, 36, 35, 36, 53, 54, 55, 40, 47, 32, 33, 34, 51, 52, 51, 52, 37, 38, 39, 56>), dim3(num_blocks), dim3(threads_per_block), 0, 0, unchecked_hashes, bitsplitted_keys);
}
void des_25_encrypt_salt1807(const size_t num_blocks, const size_t threads_per_block, vtype* const unchecked_hashes, const vtype* const bitsplitted_keys)
{
hipLaunchKernelGGL(( des_25_encrypt<15, 16, 17, 18, 3, 4, 3, 4, 21, 22, 23, 8, 31, 0, 1, 2, 19, 20, 19, 20, 5, 6, 7, 24, 47, 48, 49, 50, 35, 36, 35, 36, 53, 54, 55, 40, 63, 32, 33, 34, 51, 52, 51, 52, 37, 38, 39, 56>), dim3(num_blocks), dim3(threads_per_block), 0, 0, unchecked_hashes, bitsplitted_keys);
}
void des_25_encrypt_salt1808(const size_t num_blocks, const size_t threads_per_block, vtype* const unchecked_hashes, const vtype* const bitsplitted_keys)
{
hipLaunchKernelGGL(( des_25_encrypt<31, 0, 1, 2, 19, 4, 3, 4, 21, 22, 23, 8, 15, 16, 17, 18, 3, 20, 19, 20, 5, 6, 7, 24, 63, 32, 33, 34, 51, 36, 35, 36, 53, 54, 55, 40, 47, 48, 49, 50, 35, 52, 51, 52, 37, 38, 39, 56>), dim3(num_blocks), dim3(threads_per_block), 0, 0, unchecked_hashes, bitsplitted_keys);
}
void des_25_encrypt_salt1809(const size_t num_blocks, const size_t threads_per_block, vtype* const unchecked_hashes, const vtype* const bitsplitted_keys)
{
hipLaunchKernelGGL(( des_25_encrypt<15, 0, 1, 2, 19, 4, 3, 4, 21, 22, 23, 8, 31, 16, 17, 18, 3, 20, 19, 20, 5, 6, 7, 24, 47, 32, 33, 34, 51, 36, 35, 36, 53, 54, 55, 40, 63, 48, 49, 50, 35, 52, 51, 52, 37, 38, 39, 56>), dim3(num_blocks), dim3(threads_per_block), 0, 0, unchecked_hashes, bitsplitted_keys);
}
void des_25_encrypt_salt1810(const size_t num_blocks, const size_t threads_per_block, vtype* const unchecked_hashes, const vtype* const bitsplitted_keys)
{
hipLaunchKernelGGL(( des_25_encrypt<31, 16, 1, 2, 19, 4, 3, 4, 21, 22, 23, 8, 15, 0, 17, 18, 3, 20, 19, 20, 5, 6, 7, 24, 63, 48, 33, 34, 51, 36, 35, 36, 53, 54, 55, 40, 47, 32, 49, 50, 35, 52, 51, 52, 37, 38, 39, 56>), dim3(num_blocks), dim3(threads_per_block), 0, 0, unchecked_hashes, bitsplitted_keys);
}
void des_25_encrypt_salt1811(const size_t num_blocks, const size_t threads_per_block, vtype* const unchecked_hashes, const vtype* const bitsplitted_keys)
{
hipLaunchKernelGGL(( des_25_encrypt<15, 16, 1, 2, 19, 4, 3, 4, 21, 22, 23, 8, 31, 0, 17, 18, 3, 20, 19, 20, 5, 6, 7, 24, 47, 48, 33, 34, 51, 36, 35, 36, 53, 54, 55, 40, 63, 32, 49, 50, 35, 52, 51, 52, 37, 38, 39, 56>), dim3(num_blocks), dim3(threads_per_block), 0, 0, unchecked_hashes, bitsplitted_keys);
}
void des_25_encrypt_salt1812(const size_t num_blocks, const size_t threads_per_block, vtype* const unchecked_hashes, const vtype* const bitsplitted_keys)
{
hipLaunchKernelGGL(( des_25_encrypt<31, 0, 17, 2, 19, 4, 3, 4, 21, 22, 23, 8, 15, 16, 1, 18, 3, 20, 19, 20, 5, 6, 7, 24, 63, 32, 49, 34, 51, 36, 35, 36, 53, 54, 55, 40, 47, 48, 33, 50, 35, 52, 51, 52, 37, 38, 39, 56>), dim3(num_blocks), dim3(threads_per_block), 0, 0, unchecked_hashes, bitsplitted_keys);
}
void des_25_encrypt_salt1813(const size_t num_blocks, const size_t threads_per_block, vtype* const unchecked_hashes, const vtype* const bitsplitted_keys)
{
hipLaunchKernelGGL(( des_25_encrypt<15, 0, 17, 2, 19, 4, 3, 4, 21, 22, 23, 8, 31, 16, 1, 18, 3, 20, 19, 20, 5, 6, 7, 24, 47, 32, 49, 34, 51, 36, 35, 36, 53, 54, 55, 40, 63, 48, 33, 50, 35, 52, 51, 52, 37, 38, 39, 56>), dim3(num_blocks), dim3(threads_per_block), 0, 0, unchecked_hashes, bitsplitted_keys);
}
void des_25_encrypt_salt1814(const size_t num_blocks, const size_t threads_per_block, vtype* const unchecked_hashes, const vtype* const bitsplitted_keys)
{
hipLaunchKernelGGL(( des_25_encrypt<31, 16, 17, 2, 19, 4, 3, 4, 21, 22, 23, 8, 15, 0, 1, 18, 3, 20, 19, 20, 5, 6, 7, 24, 63, 48, 49, 34, 51, 36, 35, 36, 53, 54, 55, 40, 47, 32, 33, 50, 35, 52, 51, 52, 37, 38, 39, 56>), dim3(num_blocks), dim3(threads_per_block), 0, 0, unchecked_hashes, bitsplitted_keys);
}
void des_25_encrypt_salt1815(const size_t num_blocks, const size_t threads_per_block, vtype* const unchecked_hashes, const vtype* const bitsplitted_keys)
{
hipLaunchKernelGGL(( des_25_encrypt<15, 16, 17, 2, 19, 4, 3, 4, 21, 22, 23, 8, 31, 0, 1, 18, 3, 20, 19, 20, 5, 6, 7, 24, 47, 48, 49, 34, 51, 36, 35, 36, 53, 54, 55, 40, 63, 32, 33, 50, 35, 52, 51, 52, 37, 38, 39, 56>), dim3(num_blocks), dim3(threads_per_block), 0, 0, unchecked_hashes, bitsplitted_keys);
}
void des_25_encrypt_salt1816(const size_t num_blocks, const size_t threads_per_block, vtype* const unchecked_hashes, const vtype* const bitsplitted_keys)
{
hipLaunchKernelGGL(( des_25_encrypt<31, 0, 1, 18, 19, 4, 3, 4, 21, 22, 23, 8, 15, 16, 17, 2, 3, 20, 19, 20, 5, 6, 7, 24, 63, 32, 33, 50, 51, 36, 35, 36, 53, 54, 55, 40, 47, 48, 49, 34, 35, 52, 51, 52, 37, 38, 39, 56>), dim3(num_blocks), dim3(threads_per_block), 0, 0, unchecked_hashes, bitsplitted_keys);
}
void des_25_encrypt_salt1817(const size_t num_blocks, const size_t threads_per_block, vtype* const unchecked_hashes, const vtype* const bitsplitted_keys)
{
hipLaunchKernelGGL(( des_25_encrypt<15, 0, 1, 18, 19, 4, 3, 4, 21, 22, 23, 8, 31, 16, 17, 2, 3, 20, 19, 20, 5, 6, 7, 24, 47, 32, 33, 50, 51, 36, 35, 36, 53, 54, 55, 40, 63, 48, 49, 34, 35, 52, 51, 52, 37, 38, 39, 56>), dim3(num_blocks), dim3(threads_per_block), 0, 0, unchecked_hashes, bitsplitted_keys);
}
void des_25_encrypt_salt1818(const size_t num_blocks, const size_t threads_per_block, vtype* const unchecked_hashes, const vtype* const bitsplitted_keys)
{
hipLaunchKernelGGL(( des_25_encrypt<31, 16, 1, 18, 19, 4, 3, 4, 21, 22, 23, 8, 15, 0, 17, 2, 3, 20, 19, 20, 5, 6, 7, 24, 63, 48, 33, 50, 51, 36, 35, 36, 53, 54, 55, 40, 47, 32, 49, 34, 35, 52, 51, 52, 37, 38, 39, 56>), dim3(num_blocks), dim3(threads_per_block), 0, 0, unchecked_hashes, bitsplitted_keys);
}
void des_25_encrypt_salt1819(const size_t num_blocks, const size_t threads_per_block, vtype* const unchecked_hashes, const vtype* const bitsplitted_keys)
{
hipLaunchKernelGGL(( des_25_encrypt<15, 16, 1, 18, 19, 4, 3, 4, 21, 22, 23, 8, 31, 0, 17, 2, 3, 20, 19, 20, 5, 6, 7, 24, 47, 48, 33, 50, 51, 36, 35, 36, 53, 54, 55, 40, 63, 32, 49, 34, 35, 52, 51, 52, 37, 38, 39, 56>), dim3(num_blocks), dim3(threads_per_block), 0, 0, unchecked_hashes, bitsplitted_keys);
}
void des_25_encrypt_salt1820(const size_t num_blocks, const size_t threads_per_block, vtype* const unchecked_hashes, const vtype* const bitsplitted_keys)
{
hipLaunchKernelGGL(( des_25_encrypt<31, 0, 17, 18, 19, 4, 3, 4, 21, 22, 23, 8, 15, 16, 1, 2, 3, 20, 19, 20, 5, 6, 7, 24, 63, 32, 49, 50, 51, 36, 35, 36, 53, 54, 55, 40, 47, 48, 33, 34, 35, 52, 51, 52, 37, 38, 39, 56>), dim3(num_blocks), dim3(threads_per_block), 0, 0, unchecked_hashes, bitsplitted_keys);
}
void des_25_encrypt_salt1821(const size_t num_blocks, const size_t threads_per_block, vtype* const unchecked_hashes, const vtype* const bitsplitted_keys)
{
hipLaunchKernelGGL(( des_25_encrypt<15, 0, 17, 18, 19, 4, 3, 4, 21, 22, 23, 8, 31, 16, 1, 2, 3, 20, 19, 20, 5, 6, 7, 24, 47, 32, 49, 50, 51, 36, 35, 36, 53, 54, 55, 40, 63, 48, 33, 34, 35, 52, 51, 52, 37, 38, 39, 56>), dim3(num_blocks), dim3(threads_per_block), 0, 0, unchecked_hashes, bitsplitted_keys);
}
void des_25_encrypt_salt1822(const size_t num_blocks, const size_t threads_per_block, vtype* const unchecked_hashes, const vtype* const bitsplitted_keys)
{
hipLaunchKernelGGL(( des_25_encrypt<31, 16, 17, 18, 19, 4, 3, 4, 21, 22, 23, 8, 15, 0, 1, 2, 3, 20, 19, 20, 5, 6, 7, 24, 63, 48, 49, 50, 51, 36, 35, 36, 53, 54, 55, 40, 47, 32, 33, 34, 35, 52, 51, 52, 37, 38, 39, 56>), dim3(num_blocks), dim3(threads_per_block), 0, 0, unchecked_hashes, bitsplitted_keys);
}
void des_25_encrypt_salt1823(const size_t num_blocks, const size_t threads_per_block, vtype* const unchecked_hashes, const vtype* const bitsplitted_keys)
{
hipLaunchKernelGGL(( des_25_encrypt<15, 16, 17, 18, 19, 4, 3, 4, 21, 22, 23, 8, 31, 0, 1, 2, 3, 20, 19, 20, 5, 6, 7, 24, 47, 48, 49, 50, 51, 36, 35, 36, 53, 54, 55, 40, 63, 32, 33, 34, 35, 52, 51, 52, 37, 38, 39, 56>), dim3(num_blocks), dim3(threads_per_block), 0, 0, unchecked_hashes, bitsplitted_keys);
}
void des_25_encrypt_salt1824(const size_t num_blocks, const size_t threads_per_block, vtype* const unchecked_hashes, const vtype* const bitsplitted_keys)
{
hipLaunchKernelGGL(( des_25_encrypt<31, 0, 1, 2, 3, 20, 3, 4, 21, 22, 23, 8, 15, 16, 17, 18, 19, 4, 19, 20, 5, 6, 7, 24, 63, 32, 33, 34, 35, 52, 35, 36, 53, 54, 55, 40, 47, 48, 49, 50, 51, 36, 51, 52, 37, 38, 39, 56>), dim3(num_blocks), dim3(threads_per_block), 0, 0, unchecked_hashes, bitsplitted_keys);
}
void des_25_encrypt_salt1825(const size_t num_blocks, const size_t threads_per_block, vtype* const unchecked_hashes, const vtype* const bitsplitted_keys)
{
hipLaunchKernelGGL(( des_25_encrypt<15, 0, 1, 2, 3, 20, 3, 4, 21, 22, 23, 8, 31, 16, 17, 18, 19, 4, 19, 20, 5, 6, 7, 24, 47, 32, 33, 34, 35, 52, 35, 36, 53, 54, 55, 40, 63, 48, 49, 50, 51, 36, 51, 52, 37, 38, 39, 56>), dim3(num_blocks), dim3(threads_per_block), 0, 0, unchecked_hashes, bitsplitted_keys);
}
void des_25_encrypt_salt1826(const size_t num_blocks, const size_t threads_per_block, vtype* const unchecked_hashes, const vtype* const bitsplitted_keys)
{
hipLaunchKernelGGL(( des_25_encrypt<31, 16, 1, 2, 3, 20, 3, 4, 21, 22, 23, 8, 15, 0, 17, 18, 19, 4, 19, 20, 5, 6, 7, 24, 63, 48, 33, 34, 35, 52, 35, 36, 53, 54, 55, 40, 47, 32, 49, 50, 51, 36, 51, 52, 37, 38, 39, 56>), dim3(num_blocks), dim3(threads_per_block), 0, 0, unchecked_hashes, bitsplitted_keys);
}
void des_25_encrypt_salt1827(const size_t num_blocks, const size_t threads_per_block, vtype* const unchecked_hashes, const vtype* const bitsplitted_keys)
{
hipLaunchKernelGGL(( des_25_encrypt<15, 16, 1, 2, 3, 20, 3, 4, 21, 22, 23, 8, 31, 0, 17, 18, 19, 4, 19, 20, 5, 6, 7, 24, 47, 48, 33, 34, 35, 52, 35, 36, 53, 54, 55, 40, 63, 32, 49, 50, 51, 36, 51, 52, 37, 38, 39, 56>), dim3(num_blocks), dim3(threads_per_block), 0, 0, unchecked_hashes, bitsplitted_keys);
}
void des_25_encrypt_salt1828(const size_t num_blocks, const size_t threads_per_block, vtype* const unchecked_hashes, const vtype* const bitsplitted_keys)
{
hipLaunchKernelGGL(( des_25_encrypt<31, 0, 17, 2, 3, 20, 3, 4, 21, 22, 23, 8, 15, 16, 1, 18, 19, 4, 19, 20, 5, 6, 7, 24, 63, 32, 49, 34, 35, 52, 35, 36, 53, 54, 55, 40, 47, 48, 33, 50, 51, 36, 51, 52, 37, 38, 39, 56>), dim3(num_blocks), dim3(threads_per_block), 0, 0, unchecked_hashes, bitsplitted_keys);
}
void des_25_encrypt_salt1829(const size_t num_blocks, const size_t threads_per_block, vtype* const unchecked_hashes, const vtype* const bitsplitted_keys)
{
hipLaunchKernelGGL(( des_25_encrypt<15, 0, 17, 2, 3, 20, 3, 4, 21, 22, 23, 8, 31, 16, 1, 18, 19, 4, 19, 20, 5, 6, 7, 24, 47, 32, 49, 34, 35, 52, 35, 36, 53, 54, 55, 40, 63, 48, 33, 50, 51, 36, 51, 52, 37, 38, 39, 56>), dim3(num_blocks), dim3(threads_per_block), 0, 0, unchecked_hashes, bitsplitted_keys);
}
void des_25_encrypt_salt1830(const size_t num_blocks, const size_t threads_per_block, vtype* const unchecked_hashes, const vtype* const bitsplitted_keys)
{
hipLaunchKernelGGL(( des_25_encrypt<31, 16, 17, 2, 3, 20, 3, 4, 21, 22, 23, 8, 15, 0, 1, 18, 19, 4, 19, 20, 5, 6, 7, 24, 63, 48, 49, 34, 35, 52, 35, 36, 53, 54, 55, 40, 47, 32, 33, 50, 51, 36, 51, 52, 37, 38, 39, 56>), dim3(num_blocks), dim3(threads_per_block), 0, 0, unchecked_hashes, bitsplitted_keys);
}
void des_25_encrypt_salt1831(const size_t num_blocks, const size_t threads_per_block, vtype* const unchecked_hashes, const vtype* const bitsplitted_keys)
{
hipLaunchKernelGGL(( des_25_encrypt<15, 16, 17, 2, 3, 20, 3, 4, 21, 22, 23, 8, 31, 0, 1, 18, 19, 4, 19, 20, 5, 6, 7, 24, 47, 48, 49, 34, 35, 52, 35, 36, 53, 54, 55, 40, 63, 32, 33, 50, 51, 36, 51, 52, 37, 38, 39, 56>), dim3(num_blocks), dim3(threads_per_block), 0, 0, unchecked_hashes, bitsplitted_keys);
}
void des_25_encrypt_salt1832(const size_t num_blocks, const size_t threads_per_block, vtype* const unchecked_hashes, const vtype* const bitsplitted_keys)
{
hipLaunchKernelGGL(( des_25_encrypt<31, 0, 1, 18, 3, 20, 3, 4, 21, 22, 23, 8, 15, 16, 17, 2, 19, 4, 19, 20, 5, 6, 7, 24, 63, 32, 33, 50, 35, 52, 35, 36, 53, 54, 55, 40, 47, 48, 49, 34, 51, 36, 51, 52, 37, 38, 39, 56>), dim3(num_blocks), dim3(threads_per_block), 0, 0, unchecked_hashes, bitsplitted_keys);
}
void des_25_encrypt_salt1833(const size_t num_blocks, const size_t threads_per_block, vtype* const unchecked_hashes, const vtype* const bitsplitted_keys)
{
hipLaunchKernelGGL(( des_25_encrypt<15, 0, 1, 18, 3, 20, 3, 4, 21, 22, 23, 8, 31, 16, 17, 2, 19, 4, 19, 20, 5, 6, 7, 24, 47, 32, 33, 50, 35, 52, 35, 36, 53, 54, 55, 40, 63, 48, 49, 34, 51, 36, 51, 52, 37, 38, 39, 56>), dim3(num_blocks), dim3(threads_per_block), 0, 0, unchecked_hashes, bitsplitted_keys);
}
void des_25_encrypt_salt1834(const size_t num_blocks, const size_t threads_per_block, vtype* const unchecked_hashes, const vtype* const bitsplitted_keys)
{
hipLaunchKernelGGL(( des_25_encrypt<31, 16, 1, 18, 3, 20, 3, 4, 21, 22, 23, 8, 15, 0, 17, 2, 19, 4, 19, 20, 5, 6, 7, 24, 63, 48, 33, 50, 35, 52, 35, 36, 53, 54, 55, 40, 47, 32, 49, 34, 51, 36, 51, 52, 37, 38, 39, 56>), dim3(num_blocks), dim3(threads_per_block), 0, 0, unchecked_hashes, bitsplitted_keys);
}
void des_25_encrypt_salt1835(const size_t num_blocks, const size_t threads_per_block, vtype* const unchecked_hashes, const vtype* const bitsplitted_keys)
{
hipLaunchKernelGGL(( des_25_encrypt<15, 16, 1, 18, 3, 20, 3, 4, 21, 22, 23, 8, 31, 0, 17, 2, 19, 4, 19, 20, 5, 6, 7, 24, 47, 48, 33, 50, 35, 52, 35, 36, 53, 54, 55, 40, 63, 32, 49, 34, 51, 36, 51, 52, 37, 38, 39, 56>), dim3(num_blocks), dim3(threads_per_block), 0, 0, unchecked_hashes, bitsplitted_keys);
}
void des_25_encrypt_salt1836(const size_t num_blocks, const size_t threads_per_block, vtype* const unchecked_hashes, const vtype* const bitsplitted_keys)
{
hipLaunchKernelGGL(( des_25_encrypt<31, 0, 17, 18, 3, 20, 3, 4, 21, 22, 23, 8, 15, 16, 1, 2, 19, 4, 19, 20, 5, 6, 7, 24, 63, 32, 49, 50, 35, 52, 35, 36, 53, 54, 55, 40, 47, 48, 33, 34, 51, 36, 51, 52, 37, 38, 39, 56>), dim3(num_blocks), dim3(threads_per_block), 0, 0, unchecked_hashes, bitsplitted_keys);
}
void des_25_encrypt_salt1837(const size_t num_blocks, const size_t threads_per_block, vtype* const unchecked_hashes, const vtype* const bitsplitted_keys)
{
hipLaunchKernelGGL(( des_25_encrypt<15, 0, 17, 18, 3, 20, 3, 4, 21, 22, 23, 8, 31, 16, 1, 2, 19, 4, 19, 20, 5, 6, 7, 24, 47, 32, 49, 50, 35, 52, 35, 36, 53, 54, 55, 40, 63, 48, 33, 34, 51, 36, 51, 52, 37, 38, 39, 56>), dim3(num_blocks), dim3(threads_per_block), 0, 0, unchecked_hashes, bitsplitted_keys);
}
void des_25_encrypt_salt1838(const size_t num_blocks, const size_t threads_per_block, vtype* const unchecked_hashes, const vtype* const bitsplitted_keys)
{
hipLaunchKernelGGL(( des_25_encrypt<31, 16, 17, 18, 3, 20, 3, 4, 21, 22, 23, 8, 15, 0, 1, 2, 19, 4, 19, 20, 5, 6, 7, 24, 63, 48, 49, 50, 35, 52, 35, 36, 53, 54, 55, 40, 47, 32, 33, 34, 51, 36, 51, 52, 37, 38, 39, 56>), dim3(num_blocks), dim3(threads_per_block), 0, 0, unchecked_hashes, bitsplitted_keys);
}
void des_25_encrypt_salt1839(const size_t num_blocks, const size_t threads_per_block, vtype* const unchecked_hashes, const vtype* const bitsplitted_keys)
{
hipLaunchKernelGGL(( des_25_encrypt<15, 16, 17, 18, 3, 20, 3, 4, 21, 22, 23, 8, 31, 0, 1, 2, 19, 4, 19, 20, 5, 6, 7, 24, 47, 48, 49, 50, 35, 52, 35, 36, 53, 54, 55, 40, 63, 32, 33, 34, 51, 36, 51, 52, 37, 38, 39, 56>), dim3(num_blocks), dim3(threads_per_block), 0, 0, unchecked_hashes, bitsplitted_keys);
}
void des_25_encrypt_salt1840(const size_t num_blocks, const size_t threads_per_block, vtype* const unchecked_hashes, const vtype* const bitsplitted_keys)
{
hipLaunchKernelGGL(( des_25_encrypt<31, 0, 1, 2, 19, 20, 3, 4, 21, 22, 23, 8, 15, 16, 17, 18, 3, 4, 19, 20, 5, 6, 7, 24, 63, 32, 33, 34, 51, 52, 35, 36, 53, 54, 55, 40, 47, 48, 49, 50, 35, 36, 51, 52, 37, 38, 39, 56>), dim3(num_blocks), dim3(threads_per_block), 0, 0, unchecked_hashes, bitsplitted_keys);
}
void des_25_encrypt_salt1841(const size_t num_blocks, const size_t threads_per_block, vtype* const unchecked_hashes, const vtype* const bitsplitted_keys)
{
hipLaunchKernelGGL(( des_25_encrypt<15, 0, 1, 2, 19, 20, 3, 4, 21, 22, 23, 8, 31, 16, 17, 18, 3, 4, 19, 20, 5, 6, 7, 24, 47, 32, 33, 34, 51, 52, 35, 36, 53, 54, 55, 40, 63, 48, 49, 50, 35, 36, 51, 52, 37, 38, 39, 56>), dim3(num_blocks), dim3(threads_per_block), 0, 0, unchecked_hashes, bitsplitted_keys);
}
void des_25_encrypt_salt1842(const size_t num_blocks, const size_t threads_per_block, vtype* const unchecked_hashes, const vtype* const bitsplitted_keys)
{
hipLaunchKernelGGL(( des_25_encrypt<31, 16, 1, 2, 19, 20, 3, 4, 21, 22, 23, 8, 15, 0, 17, 18, 3, 4, 19, 20, 5, 6, 7, 24, 63, 48, 33, 34, 51, 52, 35, 36, 53, 54, 55, 40, 47, 32, 49, 50, 35, 36, 51, 52, 37, 38, 39, 56>), dim3(num_blocks), dim3(threads_per_block), 0, 0, unchecked_hashes, bitsplitted_keys);
}
void des_25_encrypt_salt1843(const size_t num_blocks, const size_t threads_per_block, vtype* const unchecked_hashes, const vtype* const bitsplitted_keys)
{
hipLaunchKernelGGL(( des_25_encrypt<15, 16, 1, 2, 19, 20, 3, 4, 21, 22, 23, 8, 31, 0, 17, 18, 3, 4, 19, 20, 5, 6, 7, 24, 47, 48, 33, 34, 51, 52, 35, 36, 53, 54, 55, 40, 63, 32, 49, 50, 35, 36, 51, 52, 37, 38, 39, 56>), dim3(num_blocks), dim3(threads_per_block), 0, 0, unchecked_hashes, bitsplitted_keys);
}
void des_25_encrypt_salt1844(const size_t num_blocks, const size_t threads_per_block, vtype* const unchecked_hashes, const vtype* const bitsplitted_keys)
{
hipLaunchKernelGGL(( des_25_encrypt<31, 0, 17, 2, 19, 20, 3, 4, 21, 22, 23, 8, 15, 16, 1, 18, 3, 4, 19, 20, 5, 6, 7, 24, 63, 32, 49, 34, 51, 52, 35, 36, 53, 54, 55, 40, 47, 48, 33, 50, 35, 36, 51, 52, 37, 38, 39, 56>), dim3(num_blocks), dim3(threads_per_block), 0, 0, unchecked_hashes, bitsplitted_keys);
}
void des_25_encrypt_salt1845(const size_t num_blocks, const size_t threads_per_block, vtype* const unchecked_hashes, const vtype* const bitsplitted_keys)
{
hipLaunchKernelGGL(( des_25_encrypt<15, 0, 17, 2, 19, 20, 3, 4, 21, 22, 23, 8, 31, 16, 1, 18, 3, 4, 19, 20, 5, 6, 7, 24, 47, 32, 49, 34, 51, 52, 35, 36, 53, 54, 55, 40, 63, 48, 33, 50, 35, 36, 51, 52, 37, 38, 39, 56>), dim3(num_blocks), dim3(threads_per_block), 0, 0, unchecked_hashes, bitsplitted_keys);
}
void des_25_encrypt_salt1846(const size_t num_blocks, const size_t threads_per_block, vtype* const unchecked_hashes, const vtype* const bitsplitted_keys)
{
hipLaunchKernelGGL(( des_25_encrypt<31, 16, 17, 2, 19, 20, 3, 4, 21, 22, 23, 8, 15, 0, 1, 18, 3, 4, 19, 20, 5, 6, 7, 24, 63, 48, 49, 34, 51, 52, 35, 36, 53, 54, 55, 40, 47, 32, 33, 50, 35, 36, 51, 52, 37, 38, 39, 56>), dim3(num_blocks), dim3(threads_per_block), 0, 0, unchecked_hashes, bitsplitted_keys);
}
void des_25_encrypt_salt1847(const size_t num_blocks, const size_t threads_per_block, vtype* const unchecked_hashes, const vtype* const bitsplitted_keys)
{
hipLaunchKernelGGL(( des_25_encrypt<15, 16, 17, 2, 19, 20, 3, 4, 21, 22, 23, 8, 31, 0, 1, 18, 3, 4, 19, 20, 5, 6, 7, 24, 47, 48, 49, 34, 51, 52, 35, 36, 53, 54, 55, 40, 63, 32, 33, 50, 35, 36, 51, 52, 37, 38, 39, 56>), dim3(num_blocks), dim3(threads_per_block), 0, 0, unchecked_hashes, bitsplitted_keys);
}
void des_25_encrypt_salt1848(const size_t num_blocks, const size_t threads_per_block, vtype* const unchecked_hashes, const vtype* const bitsplitted_keys)
{
hipLaunchKernelGGL(( des_25_encrypt<31, 0, 1, 18, 19, 20, 3, 4, 21, 22, 23, 8, 15, 16, 17, 2, 3, 4, 19, 20, 5, 6, 7, 24, 63, 32, 33, 50, 51, 52, 35, 36, 53, 54, 55, 40, 47, 48, 49, 34, 35, 36, 51, 52, 37, 38, 39, 56>), dim3(num_blocks), dim3(threads_per_block), 0, 0, unchecked_hashes, bitsplitted_keys);
}
void des_25_encrypt_salt1849(const size_t num_blocks, const size_t threads_per_block, vtype* const unchecked_hashes, const vtype* const bitsplitted_keys)
{
hipLaunchKernelGGL(( des_25_encrypt<15, 0, 1, 18, 19, 20, 3, 4, 21, 22, 23, 8, 31, 16, 17, 2, 3, 4, 19, 20, 5, 6, 7, 24, 47, 32, 33, 50, 51, 52, 35, 36, 53, 54, 55, 40, 63, 48, 49, 34, 35, 36, 51, 52, 37, 38, 39, 56>), dim3(num_blocks), dim3(threads_per_block), 0, 0, unchecked_hashes, bitsplitted_keys);
}
void des_25_encrypt_salt1850(const size_t num_blocks, const size_t threads_per_block, vtype* const unchecked_hashes, const vtype* const bitsplitted_keys)
{
hipLaunchKernelGGL(( des_25_encrypt<31, 16, 1, 18, 19, 20, 3, 4, 21, 22, 23, 8, 15, 0, 17, 2, 3, 4, 19, 20, 5, 6, 7, 24, 63, 48, 33, 50, 51, 52, 35, 36, 53, 54, 55, 40, 47, 32, 49, 34, 35, 36, 51, 52, 37, 38, 39, 56>), dim3(num_blocks), dim3(threads_per_block), 0, 0, unchecked_hashes, bitsplitted_keys);
}
void des_25_encrypt_salt1851(const size_t num_blocks, const size_t threads_per_block, vtype* const unchecked_hashes, const vtype* const bitsplitted_keys)
{
hipLaunchKernelGGL(( des_25_encrypt<15, 16, 1, 18, 19, 20, 3, 4, 21, 22, 23, 8, 31, 0, 17, 2, 3, 4, 19, 20, 5, 6, 7, 24, 47, 48, 33, 50, 51, 52, 35, 36, 53, 54, 55, 40, 63, 32, 49, 34, 35, 36, 51, 52, 37, 38, 39, 56>), dim3(num_blocks), dim3(threads_per_block), 0, 0, unchecked_hashes, bitsplitted_keys);
}
void des_25_encrypt_salt1852(const size_t num_blocks, const size_t threads_per_block, vtype* const unchecked_hashes, const vtype* const bitsplitted_keys)
{
hipLaunchKernelGGL(( des_25_encrypt<31, 0, 17, 18, 19, 20, 3, 4, 21, 22, 23, 8, 15, 16, 1, 2, 3, 4, 19, 20, 5, 6, 7, 24, 63, 32, 49, 50, 51, 52, 35, 36, 53, 54, 55, 40, 47, 48, 33, 34, 35, 36, 51, 52, 37, 38, 39, 56>), dim3(num_blocks), dim3(threads_per_block), 0, 0, unchecked_hashes, bitsplitted_keys);
}
void des_25_encrypt_salt1853(const size_t num_blocks, const size_t threads_per_block, vtype* const unchecked_hashes, const vtype* const bitsplitted_keys)
{
hipLaunchKernelGGL(( des_25_encrypt<15, 0, 17, 18, 19, 20, 3, 4, 21, 22, 23, 8, 31, 16, 1, 2, 3, 4, 19, 20, 5, 6, 7, 24, 47, 32, 49, 50, 51, 52, 35, 36, 53, 54, 55, 40, 63, 48, 33, 34, 35, 36, 51, 52, 37, 38, 39, 56>), dim3(num_blocks), dim3(threads_per_block), 0, 0, unchecked_hashes, bitsplitted_keys);
}
void des_25_encrypt_salt1854(const size_t num_blocks, const size_t threads_per_block, vtype* const unchecked_hashes, const vtype* const bitsplitted_keys)
{
hipLaunchKernelGGL(( des_25_encrypt<31, 16, 17, 18, 19, 20, 3, 4, 21, 22, 23, 8, 15, 0, 1, 2, 3, 4, 19, 20, 5, 6, 7, 24, 63, 48, 49, 50, 51, 52, 35, 36, 53, 54, 55, 40, 47, 32, 33, 34, 35, 36, 51, 52, 37, 38, 39, 56>), dim3(num_blocks), dim3(threads_per_block), 0, 0, unchecked_hashes, bitsplitted_keys);
}
void des_25_encrypt_salt1855(const size_t num_blocks, const size_t threads_per_block, vtype* const unchecked_hashes, const vtype* const bitsplitted_keys)
{
hipLaunchKernelGGL(( des_25_encrypt<15, 16, 17, 18, 19, 20, 3, 4, 21, 22, 23, 8, 31, 0, 1, 2, 3, 4, 19, 20, 5, 6, 7, 24, 47, 48, 49, 50, 51, 52, 35, 36, 53, 54, 55, 40, 63, 32, 33, 34, 35, 36, 51, 52, 37, 38, 39, 56>), dim3(num_blocks), dim3(threads_per_block), 0, 0, unchecked_hashes, bitsplitted_keys);
}
void des_25_encrypt_salt1856(const size_t num_blocks, const size_t threads_per_block, vtype* const unchecked_hashes, const vtype* const bitsplitted_keys)
{
hipLaunchKernelGGL(( des_25_encrypt<31, 0, 1, 2, 3, 4, 19, 4, 21, 22, 23, 8, 15, 16, 17, 18, 19, 20, 3, 20, 5, 6, 7, 24, 63, 32, 33, 34, 35, 36, 51, 36, 53, 54, 55, 40, 47, 48, 49, 50, 51, 52, 35, 52, 37, 38, 39, 56>), dim3(num_blocks), dim3(threads_per_block), 0, 0, unchecked_hashes, bitsplitted_keys);
}
void des_25_encrypt_salt1857(const size_t num_blocks, const size_t threads_per_block, vtype* const unchecked_hashes, const vtype* const bitsplitted_keys)
{
hipLaunchKernelGGL(( des_25_encrypt<15, 0, 1, 2, 3, 4, 19, 4, 21, 22, 23, 8, 31, 16, 17, 18, 19, 20, 3, 20, 5, 6, 7, 24, 47, 32, 33, 34, 35, 36, 51, 36, 53, 54, 55, 40, 63, 48, 49, 50, 51, 52, 35, 52, 37, 38, 39, 56>), dim3(num_blocks), dim3(threads_per_block), 0, 0, unchecked_hashes, bitsplitted_keys);
}
void des_25_encrypt_salt1858(const size_t num_blocks, const size_t threads_per_block, vtype* const unchecked_hashes, const vtype* const bitsplitted_keys)
{
hipLaunchKernelGGL(( des_25_encrypt<31, 16, 1, 2, 3, 4, 19, 4, 21, 22, 23, 8, 15, 0, 17, 18, 19, 20, 3, 20, 5, 6, 7, 24, 63, 48, 33, 34, 35, 36, 51, 36, 53, 54, 55, 40, 47, 32, 49, 50, 51, 52, 35, 52, 37, 38, 39, 56>), dim3(num_blocks), dim3(threads_per_block), 0, 0, unchecked_hashes, bitsplitted_keys);
}
void des_25_encrypt_salt1859(const size_t num_blocks, const size_t threads_per_block, vtype* const unchecked_hashes, const vtype* const bitsplitted_keys)
{
hipLaunchKernelGGL(( des_25_encrypt<15, 16, 1, 2, 3, 4, 19, 4, 21, 22, 23, 8, 31, 0, 17, 18, 19, 20, 3, 20, 5, 6, 7, 24, 47, 48, 33, 34, 35, 36, 51, 36, 53, 54, 55, 40, 63, 32, 49, 50, 51, 52, 35, 52, 37, 38, 39, 56>), dim3(num_blocks), dim3(threads_per_block), 0, 0, unchecked_hashes, bitsplitted_keys);
}
void des_25_encrypt_salt1860(const size_t num_blocks, const size_t threads_per_block, vtype* const unchecked_hashes, const vtype* const bitsplitted_keys)
{
hipLaunchKernelGGL(( des_25_encrypt<31, 0, 17, 2, 3, 4, 19, 4, 21, 22, 23, 8, 15, 16, 1, 18, 19, 20, 3, 20, 5, 6, 7, 24, 63, 32, 49, 34, 35, 36, 51, 36, 53, 54, 55, 40, 47, 48, 33, 50, 51, 52, 35, 52, 37, 38, 39, 56>), dim3(num_blocks), dim3(threads_per_block), 0, 0, unchecked_hashes, bitsplitted_keys);
}
void des_25_encrypt_salt1861(const size_t num_blocks, const size_t threads_per_block, vtype* const unchecked_hashes, const vtype* const bitsplitted_keys)
{
hipLaunchKernelGGL(( des_25_encrypt<15, 0, 17, 2, 3, 4, 19, 4, 21, 22, 23, 8, 31, 16, 1, 18, 19, 20, 3, 20, 5, 6, 7, 24, 47, 32, 49, 34, 35, 36, 51, 36, 53, 54, 55, 40, 63, 48, 33, 50, 51, 52, 35, 52, 37, 38, 39, 56>), dim3(num_blocks), dim3(threads_per_block), 0, 0, unchecked_hashes, bitsplitted_keys);
}
void des_25_encrypt_salt1862(const size_t num_blocks, const size_t threads_per_block, vtype* const unchecked_hashes, const vtype* const bitsplitted_keys)
{
hipLaunchKernelGGL(( des_25_encrypt<31, 16, 17, 2, 3, 4, 19, 4, 21, 22, 23, 8, 15, 0, 1, 18, 19, 20, 3, 20, 5, 6, 7, 24, 63, 48, 49, 34, 35, 36, 51, 36, 53, 54, 55, 40, 47, 32, 33, 50, 51, 52, 35, 52, 37, 38, 39, 56>), dim3(num_blocks), dim3(threads_per_block), 0, 0, unchecked_hashes, bitsplitted_keys);
}
void des_25_encrypt_salt1863(const size_t num_blocks, const size_t threads_per_block, vtype* const unchecked_hashes, const vtype* const bitsplitted_keys)
{
hipLaunchKernelGGL(( des_25_encrypt<15, 16, 17, 2, 3, 4, 19, 4, 21, 22, 23, 8, 31, 0, 1, 18, 19, 20, 3, 20, 5, 6, 7, 24, 47, 48, 49, 34, 35, 36, 51, 36, 53, 54, 55, 40, 63, 32, 33, 50, 51, 52, 35, 52, 37, 38, 39, 56>), dim3(num_blocks), dim3(threads_per_block), 0, 0, unchecked_hashes, bitsplitted_keys);
}
void des_25_encrypt_salt1864(const size_t num_blocks, const size_t threads_per_block, vtype* const unchecked_hashes, const vtype* const bitsplitted_keys)
{
hipLaunchKernelGGL(( des_25_encrypt<31, 0, 1, 18, 3, 4, 19, 4, 21, 22, 23, 8, 15, 16, 17, 2, 19, 20, 3, 20, 5, 6, 7, 24, 63, 32, 33, 50, 35, 36, 51, 36, 53, 54, 55, 40, 47, 48, 49, 34, 51, 52, 35, 52, 37, 38, 39, 56>), dim3(num_blocks), dim3(threads_per_block), 0, 0, unchecked_hashes, bitsplitted_keys);
}
void des_25_encrypt_salt1865(const size_t num_blocks, const size_t threads_per_block, vtype* const unchecked_hashes, const vtype* const bitsplitted_keys)
{
hipLaunchKernelGGL(( des_25_encrypt<15, 0, 1, 18, 3, 4, 19, 4, 21, 22, 23, 8, 31, 16, 17, 2, 19, 20, 3, 20, 5, 6, 7, 24, 47, 32, 33, 50, 35, 36, 51, 36, 53, 54, 55, 40, 63, 48, 49, 34, 51, 52, 35, 52, 37, 38, 39, 56>), dim3(num_blocks), dim3(threads_per_block), 0, 0, unchecked_hashes, bitsplitted_keys);
}
void des_25_encrypt_salt1866(const size_t num_blocks, const size_t threads_per_block, vtype* const unchecked_hashes, const vtype* const bitsplitted_keys)
{
hipLaunchKernelGGL(( des_25_encrypt<31, 16, 1, 18, 3, 4, 19, 4, 21, 22, 23, 8, 15, 0, 17, 2, 19, 20, 3, 20, 5, 6, 7, 24, 63, 48, 33, 50, 35, 36, 51, 36, 53, 54, 55, 40, 47, 32, 49, 34, 51, 52, 35, 52, 37, 38, 39, 56>), dim3(num_blocks), dim3(threads_per_block), 0, 0, unchecked_hashes, bitsplitted_keys);
}
void des_25_encrypt_salt1867(const size_t num_blocks, const size_t threads_per_block, vtype* const unchecked_hashes, const vtype* const bitsplitted_keys)
{
hipLaunchKernelGGL(( des_25_encrypt<15, 16, 1, 18, 3, 4, 19, 4, 21, 22, 23, 8, 31, 0, 17, 2, 19, 20, 3, 20, 5, 6, 7, 24, 47, 48, 33, 50, 35, 36, 51, 36, 53, 54, 55, 40, 63, 32, 49, 34, 51, 52, 35, 52, 37, 38, 39, 56>), dim3(num_blocks), dim3(threads_per_block), 0, 0, unchecked_hashes, bitsplitted_keys);
}
void des_25_encrypt_salt1868(const size_t num_blocks, const size_t threads_per_block, vtype* const unchecked_hashes, const vtype* const bitsplitted_keys)
{
hipLaunchKernelGGL(( des_25_encrypt<31, 0, 17, 18, 3, 4, 19, 4, 21, 22, 23, 8, 15, 16, 1, 2, 19, 20, 3, 20, 5, 6, 7, 24, 63, 32, 49, 50, 35, 36, 51, 36, 53, 54, 55, 40, 47, 48, 33, 34, 51, 52, 35, 52, 37, 38, 39, 56>), dim3(num_blocks), dim3(threads_per_block), 0, 0, unchecked_hashes, bitsplitted_keys);
}
void des_25_encrypt_salt1869(const size_t num_blocks, const size_t threads_per_block, vtype* const unchecked_hashes, const vtype* const bitsplitted_keys)
{
hipLaunchKernelGGL(( des_25_encrypt<15, 0, 17, 18, 3, 4, 19, 4, 21, 22, 23, 8, 31, 16, 1, 2, 19, 20, 3, 20, 5, 6, 7, 24, 47, 32, 49, 50, 35, 36, 51, 36, 53, 54, 55, 40, 63, 48, 33, 34, 51, 52, 35, 52, 37, 38, 39, 56>), dim3(num_blocks), dim3(threads_per_block), 0, 0, unchecked_hashes, bitsplitted_keys);
}
void des_25_encrypt_salt1870(const size_t num_blocks, const size_t threads_per_block, vtype* const unchecked_hashes, const vtype* const bitsplitted_keys)
{
hipLaunchKernelGGL(( des_25_encrypt<31, 16, 17, 18, 3, 4, 19, 4, 21, 22, 23, 8, 15, 0, 1, 2, 19, 20, 3, 20, 5, 6, 7, 24, 63, 48, 49, 50, 35, 36, 51, 36, 53, 54, 55, 40, 47, 32, 33, 34, 51, 52, 35, 52, 37, 38, 39, 56>), dim3(num_blocks), dim3(threads_per_block), 0, 0, unchecked_hashes, bitsplitted_keys);
}
void des_25_encrypt_salt1871(const size_t num_blocks, const size_t threads_per_block, vtype* const unchecked_hashes, const vtype* const bitsplitted_keys)
{
hipLaunchKernelGGL(( des_25_encrypt<15, 16, 17, 18, 3, 4, 19, 4, 21, 22, 23, 8, 31, 0, 1, 2, 19, 20, 3, 20, 5, 6, 7, 24, 47, 48, 49, 50, 35, 36, 51, 36, 53, 54, 55, 40, 63, 32, 33, 34, 51, 52, 35, 52, 37, 38, 39, 56>), dim3(num_blocks), dim3(threads_per_block), 0, 0, unchecked_hashes, bitsplitted_keys);
}
void des_25_encrypt_salt1872(const size_t num_blocks, const size_t threads_per_block, vtype* const unchecked_hashes, const vtype* const bitsplitted_keys)
{
hipLaunchKernelGGL(( des_25_encrypt<31, 0, 1, 2, 19, 4, 19, 4, 21, 22, 23, 8, 15, 16, 17, 18, 3, 20, 3, 20, 5, 6, 7, 24, 63, 32, 33, 34, 51, 36, 51, 36, 53, 54, 55, 40, 47, 48, 49, 50, 35, 52, 35, 52, 37, 38, 39, 56>), dim3(num_blocks), dim3(threads_per_block), 0, 0, unchecked_hashes, bitsplitted_keys);
}
void des_25_encrypt_salt1873(const size_t num_blocks, const size_t threads_per_block, vtype* const unchecked_hashes, const vtype* const bitsplitted_keys)
{
hipLaunchKernelGGL(( des_25_encrypt<15, 0, 1, 2, 19, 4, 19, 4, 21, 22, 23, 8, 31, 16, 17, 18, 3, 20, 3, 20, 5, 6, 7, 24, 47, 32, 33, 34, 51, 36, 51, 36, 53, 54, 55, 40, 63, 48, 49, 50, 35, 52, 35, 52, 37, 38, 39, 56>), dim3(num_blocks), dim3(threads_per_block), 0, 0, unchecked_hashes, bitsplitted_keys);
}
void des_25_encrypt_salt1874(const size_t num_blocks, const size_t threads_per_block, vtype* const unchecked_hashes, const vtype* const bitsplitted_keys)
{
hipLaunchKernelGGL(( des_25_encrypt<31, 16, 1, 2, 19, 4, 19, 4, 21, 22, 23, 8, 15, 0, 17, 18, 3, 20, 3, 20, 5, 6, 7, 24, 63, 48, 33, 34, 51, 36, 51, 36, 53, 54, 55, 40, 47, 32, 49, 50, 35, 52, 35, 52, 37, 38, 39, 56>), dim3(num_blocks), dim3(threads_per_block), 0, 0, unchecked_hashes, bitsplitted_keys);
}
void des_25_encrypt_salt1875(const size_t num_blocks, const size_t threads_per_block, vtype* const unchecked_hashes, const vtype* const bitsplitted_keys)
{
hipLaunchKernelGGL(( des_25_encrypt<15, 16, 1, 2, 19, 4, 19, 4, 21, 22, 23, 8, 31, 0, 17, 18, 3, 20, 3, 20, 5, 6, 7, 24, 47, 48, 33, 34, 51, 36, 51, 36, 53, 54, 55, 40, 63, 32, 49, 50, 35, 52, 35, 52, 37, 38, 39, 56>), dim3(num_blocks), dim3(threads_per_block), 0, 0, unchecked_hashes, bitsplitted_keys);
}
void des_25_encrypt_salt1876(const size_t num_blocks, const size_t threads_per_block, vtype* const unchecked_hashes, const vtype* const bitsplitted_keys)
{
hipLaunchKernelGGL(( des_25_encrypt<31, 0, 17, 2, 19, 4, 19, 4, 21, 22, 23, 8, 15, 16, 1, 18, 3, 20, 3, 20, 5, 6, 7, 24, 63, 32, 49, 34, 51, 36, 51, 36, 53, 54, 55, 40, 47, 48, 33, 50, 35, 52, 35, 52, 37, 38, 39, 56>), dim3(num_blocks), dim3(threads_per_block), 0, 0, unchecked_hashes, bitsplitted_keys);
}
void des_25_encrypt_salt1877(const size_t num_blocks, const size_t threads_per_block, vtype* const unchecked_hashes, const vtype* const bitsplitted_keys)
{
hipLaunchKernelGGL(( des_25_encrypt<15, 0, 17, 2, 19, 4, 19, 4, 21, 22, 23, 8, 31, 16, 1, 18, 3, 20, 3, 20, 5, 6, 7, 24, 47, 32, 49, 34, 51, 36, 51, 36, 53, 54, 55, 40, 63, 48, 33, 50, 35, 52, 35, 52, 37, 38, 39, 56>), dim3(num_blocks), dim3(threads_per_block), 0, 0, unchecked_hashes, bitsplitted_keys);
}
void des_25_encrypt_salt1878(const size_t num_blocks, const size_t threads_per_block, vtype* const unchecked_hashes, const vtype* const bitsplitted_keys)
{
hipLaunchKernelGGL(( des_25_encrypt<31, 16, 17, 2, 19, 4, 19, 4, 21, 22, 23, 8, 15, 0, 1, 18, 3, 20, 3, 20, 5, 6, 7, 24, 63, 48, 49, 34, 51, 36, 51, 36, 53, 54, 55, 40, 47, 32, 33, 50, 35, 52, 35, 52, 37, 38, 39, 56>), dim3(num_blocks), dim3(threads_per_block), 0, 0, unchecked_hashes, bitsplitted_keys);
}
void des_25_encrypt_salt1879(const size_t num_blocks, const size_t threads_per_block, vtype* const unchecked_hashes, const vtype* const bitsplitted_keys)
{
hipLaunchKernelGGL(( des_25_encrypt<15, 16, 17, 2, 19, 4, 19, 4, 21, 22, 23, 8, 31, 0, 1, 18, 3, 20, 3, 20, 5, 6, 7, 24, 47, 48, 49, 34, 51, 36, 51, 36, 53, 54, 55, 40, 63, 32, 33, 50, 35, 52, 35, 52, 37, 38, 39, 56>), dim3(num_blocks), dim3(threads_per_block), 0, 0, unchecked_hashes, bitsplitted_keys);
}
void des_25_encrypt_salt1880(const size_t num_blocks, const size_t threads_per_block, vtype* const unchecked_hashes, const vtype* const bitsplitted_keys)
{
hipLaunchKernelGGL(( des_25_encrypt<31, 0, 1, 18, 19, 4, 19, 4, 21, 22, 23, 8, 15, 16, 17, 2, 3, 20, 3, 20, 5, 6, 7, 24, 63, 32, 33, 50, 51, 36, 51, 36, 53, 54, 55, 40, 47, 48, 49, 34, 35, 52, 35, 52, 37, 38, 39, 56>), dim3(num_blocks), dim3(threads_per_block), 0, 0, unchecked_hashes, bitsplitted_keys);
}
void des_25_encrypt_salt1881(const size_t num_blocks, const size_t threads_per_block, vtype* const unchecked_hashes, const vtype* const bitsplitted_keys)
{
hipLaunchKernelGGL(( des_25_encrypt<15, 0, 1, 18, 19, 4, 19, 4, 21, 22, 23, 8, 31, 16, 17, 2, 3, 20, 3, 20, 5, 6, 7, 24, 47, 32, 33, 50, 51, 36, 51, 36, 53, 54, 55, 40, 63, 48, 49, 34, 35, 52, 35, 52, 37, 38, 39, 56>), dim3(num_blocks), dim3(threads_per_block), 0, 0, unchecked_hashes, bitsplitted_keys);
}
void des_25_encrypt_salt1882(const size_t num_blocks, const size_t threads_per_block, vtype* const unchecked_hashes, const vtype* const bitsplitted_keys)
{
hipLaunchKernelGGL(( des_25_encrypt<31, 16, 1, 18, 19, 4, 19, 4, 21, 22, 23, 8, 15, 0, 17, 2, 3, 20, 3, 20, 5, 6, 7, 24, 63, 48, 33, 50, 51, 36, 51, 36, 53, 54, 55, 40, 47, 32, 49, 34, 35, 52, 35, 52, 37, 38, 39, 56>), dim3(num_blocks), dim3(threads_per_block), 0, 0, unchecked_hashes, bitsplitted_keys);
}
void des_25_encrypt_salt1883(const size_t num_blocks, const size_t threads_per_block, vtype* const unchecked_hashes, const vtype* const bitsplitted_keys)
{
hipLaunchKernelGGL(( des_25_encrypt<15, 16, 1, 18, 19, 4, 19, 4, 21, 22, 23, 8, 31, 0, 17, 2, 3, 20, 3, 20, 5, 6, 7, 24, 47, 48, 33, 50, 51, 36, 51, 36, 53, 54, 55, 40, 63, 32, 49, 34, 35, 52, 35, 52, 37, 38, 39, 56>), dim3(num_blocks), dim3(threads_per_block), 0, 0, unchecked_hashes, bitsplitted_keys);
}
void des_25_encrypt_salt1884(const size_t num_blocks, const size_t threads_per_block, vtype* const unchecked_hashes, const vtype* const bitsplitted_keys)
{
hipLaunchKernelGGL(( des_25_encrypt<31, 0, 17, 18, 19, 4, 19, 4, 21, 22, 23, 8, 15, 16, 1, 2, 3, 20, 3, 20, 5, 6, 7, 24, 63, 32, 49, 50, 51, 36, 51, 36, 53, 54, 55, 40, 47, 48, 33, 34, 35, 52, 35, 52, 37, 38, 39, 56>), dim3(num_blocks), dim3(threads_per_block), 0, 0, unchecked_hashes, bitsplitted_keys);
}
void des_25_encrypt_salt1885(const size_t num_blocks, const size_t threads_per_block, vtype* const unchecked_hashes, const vtype* const bitsplitted_keys)
{
hipLaunchKernelGGL(( des_25_encrypt<15, 0, 17, 18, 19, 4, 19, 4, 21, 22, 23, 8, 31, 16, 1, 2, 3, 20, 3, 20, 5, 6, 7, 24, 47, 32, 49, 50, 51, 36, 51, 36, 53, 54, 55, 40, 63, 48, 33, 34, 35, 52, 35, 52, 37, 38, 39, 56>), dim3(num_blocks), dim3(threads_per_block), 0, 0, unchecked_hashes, bitsplitted_keys);
}
void des_25_encrypt_salt1886(const size_t num_blocks, const size_t threads_per_block, vtype* const unchecked_hashes, const vtype* const bitsplitted_keys)
{
hipLaunchKernelGGL(( des_25_encrypt<31, 16, 17, 18, 19, 4, 19, 4, 21, 22, 23, 8, 15, 0, 1, 2, 3, 20, 3, 20, 5, 6, 7, 24, 63, 48, 49, 50, 51, 36, 51, 36, 53, 54, 55, 40, 47, 32, 33, 34, 35, 52, 35, 52, 37, 38, 39, 56>), dim3(num_blocks), dim3(threads_per_block), 0, 0, unchecked_hashes, bitsplitted_keys);
}
void des_25_encrypt_salt1887(const size_t num_blocks, const size_t threads_per_block, vtype* const unchecked_hashes, const vtype* const bitsplitted_keys)
{
hipLaunchKernelGGL(( des_25_encrypt<15, 16, 17, 18, 19, 4, 19, 4, 21, 22, 23, 8, 31, 0, 1, 2, 3, 20, 3, 20, 5, 6, 7, 24, 47, 48, 49, 50, 51, 36, 51, 36, 53, 54, 55, 40, 63, 32, 33, 34, 35, 52, 35, 52, 37, 38, 39, 56>), dim3(num_blocks), dim3(threads_per_block), 0, 0, unchecked_hashes, bitsplitted_keys);
}
void des_25_encrypt_salt1888(const size_t num_blocks, const size_t threads_per_block, vtype* const unchecked_hashes, const vtype* const bitsplitted_keys)
{
hipLaunchKernelGGL(( des_25_encrypt<31, 0, 1, 2, 3, 20, 19, 4, 21, 22, 23, 8, 15, 16, 17, 18, 19, 4, 3, 20, 5, 6, 7, 24, 63, 32, 33, 34, 35, 52, 51, 36, 53, 54, 55, 40, 47, 48, 49, 50, 51, 36, 35, 52, 37, 38, 39, 56>), dim3(num_blocks), dim3(threads_per_block), 0, 0, unchecked_hashes, bitsplitted_keys);
}
void des_25_encrypt_salt1889(const size_t num_blocks, const size_t threads_per_block, vtype* const unchecked_hashes, const vtype* const bitsplitted_keys)
{
hipLaunchKernelGGL(( des_25_encrypt<15, 0, 1, 2, 3, 20, 19, 4, 21, 22, 23, 8, 31, 16, 17, 18, 19, 4, 3, 20, 5, 6, 7, 24, 47, 32, 33, 34, 35, 52, 51, 36, 53, 54, 55, 40, 63, 48, 49, 50, 51, 36, 35, 52, 37, 38, 39, 56>), dim3(num_blocks), dim3(threads_per_block), 0, 0, unchecked_hashes, bitsplitted_keys);
}
void des_25_encrypt_salt1890(const size_t num_blocks, const size_t threads_per_block, vtype* const unchecked_hashes, const vtype* const bitsplitted_keys)
{
hipLaunchKernelGGL(( des_25_encrypt<31, 16, 1, 2, 3, 20, 19, 4, 21, 22, 23, 8, 15, 0, 17, 18, 19, 4, 3, 20, 5, 6, 7, 24, 63, 48, 33, 34, 35, 52, 51, 36, 53, 54, 55, 40, 47, 32, 49, 50, 51, 36, 35, 52, 37, 38, 39, 56>), dim3(num_blocks), dim3(threads_per_block), 0, 0, unchecked_hashes, bitsplitted_keys);
}
void des_25_encrypt_salt1891(const size_t num_blocks, const size_t threads_per_block, vtype* const unchecked_hashes, const vtype* const bitsplitted_keys)
{
hipLaunchKernelGGL(( des_25_encrypt<15, 16, 1, 2, 3, 20, 19, 4, 21, 22, 23, 8, 31, 0, 17, 18, 19, 4, 3, 20, 5, 6, 7, 24, 47, 48, 33, 34, 35, 52, 51, 36, 53, 54, 55, 40, 63, 32, 49, 50, 51, 36, 35, 52, 37, 38, 39, 56>), dim3(num_blocks), dim3(threads_per_block), 0, 0, unchecked_hashes, bitsplitted_keys);
}
void des_25_encrypt_salt1892(const size_t num_blocks, const size_t threads_per_block, vtype* const unchecked_hashes, const vtype* const bitsplitted_keys)
{
hipLaunchKernelGGL(( des_25_encrypt<31, 0, 17, 2, 3, 20, 19, 4, 21, 22, 23, 8, 15, 16, 1, 18, 19, 4, 3, 20, 5, 6, 7, 24, 63, 32, 49, 34, 35, 52, 51, 36, 53, 54, 55, 40, 47, 48, 33, 50, 51, 36, 35, 52, 37, 38, 39, 56>), dim3(num_blocks), dim3(threads_per_block), 0, 0, unchecked_hashes, bitsplitted_keys);
}
void des_25_encrypt_salt1893(const size_t num_blocks, const size_t threads_per_block, vtype* const unchecked_hashes, const vtype* const bitsplitted_keys)
{
hipLaunchKernelGGL(( des_25_encrypt<15, 0, 17, 2, 3, 20, 19, 4, 21, 22, 23, 8, 31, 16, 1, 18, 19, 4, 3, 20, 5, 6, 7, 24, 47, 32, 49, 34, 35, 52, 51, 36, 53, 54, 55, 40, 63, 48, 33, 50, 51, 36, 35, 52, 37, 38, 39, 56>), dim3(num_blocks), dim3(threads_per_block), 0, 0, unchecked_hashes, bitsplitted_keys);
}
void des_25_encrypt_salt1894(const size_t num_blocks, const size_t threads_per_block, vtype* const unchecked_hashes, const vtype* const bitsplitted_keys)
{
hipLaunchKernelGGL(( des_25_encrypt<31, 16, 17, 2, 3, 20, 19, 4, 21, 22, 23, 8, 15, 0, 1, 18, 19, 4, 3, 20, 5, 6, 7, 24, 63, 48, 49, 34, 35, 52, 51, 36, 53, 54, 55, 40, 47, 32, 33, 50, 51, 36, 35, 52, 37, 38, 39, 56>), dim3(num_blocks), dim3(threads_per_block), 0, 0, unchecked_hashes, bitsplitted_keys);
}
void des_25_encrypt_salt1895(const size_t num_blocks, const size_t threads_per_block, vtype* const unchecked_hashes, const vtype* const bitsplitted_keys)
{
hipLaunchKernelGGL(( des_25_encrypt<15, 16, 17, 2, 3, 20, 19, 4, 21, 22, 23, 8, 31, 0, 1, 18, 19, 4, 3, 20, 5, 6, 7, 24, 47, 48, 49, 34, 35, 52, 51, 36, 53, 54, 55, 40, 63, 32, 33, 50, 51, 36, 35, 52, 37, 38, 39, 56>), dim3(num_blocks), dim3(threads_per_block), 0, 0, unchecked_hashes, bitsplitted_keys);
}
void des_25_encrypt_salt1896(const size_t num_blocks, const size_t threads_per_block, vtype* const unchecked_hashes, const vtype* const bitsplitted_keys)
{
hipLaunchKernelGGL(( des_25_encrypt<31, 0, 1, 18, 3, 20, 19, 4, 21, 22, 23, 8, 15, 16, 17, 2, 19, 4, 3, 20, 5, 6, 7, 24, 63, 32, 33, 50, 35, 52, 51, 36, 53, 54, 55, 40, 47, 48, 49, 34, 51, 36, 35, 52, 37, 38, 39, 56>), dim3(num_blocks), dim3(threads_per_block), 0, 0, unchecked_hashes, bitsplitted_keys);
}
void des_25_encrypt_salt1897(const size_t num_blocks, const size_t threads_per_block, vtype* const unchecked_hashes, const vtype* const bitsplitted_keys)
{
hipLaunchKernelGGL(( des_25_encrypt<15, 0, 1, 18, 3, 20, 19, 4, 21, 22, 23, 8, 31, 16, 17, 2, 19, 4, 3, 20, 5, 6, 7, 24, 47, 32, 33, 50, 35, 52, 51, 36, 53, 54, 55, 40, 63, 48, 49, 34, 51, 36, 35, 52, 37, 38, 39, 56>), dim3(num_blocks), dim3(threads_per_block), 0, 0, unchecked_hashes, bitsplitted_keys);
}
void des_25_encrypt_salt1898(const size_t num_blocks, const size_t threads_per_block, vtype* const unchecked_hashes, const vtype* const bitsplitted_keys)
{
hipLaunchKernelGGL(( des_25_encrypt<31, 16, 1, 18, 3, 20, 19, 4, 21, 22, 23, 8, 15, 0, 17, 2, 19, 4, 3, 20, 5, 6, 7, 24, 63, 48, 33, 50, 35, 52, 51, 36, 53, 54, 55, 40, 47, 32, 49, 34, 51, 36, 35, 52, 37, 38, 39, 56>), dim3(num_blocks), dim3(threads_per_block), 0, 0, unchecked_hashes, bitsplitted_keys);
}
void des_25_encrypt_salt1899(const size_t num_blocks, const size_t threads_per_block, vtype* const unchecked_hashes, const vtype* const bitsplitted_keys)
{
hipLaunchKernelGGL(( des_25_encrypt<15, 16, 1, 18, 3, 20, 19, 4, 21, 22, 23, 8, 31, 0, 17, 2, 19, 4, 3, 20, 5, 6, 7, 24, 47, 48, 33, 50, 35, 52, 51, 36, 53, 54, 55, 40, 63, 32, 49, 34, 51, 36, 35, 52, 37, 38, 39, 56>), dim3(num_blocks), dim3(threads_per_block), 0, 0, unchecked_hashes, bitsplitted_keys);
}
void des_25_encrypt_salt1900(const size_t num_blocks, const size_t threads_per_block, vtype* const unchecked_hashes, const vtype* const bitsplitted_keys)
{
hipLaunchKernelGGL(( des_25_encrypt<31, 0, 17, 18, 3, 20, 19, 4, 21, 22, 23, 8, 15, 16, 1, 2, 19, 4, 3, 20, 5, 6, 7, 24, 63, 32, 49, 50, 35, 52, 51, 36, 53, 54, 55, 40, 47, 48, 33, 34, 51, 36, 35, 52, 37, 38, 39, 56>), dim3(num_blocks), dim3(threads_per_block), 0, 0, unchecked_hashes, bitsplitted_keys);
}
void des_25_encrypt_salt1901(const size_t num_blocks, const size_t threads_per_block, vtype* const unchecked_hashes, const vtype* const bitsplitted_keys)
{
hipLaunchKernelGGL(( des_25_encrypt<15, 0, 17, 18, 3, 20, 19, 4, 21, 22, 23, 8, 31, 16, 1, 2, 19, 4, 3, 20, 5, 6, 7, 24, 47, 32, 49, 50, 35, 52, 51, 36, 53, 54, 55, 40, 63, 48, 33, 34, 51, 36, 35, 52, 37, 38, 39, 56>), dim3(num_blocks), dim3(threads_per_block), 0, 0, unchecked_hashes, bitsplitted_keys);
}
void des_25_encrypt_salt1902(const size_t num_blocks, const size_t threads_per_block, vtype* const unchecked_hashes, const vtype* const bitsplitted_keys)
{
hipLaunchKernelGGL(( des_25_encrypt<31, 16, 17, 18, 3, 20, 19, 4, 21, 22, 23, 8, 15, 0, 1, 2, 19, 4, 3, 20, 5, 6, 7, 24, 63, 48, 49, 50, 35, 52, 51, 36, 53, 54, 55, 40, 47, 32, 33, 34, 51, 36, 35, 52, 37, 38, 39, 56>), dim3(num_blocks), dim3(threads_per_block), 0, 0, unchecked_hashes, bitsplitted_keys);
}
void des_25_encrypt_salt1903(const size_t num_blocks, const size_t threads_per_block, vtype* const unchecked_hashes, const vtype* const bitsplitted_keys)
{
hipLaunchKernelGGL(( des_25_encrypt<15, 16, 17, 18, 3, 20, 19, 4, 21, 22, 23, 8, 31, 0, 1, 2, 19, 4, 3, 20, 5, 6, 7, 24, 47, 48, 49, 50, 35, 52, 51, 36, 53, 54, 55, 40, 63, 32, 33, 34, 51, 36, 35, 52, 37, 38, 39, 56>), dim3(num_blocks), dim3(threads_per_block), 0, 0, unchecked_hashes, bitsplitted_keys);
}
void des_25_encrypt_salt1904(const size_t num_blocks, const size_t threads_per_block, vtype* const unchecked_hashes, const vtype* const bitsplitted_keys)
{
hipLaunchKernelGGL(( des_25_encrypt<31, 0, 1, 2, 19, 20, 19, 4, 21, 22, 23, 8, 15, 16, 17, 18, 3, 4, 3, 20, 5, 6, 7, 24, 63, 32, 33, 34, 51, 52, 51, 36, 53, 54, 55, 40, 47, 48, 49, 50, 35, 36, 35, 52, 37, 38, 39, 56>), dim3(num_blocks), dim3(threads_per_block), 0, 0, unchecked_hashes, bitsplitted_keys);
}
void des_25_encrypt_salt1905(const size_t num_blocks, const size_t threads_per_block, vtype* const unchecked_hashes, const vtype* const bitsplitted_keys)
{
hipLaunchKernelGGL(( des_25_encrypt<15, 0, 1, 2, 19, 20, 19, 4, 21, 22, 23, 8, 31, 16, 17, 18, 3, 4, 3, 20, 5, 6, 7, 24, 47, 32, 33, 34, 51, 52, 51, 36, 53, 54, 55, 40, 63, 48, 49, 50, 35, 36, 35, 52, 37, 38, 39, 56>), dim3(num_blocks), dim3(threads_per_block), 0, 0, unchecked_hashes, bitsplitted_keys);
}
void des_25_encrypt_salt1906(const size_t num_blocks, const size_t threads_per_block, vtype* const unchecked_hashes, const vtype* const bitsplitted_keys)
{
hipLaunchKernelGGL(( des_25_encrypt<31, 16, 1, 2, 19, 20, 19, 4, 21, 22, 23, 8, 15, 0, 17, 18, 3, 4, 3, 20, 5, 6, 7, 24, 63, 48, 33, 34, 51, 52, 51, 36, 53, 54, 55, 40, 47, 32, 49, 50, 35, 36, 35, 52, 37, 38, 39, 56>), dim3(num_blocks), dim3(threads_per_block), 0, 0, unchecked_hashes, bitsplitted_keys);
}
void des_25_encrypt_salt1907(const size_t num_blocks, const size_t threads_per_block, vtype* const unchecked_hashes, const vtype* const bitsplitted_keys)
{
hipLaunchKernelGGL(( des_25_encrypt<15, 16, 1, 2, 19, 20, 19, 4, 21, 22, 23, 8, 31, 0, 17, 18, 3, 4, 3, 20, 5, 6, 7, 24, 47, 48, 33, 34, 51, 52, 51, 36, 53, 54, 55, 40, 63, 32, 49, 50, 35, 36, 35, 52, 37, 38, 39, 56>), dim3(num_blocks), dim3(threads_per_block), 0, 0, unchecked_hashes, bitsplitted_keys);
}
void des_25_encrypt_salt1908(const size_t num_blocks, const size_t threads_per_block, vtype* const unchecked_hashes, const vtype* const bitsplitted_keys)
{
hipLaunchKernelGGL(( des_25_encrypt<31, 0, 17, 2, 19, 20, 19, 4, 21, 22, 23, 8, 15, 16, 1, 18, 3, 4, 3, 20, 5, 6, 7, 24, 63, 32, 49, 34, 51, 52, 51, 36, 53, 54, 55, 40, 47, 48, 33, 50, 35, 36, 35, 52, 37, 38, 39, 56>), dim3(num_blocks), dim3(threads_per_block), 0, 0, unchecked_hashes, bitsplitted_keys);
}
void des_25_encrypt_salt1909(const size_t num_blocks, const size_t threads_per_block, vtype* const unchecked_hashes, const vtype* const bitsplitted_keys)
{
hipLaunchKernelGGL(( des_25_encrypt<15, 0, 17, 2, 19, 20, 19, 4, 21, 22, 23, 8, 31, 16, 1, 18, 3, 4, 3, 20, 5, 6, 7, 24, 47, 32, 49, 34, 51, 52, 51, 36, 53, 54, 55, 40, 63, 48, 33, 50, 35, 36, 35, 52, 37, 38, 39, 56>), dim3(num_blocks), dim3(threads_per_block), 0, 0, unchecked_hashes, bitsplitted_keys);
}
void des_25_encrypt_salt1910(const size_t num_blocks, const size_t threads_per_block, vtype* const unchecked_hashes, const vtype* const bitsplitted_keys)
{
hipLaunchKernelGGL(( des_25_encrypt<31, 16, 17, 2, 19, 20, 19, 4, 21, 22, 23, 8, 15, 0, 1, 18, 3, 4, 3, 20, 5, 6, 7, 24, 63, 48, 49, 34, 51, 52, 51, 36, 53, 54, 55, 40, 47, 32, 33, 50, 35, 36, 35, 52, 37, 38, 39, 56>), dim3(num_blocks), dim3(threads_per_block), 0, 0, unchecked_hashes, bitsplitted_keys);
}
void des_25_encrypt_salt1911(const size_t num_blocks, const size_t threads_per_block, vtype* const unchecked_hashes, const vtype* const bitsplitted_keys)
{
hipLaunchKernelGGL(( des_25_encrypt<15, 16, 17, 2, 19, 20, 19, 4, 21, 22, 23, 8, 31, 0, 1, 18, 3, 4, 3, 20, 5, 6, 7, 24, 47, 48, 49, 34, 51, 52, 51, 36, 53, 54, 55, 40, 63, 32, 33, 50, 35, 36, 35, 52, 37, 38, 39, 56>), dim3(num_blocks), dim3(threads_per_block), 0, 0, unchecked_hashes, bitsplitted_keys);
}
void des_25_encrypt_salt1912(const size_t num_blocks, const size_t threads_per_block, vtype* const unchecked_hashes, const vtype* const bitsplitted_keys)
{
hipLaunchKernelGGL(( des_25_encrypt<31, 0, 1, 18, 19, 20, 19, 4, 21, 22, 23, 8, 15, 16, 17, 2, 3, 4, 3, 20, 5, 6, 7, 24, 63, 32, 33, 50, 51, 52, 51, 36, 53, 54, 55, 40, 47, 48, 49, 34, 35, 36, 35, 52, 37, 38, 39, 56>), dim3(num_blocks), dim3(threads_per_block), 0, 0, unchecked_hashes, bitsplitted_keys);
}
void des_25_encrypt_salt1913(const size_t num_blocks, const size_t threads_per_block, vtype* const unchecked_hashes, const vtype* const bitsplitted_keys)
{
hipLaunchKernelGGL(( des_25_encrypt<15, 0, 1, 18, 19, 20, 19, 4, 21, 22, 23, 8, 31, 16, 17, 2, 3, 4, 3, 20, 5, 6, 7, 24, 47, 32, 33, 50, 51, 52, 51, 36, 53, 54, 55, 40, 63, 48, 49, 34, 35, 36, 35, 52, 37, 38, 39, 56>), dim3(num_blocks), dim3(threads_per_block), 0, 0, unchecked_hashes, bitsplitted_keys);
}
void des_25_encrypt_salt1914(const size_t num_blocks, const size_t threads_per_block, vtype* const unchecked_hashes, const vtype* const bitsplitted_keys)
{
hipLaunchKernelGGL(( des_25_encrypt<31, 16, 1, 18, 19, 20, 19, 4, 21, 22, 23, 8, 15, 0, 17, 2, 3, 4, 3, 20, 5, 6, 7, 24, 63, 48, 33, 50, 51, 52, 51, 36, 53, 54, 55, 40, 47, 32, 49, 34, 35, 36, 35, 52, 37, 38, 39, 56>), dim3(num_blocks), dim3(threads_per_block), 0, 0, unchecked_hashes, bitsplitted_keys);
}
void des_25_encrypt_salt1915(const size_t num_blocks, const size_t threads_per_block, vtype* const unchecked_hashes, const vtype* const bitsplitted_keys)
{
hipLaunchKernelGGL(( des_25_encrypt<15, 16, 1, 18, 19, 20, 19, 4, 21, 22, 23, 8, 31, 0, 17, 2, 3, 4, 3, 20, 5, 6, 7, 24, 47, 48, 33, 50, 51, 52, 51, 36, 53, 54, 55, 40, 63, 32, 49, 34, 35, 36, 35, 52, 37, 38, 39, 56>), dim3(num_blocks), dim3(threads_per_block), 0, 0, unchecked_hashes, bitsplitted_keys);
}
void des_25_encrypt_salt1916(const size_t num_blocks, const size_t threads_per_block, vtype* const unchecked_hashes, const vtype* const bitsplitted_keys)
{
hipLaunchKernelGGL(( des_25_encrypt<31, 0, 17, 18, 19, 20, 19, 4, 21, 22, 23, 8, 15, 16, 1, 2, 3, 4, 3, 20, 5, 6, 7, 24, 63, 32, 49, 50, 51, 52, 51, 36, 53, 54, 55, 40, 47, 48, 33, 34, 35, 36, 35, 52, 37, 38, 39, 56>), dim3(num_blocks), dim3(threads_per_block), 0, 0, unchecked_hashes, bitsplitted_keys);
}
void des_25_encrypt_salt1917(const size_t num_blocks, const size_t threads_per_block, vtype* const unchecked_hashes, const vtype* const bitsplitted_keys)
{
hipLaunchKernelGGL(( des_25_encrypt<15, 0, 17, 18, 19, 20, 19, 4, 21, 22, 23, 8, 31, 16, 1, 2, 3, 4, 3, 20, 5, 6, 7, 24, 47, 32, 49, 50, 51, 52, 51, 36, 53, 54, 55, 40, 63, 48, 33, 34, 35, 36, 35, 52, 37, 38, 39, 56>), dim3(num_blocks), dim3(threads_per_block), 0, 0, unchecked_hashes, bitsplitted_keys);
}
void des_25_encrypt_salt1918(const size_t num_blocks, const size_t threads_per_block, vtype* const unchecked_hashes, const vtype* const bitsplitted_keys)
{
hipLaunchKernelGGL(( des_25_encrypt<31, 16, 17, 18, 19, 20, 19, 4, 21, 22, 23, 8, 15, 0, 1, 2, 3, 4, 3, 20, 5, 6, 7, 24, 63, 48, 49, 50, 51, 52, 51, 36, 53, 54, 55, 40, 47, 32, 33, 34, 35, 36, 35, 52, 37, 38, 39, 56>), dim3(num_blocks), dim3(threads_per_block), 0, 0, unchecked_hashes, bitsplitted_keys);
}
void des_25_encrypt_salt1919(const size_t num_blocks, const size_t threads_per_block, vtype* const unchecked_hashes, const vtype* const bitsplitted_keys)
{
hipLaunchKernelGGL(( des_25_encrypt<15, 16, 17, 18, 19, 20, 19, 4, 21, 22, 23, 8, 31, 0, 1, 2, 3, 4, 3, 20, 5, 6, 7, 24, 47, 48, 49, 50, 51, 52, 51, 36, 53, 54, 55, 40, 63, 32, 33, 34, 35, 36, 35, 52, 37, 38, 39, 56>), dim3(num_blocks), dim3(threads_per_block), 0, 0, unchecked_hashes, bitsplitted_keys);
}
#endif // DESGPU_COMPILE_ALL_SALTS
| 71af820f087b058c9a6b5e47b57d55cbfac22d6a.cu | #include "des_kernel_encrypt.h"
#include "des_kernel_salt_instances.h"
#ifdef DESGPU_COMPILE_ALL_SALTS
void des_25_encrypt_salt1792(const size_t num_blocks, const size_t threads_per_block, vtype* const unchecked_hashes, const vtype* const bitsplitted_keys)
{
des_25_encrypt<31, 0, 1, 2, 3, 4, 3, 4, 21, 22, 23, 8, 15, 16, 17, 18, 19, 20, 19, 20, 5, 6, 7, 24, 63, 32, 33, 34, 35, 36, 35, 36, 53, 54, 55, 40, 47, 48, 49, 50, 51, 52, 51, 52, 37, 38, 39, 56><<<num_blocks, threads_per_block>>>(unchecked_hashes, bitsplitted_keys);
}
void des_25_encrypt_salt1793(const size_t num_blocks, const size_t threads_per_block, vtype* const unchecked_hashes, const vtype* const bitsplitted_keys)
{
des_25_encrypt<15, 0, 1, 2, 3, 4, 3, 4, 21, 22, 23, 8, 31, 16, 17, 18, 19, 20, 19, 20, 5, 6, 7, 24, 47, 32, 33, 34, 35, 36, 35, 36, 53, 54, 55, 40, 63, 48, 49, 50, 51, 52, 51, 52, 37, 38, 39, 56><<<num_blocks, threads_per_block>>>(unchecked_hashes, bitsplitted_keys);
}
void des_25_encrypt_salt1794(const size_t num_blocks, const size_t threads_per_block, vtype* const unchecked_hashes, const vtype* const bitsplitted_keys)
{
des_25_encrypt<31, 16, 1, 2, 3, 4, 3, 4, 21, 22, 23, 8, 15, 0, 17, 18, 19, 20, 19, 20, 5, 6, 7, 24, 63, 48, 33, 34, 35, 36, 35, 36, 53, 54, 55, 40, 47, 32, 49, 50, 51, 52, 51, 52, 37, 38, 39, 56><<<num_blocks, threads_per_block>>>(unchecked_hashes, bitsplitted_keys);
}
void des_25_encrypt_salt1795(const size_t num_blocks, const size_t threads_per_block, vtype* const unchecked_hashes, const vtype* const bitsplitted_keys)
{
des_25_encrypt<15, 16, 1, 2, 3, 4, 3, 4, 21, 22, 23, 8, 31, 0, 17, 18, 19, 20, 19, 20, 5, 6, 7, 24, 47, 48, 33, 34, 35, 36, 35, 36, 53, 54, 55, 40, 63, 32, 49, 50, 51, 52, 51, 52, 37, 38, 39, 56><<<num_blocks, threads_per_block>>>(unchecked_hashes, bitsplitted_keys);
}
void des_25_encrypt_salt1796(const size_t num_blocks, const size_t threads_per_block, vtype* const unchecked_hashes, const vtype* const bitsplitted_keys)
{
des_25_encrypt<31, 0, 17, 2, 3, 4, 3, 4, 21, 22, 23, 8, 15, 16, 1, 18, 19, 20, 19, 20, 5, 6, 7, 24, 63, 32, 49, 34, 35, 36, 35, 36, 53, 54, 55, 40, 47, 48, 33, 50, 51, 52, 51, 52, 37, 38, 39, 56><<<num_blocks, threads_per_block>>>(unchecked_hashes, bitsplitted_keys);
}
void des_25_encrypt_salt1797(const size_t num_blocks, const size_t threads_per_block, vtype* const unchecked_hashes, const vtype* const bitsplitted_keys)
{
des_25_encrypt<15, 0, 17, 2, 3, 4, 3, 4, 21, 22, 23, 8, 31, 16, 1, 18, 19, 20, 19, 20, 5, 6, 7, 24, 47, 32, 49, 34, 35, 36, 35, 36, 53, 54, 55, 40, 63, 48, 33, 50, 51, 52, 51, 52, 37, 38, 39, 56><<<num_blocks, threads_per_block>>>(unchecked_hashes, bitsplitted_keys);
}
void des_25_encrypt_salt1798(const size_t num_blocks, const size_t threads_per_block, vtype* const unchecked_hashes, const vtype* const bitsplitted_keys)
{
des_25_encrypt<31, 16, 17, 2, 3, 4, 3, 4, 21, 22, 23, 8, 15, 0, 1, 18, 19, 20, 19, 20, 5, 6, 7, 24, 63, 48, 49, 34, 35, 36, 35, 36, 53, 54, 55, 40, 47, 32, 33, 50, 51, 52, 51, 52, 37, 38, 39, 56><<<num_blocks, threads_per_block>>>(unchecked_hashes, bitsplitted_keys);
}
void des_25_encrypt_salt1799(const size_t num_blocks, const size_t threads_per_block, vtype* const unchecked_hashes, const vtype* const bitsplitted_keys)
{
des_25_encrypt<15, 16, 17, 2, 3, 4, 3, 4, 21, 22, 23, 8, 31, 0, 1, 18, 19, 20, 19, 20, 5, 6, 7, 24, 47, 48, 49, 34, 35, 36, 35, 36, 53, 54, 55, 40, 63, 32, 33, 50, 51, 52, 51, 52, 37, 38, 39, 56><<<num_blocks, threads_per_block>>>(unchecked_hashes, bitsplitted_keys);
}
void des_25_encrypt_salt1800(const size_t num_blocks, const size_t threads_per_block, vtype* const unchecked_hashes, const vtype* const bitsplitted_keys)
{
des_25_encrypt<31, 0, 1, 18, 3, 4, 3, 4, 21, 22, 23, 8, 15, 16, 17, 2, 19, 20, 19, 20, 5, 6, 7, 24, 63, 32, 33, 50, 35, 36, 35, 36, 53, 54, 55, 40, 47, 48, 49, 34, 51, 52, 51, 52, 37, 38, 39, 56><<<num_blocks, threads_per_block>>>(unchecked_hashes, bitsplitted_keys);
}
void des_25_encrypt_salt1801(const size_t num_blocks, const size_t threads_per_block, vtype* const unchecked_hashes, const vtype* const bitsplitted_keys)
{
des_25_encrypt<15, 0, 1, 18, 3, 4, 3, 4, 21, 22, 23, 8, 31, 16, 17, 2, 19, 20, 19, 20, 5, 6, 7, 24, 47, 32, 33, 50, 35, 36, 35, 36, 53, 54, 55, 40, 63, 48, 49, 34, 51, 52, 51, 52, 37, 38, 39, 56><<<num_blocks, threads_per_block>>>(unchecked_hashes, bitsplitted_keys);
}
void des_25_encrypt_salt1802(const size_t num_blocks, const size_t threads_per_block, vtype* const unchecked_hashes, const vtype* const bitsplitted_keys)
{
des_25_encrypt<31, 16, 1, 18, 3, 4, 3, 4, 21, 22, 23, 8, 15, 0, 17, 2, 19, 20, 19, 20, 5, 6, 7, 24, 63, 48, 33, 50, 35, 36, 35, 36, 53, 54, 55, 40, 47, 32, 49, 34, 51, 52, 51, 52, 37, 38, 39, 56><<<num_blocks, threads_per_block>>>(unchecked_hashes, bitsplitted_keys);
}
void des_25_encrypt_salt1803(const size_t num_blocks, const size_t threads_per_block, vtype* const unchecked_hashes, const vtype* const bitsplitted_keys)
{
des_25_encrypt<15, 16, 1, 18, 3, 4, 3, 4, 21, 22, 23, 8, 31, 0, 17, 2, 19, 20, 19, 20, 5, 6, 7, 24, 47, 48, 33, 50, 35, 36, 35, 36, 53, 54, 55, 40, 63, 32, 49, 34, 51, 52, 51, 52, 37, 38, 39, 56><<<num_blocks, threads_per_block>>>(unchecked_hashes, bitsplitted_keys);
}
void des_25_encrypt_salt1804(const size_t num_blocks, const size_t threads_per_block, vtype* const unchecked_hashes, const vtype* const bitsplitted_keys)
{
des_25_encrypt<31, 0, 17, 18, 3, 4, 3, 4, 21, 22, 23, 8, 15, 16, 1, 2, 19, 20, 19, 20, 5, 6, 7, 24, 63, 32, 49, 50, 35, 36, 35, 36, 53, 54, 55, 40, 47, 48, 33, 34, 51, 52, 51, 52, 37, 38, 39, 56><<<num_blocks, threads_per_block>>>(unchecked_hashes, bitsplitted_keys);
}
void des_25_encrypt_salt1805(const size_t num_blocks, const size_t threads_per_block, vtype* const unchecked_hashes, const vtype* const bitsplitted_keys)
{
des_25_encrypt<15, 0, 17, 18, 3, 4, 3, 4, 21, 22, 23, 8, 31, 16, 1, 2, 19, 20, 19, 20, 5, 6, 7, 24, 47, 32, 49, 50, 35, 36, 35, 36, 53, 54, 55, 40, 63, 48, 33, 34, 51, 52, 51, 52, 37, 38, 39, 56><<<num_blocks, threads_per_block>>>(unchecked_hashes, bitsplitted_keys);
}
void des_25_encrypt_salt1806(const size_t num_blocks, const size_t threads_per_block, vtype* const unchecked_hashes, const vtype* const bitsplitted_keys)
{
des_25_encrypt<31, 16, 17, 18, 3, 4, 3, 4, 21, 22, 23, 8, 15, 0, 1, 2, 19, 20, 19, 20, 5, 6, 7, 24, 63, 48, 49, 50, 35, 36, 35, 36, 53, 54, 55, 40, 47, 32, 33, 34, 51, 52, 51, 52, 37, 38, 39, 56><<<num_blocks, threads_per_block>>>(unchecked_hashes, bitsplitted_keys);
}
void des_25_encrypt_salt1807(const size_t num_blocks, const size_t threads_per_block, vtype* const unchecked_hashes, const vtype* const bitsplitted_keys)
{
des_25_encrypt<15, 16, 17, 18, 3, 4, 3, 4, 21, 22, 23, 8, 31, 0, 1, 2, 19, 20, 19, 20, 5, 6, 7, 24, 47, 48, 49, 50, 35, 36, 35, 36, 53, 54, 55, 40, 63, 32, 33, 34, 51, 52, 51, 52, 37, 38, 39, 56><<<num_blocks, threads_per_block>>>(unchecked_hashes, bitsplitted_keys);
}
void des_25_encrypt_salt1808(const size_t num_blocks, const size_t threads_per_block, vtype* const unchecked_hashes, const vtype* const bitsplitted_keys)
{
des_25_encrypt<31, 0, 1, 2, 19, 4, 3, 4, 21, 22, 23, 8, 15, 16, 17, 18, 3, 20, 19, 20, 5, 6, 7, 24, 63, 32, 33, 34, 51, 36, 35, 36, 53, 54, 55, 40, 47, 48, 49, 50, 35, 52, 51, 52, 37, 38, 39, 56><<<num_blocks, threads_per_block>>>(unchecked_hashes, bitsplitted_keys);
}
void des_25_encrypt_salt1809(const size_t num_blocks, const size_t threads_per_block, vtype* const unchecked_hashes, const vtype* const bitsplitted_keys)
{
des_25_encrypt<15, 0, 1, 2, 19, 4, 3, 4, 21, 22, 23, 8, 31, 16, 17, 18, 3, 20, 19, 20, 5, 6, 7, 24, 47, 32, 33, 34, 51, 36, 35, 36, 53, 54, 55, 40, 63, 48, 49, 50, 35, 52, 51, 52, 37, 38, 39, 56><<<num_blocks, threads_per_block>>>(unchecked_hashes, bitsplitted_keys);
}
void des_25_encrypt_salt1810(const size_t num_blocks, const size_t threads_per_block, vtype* const unchecked_hashes, const vtype* const bitsplitted_keys)
{
des_25_encrypt<31, 16, 1, 2, 19, 4, 3, 4, 21, 22, 23, 8, 15, 0, 17, 18, 3, 20, 19, 20, 5, 6, 7, 24, 63, 48, 33, 34, 51, 36, 35, 36, 53, 54, 55, 40, 47, 32, 49, 50, 35, 52, 51, 52, 37, 38, 39, 56><<<num_blocks, threads_per_block>>>(unchecked_hashes, bitsplitted_keys);
}
void des_25_encrypt_salt1811(const size_t num_blocks, const size_t threads_per_block, vtype* const unchecked_hashes, const vtype* const bitsplitted_keys)
{
des_25_encrypt<15, 16, 1, 2, 19, 4, 3, 4, 21, 22, 23, 8, 31, 0, 17, 18, 3, 20, 19, 20, 5, 6, 7, 24, 47, 48, 33, 34, 51, 36, 35, 36, 53, 54, 55, 40, 63, 32, 49, 50, 35, 52, 51, 52, 37, 38, 39, 56><<<num_blocks, threads_per_block>>>(unchecked_hashes, bitsplitted_keys);
}
void des_25_encrypt_salt1812(const size_t num_blocks, const size_t threads_per_block, vtype* const unchecked_hashes, const vtype* const bitsplitted_keys)
{
des_25_encrypt<31, 0, 17, 2, 19, 4, 3, 4, 21, 22, 23, 8, 15, 16, 1, 18, 3, 20, 19, 20, 5, 6, 7, 24, 63, 32, 49, 34, 51, 36, 35, 36, 53, 54, 55, 40, 47, 48, 33, 50, 35, 52, 51, 52, 37, 38, 39, 56><<<num_blocks, threads_per_block>>>(unchecked_hashes, bitsplitted_keys);
}
void des_25_encrypt_salt1813(const size_t num_blocks, const size_t threads_per_block, vtype* const unchecked_hashes, const vtype* const bitsplitted_keys)
{
des_25_encrypt<15, 0, 17, 2, 19, 4, 3, 4, 21, 22, 23, 8, 31, 16, 1, 18, 3, 20, 19, 20, 5, 6, 7, 24, 47, 32, 49, 34, 51, 36, 35, 36, 53, 54, 55, 40, 63, 48, 33, 50, 35, 52, 51, 52, 37, 38, 39, 56><<<num_blocks, threads_per_block>>>(unchecked_hashes, bitsplitted_keys);
}
void des_25_encrypt_salt1814(const size_t num_blocks, const size_t threads_per_block, vtype* const unchecked_hashes, const vtype* const bitsplitted_keys)
{
des_25_encrypt<31, 16, 17, 2, 19, 4, 3, 4, 21, 22, 23, 8, 15, 0, 1, 18, 3, 20, 19, 20, 5, 6, 7, 24, 63, 48, 49, 34, 51, 36, 35, 36, 53, 54, 55, 40, 47, 32, 33, 50, 35, 52, 51, 52, 37, 38, 39, 56><<<num_blocks, threads_per_block>>>(unchecked_hashes, bitsplitted_keys);
}
void des_25_encrypt_salt1815(const size_t num_blocks, const size_t threads_per_block, vtype* const unchecked_hashes, const vtype* const bitsplitted_keys)
{
des_25_encrypt<15, 16, 17, 2, 19, 4, 3, 4, 21, 22, 23, 8, 31, 0, 1, 18, 3, 20, 19, 20, 5, 6, 7, 24, 47, 48, 49, 34, 51, 36, 35, 36, 53, 54, 55, 40, 63, 32, 33, 50, 35, 52, 51, 52, 37, 38, 39, 56><<<num_blocks, threads_per_block>>>(unchecked_hashes, bitsplitted_keys);
}
void des_25_encrypt_salt1816(const size_t num_blocks, const size_t threads_per_block, vtype* const unchecked_hashes, const vtype* const bitsplitted_keys)
{
des_25_encrypt<31, 0, 1, 18, 19, 4, 3, 4, 21, 22, 23, 8, 15, 16, 17, 2, 3, 20, 19, 20, 5, 6, 7, 24, 63, 32, 33, 50, 51, 36, 35, 36, 53, 54, 55, 40, 47, 48, 49, 34, 35, 52, 51, 52, 37, 38, 39, 56><<<num_blocks, threads_per_block>>>(unchecked_hashes, bitsplitted_keys);
}
// Auto-generated host launcher: instantiates the des_25_encrypt kernel with the
// 48 compile-time bit-index template arguments for salt 1817 (indices presumably
// encode the salt-dependent DES bit swaps — verify against the generator).
void des_25_encrypt_salt1817(const size_t num_blocks, const size_t threads_per_block, vtype* const unchecked_hashes, const vtype* const bitsplitted_keys)
{
des_25_encrypt<15, 0, 1, 18, 19, 4, 3, 4, 21, 22, 23, 8, 31, 16, 17, 2, 3, 20, 19, 20, 5, 6, 7, 24, 47, 32, 33, 50, 51, 36, 35, 36, 53, 54, 55, 40, 63, 48, 49, 34, 35, 52, 51, 52, 37, 38, 39, 56><<<num_blocks, threads_per_block>>>(unchecked_hashes, bitsplitted_keys);
}
// Auto-generated host launcher: instantiates the des_25_encrypt kernel with the
// 48 compile-time bit-index template arguments for salt 1818 (indices presumably
// encode the salt-dependent DES bit swaps — verify against the generator).
void des_25_encrypt_salt1818(const size_t num_blocks, const size_t threads_per_block, vtype* const unchecked_hashes, const vtype* const bitsplitted_keys)
{
des_25_encrypt<31, 16, 1, 18, 19, 4, 3, 4, 21, 22, 23, 8, 15, 0, 17, 2, 3, 20, 19, 20, 5, 6, 7, 24, 63, 48, 33, 50, 51, 36, 35, 36, 53, 54, 55, 40, 47, 32, 49, 34, 35, 52, 51, 52, 37, 38, 39, 56><<<num_blocks, threads_per_block>>>(unchecked_hashes, bitsplitted_keys);
}
// Auto-generated host launcher: instantiates the des_25_encrypt kernel with the
// 48 compile-time bit-index template arguments for salt 1819 (indices presumably
// encode the salt-dependent DES bit swaps — verify against the generator).
void des_25_encrypt_salt1819(const size_t num_blocks, const size_t threads_per_block, vtype* const unchecked_hashes, const vtype* const bitsplitted_keys)
{
des_25_encrypt<15, 16, 1, 18, 19, 4, 3, 4, 21, 22, 23, 8, 31, 0, 17, 2, 3, 20, 19, 20, 5, 6, 7, 24, 47, 48, 33, 50, 51, 36, 35, 36, 53, 54, 55, 40, 63, 32, 49, 34, 35, 52, 51, 52, 37, 38, 39, 56><<<num_blocks, threads_per_block>>>(unchecked_hashes, bitsplitted_keys);
}
// Auto-generated host launcher: instantiates the des_25_encrypt kernel with the
// 48 compile-time bit-index template arguments for salt 1820 (indices presumably
// encode the salt-dependent DES bit swaps — verify against the generator).
void des_25_encrypt_salt1820(const size_t num_blocks, const size_t threads_per_block, vtype* const unchecked_hashes, const vtype* const bitsplitted_keys)
{
des_25_encrypt<31, 0, 17, 18, 19, 4, 3, 4, 21, 22, 23, 8, 15, 16, 1, 2, 3, 20, 19, 20, 5, 6, 7, 24, 63, 32, 49, 50, 51, 36, 35, 36, 53, 54, 55, 40, 47, 48, 33, 34, 35, 52, 51, 52, 37, 38, 39, 56><<<num_blocks, threads_per_block>>>(unchecked_hashes, bitsplitted_keys);
}
// Auto-generated host launcher: instantiates the des_25_encrypt kernel with the
// 48 compile-time bit-index template arguments for salt 1821 (indices presumably
// encode the salt-dependent DES bit swaps — verify against the generator).
void des_25_encrypt_salt1821(const size_t num_blocks, const size_t threads_per_block, vtype* const unchecked_hashes, const vtype* const bitsplitted_keys)
{
des_25_encrypt<15, 0, 17, 18, 19, 4, 3, 4, 21, 22, 23, 8, 31, 16, 1, 2, 3, 20, 19, 20, 5, 6, 7, 24, 47, 32, 49, 50, 51, 36, 35, 36, 53, 54, 55, 40, 63, 48, 33, 34, 35, 52, 51, 52, 37, 38, 39, 56><<<num_blocks, threads_per_block>>>(unchecked_hashes, bitsplitted_keys);
}
// Auto-generated host launcher: instantiates the des_25_encrypt kernel with the
// 48 compile-time bit-index template arguments for salt 1822 (indices presumably
// encode the salt-dependent DES bit swaps — verify against the generator).
void des_25_encrypt_salt1822(const size_t num_blocks, const size_t threads_per_block, vtype* const unchecked_hashes, const vtype* const bitsplitted_keys)
{
des_25_encrypt<31, 16, 17, 18, 19, 4, 3, 4, 21, 22, 23, 8, 15, 0, 1, 2, 3, 20, 19, 20, 5, 6, 7, 24, 63, 48, 49, 50, 51, 36, 35, 36, 53, 54, 55, 40, 47, 32, 33, 34, 35, 52, 51, 52, 37, 38, 39, 56><<<num_blocks, threads_per_block>>>(unchecked_hashes, bitsplitted_keys);
}
// Auto-generated host launcher: instantiates the des_25_encrypt kernel with the
// 48 compile-time bit-index template arguments for salt 1823 (indices presumably
// encode the salt-dependent DES bit swaps — verify against the generator).
void des_25_encrypt_salt1823(const size_t num_blocks, const size_t threads_per_block, vtype* const unchecked_hashes, const vtype* const bitsplitted_keys)
{
des_25_encrypt<15, 16, 17, 18, 19, 4, 3, 4, 21, 22, 23, 8, 31, 0, 1, 2, 3, 20, 19, 20, 5, 6, 7, 24, 47, 48, 49, 50, 51, 36, 35, 36, 53, 54, 55, 40, 63, 32, 33, 34, 35, 52, 51, 52, 37, 38, 39, 56><<<num_blocks, threads_per_block>>>(unchecked_hashes, bitsplitted_keys);
}
// Auto-generated host launcher: instantiates the des_25_encrypt kernel with the
// 48 compile-time bit-index template arguments for salt 1824 (indices presumably
// encode the salt-dependent DES bit swaps — verify against the generator).
void des_25_encrypt_salt1824(const size_t num_blocks, const size_t threads_per_block, vtype* const unchecked_hashes, const vtype* const bitsplitted_keys)
{
des_25_encrypt<31, 0, 1, 2, 3, 20, 3, 4, 21, 22, 23, 8, 15, 16, 17, 18, 19, 4, 19, 20, 5, 6, 7, 24, 63, 32, 33, 34, 35, 52, 35, 36, 53, 54, 55, 40, 47, 48, 49, 50, 51, 36, 51, 52, 37, 38, 39, 56><<<num_blocks, threads_per_block>>>(unchecked_hashes, bitsplitted_keys);
}
// Auto-generated host launcher: instantiates the des_25_encrypt kernel with the
// 48 compile-time bit-index template arguments for salt 1825 (indices presumably
// encode the salt-dependent DES bit swaps — verify against the generator).
void des_25_encrypt_salt1825(const size_t num_blocks, const size_t threads_per_block, vtype* const unchecked_hashes, const vtype* const bitsplitted_keys)
{
des_25_encrypt<15, 0, 1, 2, 3, 20, 3, 4, 21, 22, 23, 8, 31, 16, 17, 18, 19, 4, 19, 20, 5, 6, 7, 24, 47, 32, 33, 34, 35, 52, 35, 36, 53, 54, 55, 40, 63, 48, 49, 50, 51, 36, 51, 52, 37, 38, 39, 56><<<num_blocks, threads_per_block>>>(unchecked_hashes, bitsplitted_keys);
}
// Auto-generated host launcher: instantiates the des_25_encrypt kernel with the
// 48 compile-time bit-index template arguments for salt 1826 (indices presumably
// encode the salt-dependent DES bit swaps — verify against the generator).
void des_25_encrypt_salt1826(const size_t num_blocks, const size_t threads_per_block, vtype* const unchecked_hashes, const vtype* const bitsplitted_keys)
{
des_25_encrypt<31, 16, 1, 2, 3, 20, 3, 4, 21, 22, 23, 8, 15, 0, 17, 18, 19, 4, 19, 20, 5, 6, 7, 24, 63, 48, 33, 34, 35, 52, 35, 36, 53, 54, 55, 40, 47, 32, 49, 50, 51, 36, 51, 52, 37, 38, 39, 56><<<num_blocks, threads_per_block>>>(unchecked_hashes, bitsplitted_keys);
}
// Auto-generated host launcher: instantiates the des_25_encrypt kernel with the
// 48 compile-time bit-index template arguments for salt 1827 (indices presumably
// encode the salt-dependent DES bit swaps — verify against the generator).
void des_25_encrypt_salt1827(const size_t num_blocks, const size_t threads_per_block, vtype* const unchecked_hashes, const vtype* const bitsplitted_keys)
{
des_25_encrypt<15, 16, 1, 2, 3, 20, 3, 4, 21, 22, 23, 8, 31, 0, 17, 18, 19, 4, 19, 20, 5, 6, 7, 24, 47, 48, 33, 34, 35, 52, 35, 36, 53, 54, 55, 40, 63, 32, 49, 50, 51, 36, 51, 52, 37, 38, 39, 56><<<num_blocks, threads_per_block>>>(unchecked_hashes, bitsplitted_keys);
}
// Auto-generated host launcher: instantiates the des_25_encrypt kernel with the
// 48 compile-time bit-index template arguments for salt 1828 (indices presumably
// encode the salt-dependent DES bit swaps — verify against the generator).
void des_25_encrypt_salt1828(const size_t num_blocks, const size_t threads_per_block, vtype* const unchecked_hashes, const vtype* const bitsplitted_keys)
{
des_25_encrypt<31, 0, 17, 2, 3, 20, 3, 4, 21, 22, 23, 8, 15, 16, 1, 18, 19, 4, 19, 20, 5, 6, 7, 24, 63, 32, 49, 34, 35, 52, 35, 36, 53, 54, 55, 40, 47, 48, 33, 50, 51, 36, 51, 52, 37, 38, 39, 56><<<num_blocks, threads_per_block>>>(unchecked_hashes, bitsplitted_keys);
}
// Auto-generated host launcher: instantiates the des_25_encrypt kernel with the
// 48 compile-time bit-index template arguments for salt 1829 (indices presumably
// encode the salt-dependent DES bit swaps — verify against the generator).
void des_25_encrypt_salt1829(const size_t num_blocks, const size_t threads_per_block, vtype* const unchecked_hashes, const vtype* const bitsplitted_keys)
{
des_25_encrypt<15, 0, 17, 2, 3, 20, 3, 4, 21, 22, 23, 8, 31, 16, 1, 18, 19, 4, 19, 20, 5, 6, 7, 24, 47, 32, 49, 34, 35, 52, 35, 36, 53, 54, 55, 40, 63, 48, 33, 50, 51, 36, 51, 52, 37, 38, 39, 56><<<num_blocks, threads_per_block>>>(unchecked_hashes, bitsplitted_keys);
}
// Auto-generated host launcher: instantiates the des_25_encrypt kernel with the
// 48 compile-time bit-index template arguments for salt 1830 (indices presumably
// encode the salt-dependent DES bit swaps — verify against the generator).
void des_25_encrypt_salt1830(const size_t num_blocks, const size_t threads_per_block, vtype* const unchecked_hashes, const vtype* const bitsplitted_keys)
{
des_25_encrypt<31, 16, 17, 2, 3, 20, 3, 4, 21, 22, 23, 8, 15, 0, 1, 18, 19, 4, 19, 20, 5, 6, 7, 24, 63, 48, 49, 34, 35, 52, 35, 36, 53, 54, 55, 40, 47, 32, 33, 50, 51, 36, 51, 52, 37, 38, 39, 56><<<num_blocks, threads_per_block>>>(unchecked_hashes, bitsplitted_keys);
}
// Auto-generated host launcher: instantiates the des_25_encrypt kernel with the
// 48 compile-time bit-index template arguments for salt 1831 (indices presumably
// encode the salt-dependent DES bit swaps — verify against the generator).
void des_25_encrypt_salt1831(const size_t num_blocks, const size_t threads_per_block, vtype* const unchecked_hashes, const vtype* const bitsplitted_keys)
{
des_25_encrypt<15, 16, 17, 2, 3, 20, 3, 4, 21, 22, 23, 8, 31, 0, 1, 18, 19, 4, 19, 20, 5, 6, 7, 24, 47, 48, 49, 34, 35, 52, 35, 36, 53, 54, 55, 40, 63, 32, 33, 50, 51, 36, 51, 52, 37, 38, 39, 56><<<num_blocks, threads_per_block>>>(unchecked_hashes, bitsplitted_keys);
}
// Auto-generated host launcher: instantiates the des_25_encrypt kernel with the
// 48 compile-time bit-index template arguments for salt 1832 (indices presumably
// encode the salt-dependent DES bit swaps — verify against the generator).
void des_25_encrypt_salt1832(const size_t num_blocks, const size_t threads_per_block, vtype* const unchecked_hashes, const vtype* const bitsplitted_keys)
{
des_25_encrypt<31, 0, 1, 18, 3, 20, 3, 4, 21, 22, 23, 8, 15, 16, 17, 2, 19, 4, 19, 20, 5, 6, 7, 24, 63, 32, 33, 50, 35, 52, 35, 36, 53, 54, 55, 40, 47, 48, 49, 34, 51, 36, 51, 52, 37, 38, 39, 56><<<num_blocks, threads_per_block>>>(unchecked_hashes, bitsplitted_keys);
}
// Auto-generated host launcher: instantiates the des_25_encrypt kernel with the
// 48 compile-time bit-index template arguments for salt 1833 (indices presumably
// encode the salt-dependent DES bit swaps — verify against the generator).
void des_25_encrypt_salt1833(const size_t num_blocks, const size_t threads_per_block, vtype* const unchecked_hashes, const vtype* const bitsplitted_keys)
{
des_25_encrypt<15, 0, 1, 18, 3, 20, 3, 4, 21, 22, 23, 8, 31, 16, 17, 2, 19, 4, 19, 20, 5, 6, 7, 24, 47, 32, 33, 50, 35, 52, 35, 36, 53, 54, 55, 40, 63, 48, 49, 34, 51, 36, 51, 52, 37, 38, 39, 56><<<num_blocks, threads_per_block>>>(unchecked_hashes, bitsplitted_keys);
}
// Auto-generated host launcher: instantiates the des_25_encrypt kernel with the
// 48 compile-time bit-index template arguments for salt 1834 (indices presumably
// encode the salt-dependent DES bit swaps — verify against the generator).
void des_25_encrypt_salt1834(const size_t num_blocks, const size_t threads_per_block, vtype* const unchecked_hashes, const vtype* const bitsplitted_keys)
{
des_25_encrypt<31, 16, 1, 18, 3, 20, 3, 4, 21, 22, 23, 8, 15, 0, 17, 2, 19, 4, 19, 20, 5, 6, 7, 24, 63, 48, 33, 50, 35, 52, 35, 36, 53, 54, 55, 40, 47, 32, 49, 34, 51, 36, 51, 52, 37, 38, 39, 56><<<num_blocks, threads_per_block>>>(unchecked_hashes, bitsplitted_keys);
}
// Auto-generated host launcher: instantiates the des_25_encrypt kernel with the
// 48 compile-time bit-index template arguments for salt 1835 (indices presumably
// encode the salt-dependent DES bit swaps — verify against the generator).
void des_25_encrypt_salt1835(const size_t num_blocks, const size_t threads_per_block, vtype* const unchecked_hashes, const vtype* const bitsplitted_keys)
{
des_25_encrypt<15, 16, 1, 18, 3, 20, 3, 4, 21, 22, 23, 8, 31, 0, 17, 2, 19, 4, 19, 20, 5, 6, 7, 24, 47, 48, 33, 50, 35, 52, 35, 36, 53, 54, 55, 40, 63, 32, 49, 34, 51, 36, 51, 52, 37, 38, 39, 56><<<num_blocks, threads_per_block>>>(unchecked_hashes, bitsplitted_keys);
}
// Auto-generated host launcher: instantiates the des_25_encrypt kernel with the
// 48 compile-time bit-index template arguments for salt 1836 (indices presumably
// encode the salt-dependent DES bit swaps — verify against the generator).
void des_25_encrypt_salt1836(const size_t num_blocks, const size_t threads_per_block, vtype* const unchecked_hashes, const vtype* const bitsplitted_keys)
{
des_25_encrypt<31, 0, 17, 18, 3, 20, 3, 4, 21, 22, 23, 8, 15, 16, 1, 2, 19, 4, 19, 20, 5, 6, 7, 24, 63, 32, 49, 50, 35, 52, 35, 36, 53, 54, 55, 40, 47, 48, 33, 34, 51, 36, 51, 52, 37, 38, 39, 56><<<num_blocks, threads_per_block>>>(unchecked_hashes, bitsplitted_keys);
}
// Auto-generated host launcher: instantiates the des_25_encrypt kernel with the
// 48 compile-time bit-index template arguments for salt 1837 (indices presumably
// encode the salt-dependent DES bit swaps — verify against the generator).
void des_25_encrypt_salt1837(const size_t num_blocks, const size_t threads_per_block, vtype* const unchecked_hashes, const vtype* const bitsplitted_keys)
{
des_25_encrypt<15, 0, 17, 18, 3, 20, 3, 4, 21, 22, 23, 8, 31, 16, 1, 2, 19, 4, 19, 20, 5, 6, 7, 24, 47, 32, 49, 50, 35, 52, 35, 36, 53, 54, 55, 40, 63, 48, 33, 34, 51, 36, 51, 52, 37, 38, 39, 56><<<num_blocks, threads_per_block>>>(unchecked_hashes, bitsplitted_keys);
}
// Auto-generated host launcher: instantiates the des_25_encrypt kernel with the
// 48 compile-time bit-index template arguments for salt 1838 (indices presumably
// encode the salt-dependent DES bit swaps — verify against the generator).
void des_25_encrypt_salt1838(const size_t num_blocks, const size_t threads_per_block, vtype* const unchecked_hashes, const vtype* const bitsplitted_keys)
{
des_25_encrypt<31, 16, 17, 18, 3, 20, 3, 4, 21, 22, 23, 8, 15, 0, 1, 2, 19, 4, 19, 20, 5, 6, 7, 24, 63, 48, 49, 50, 35, 52, 35, 36, 53, 54, 55, 40, 47, 32, 33, 34, 51, 36, 51, 52, 37, 38, 39, 56><<<num_blocks, threads_per_block>>>(unchecked_hashes, bitsplitted_keys);
}
// Auto-generated host launcher: instantiates the des_25_encrypt kernel with the
// 48 compile-time bit-index template arguments for salt 1839 (indices presumably
// encode the salt-dependent DES bit swaps — verify against the generator).
void des_25_encrypt_salt1839(const size_t num_blocks, const size_t threads_per_block, vtype* const unchecked_hashes, const vtype* const bitsplitted_keys)
{
des_25_encrypt<15, 16, 17, 18, 3, 20, 3, 4, 21, 22, 23, 8, 31, 0, 1, 2, 19, 4, 19, 20, 5, 6, 7, 24, 47, 48, 49, 50, 35, 52, 35, 36, 53, 54, 55, 40, 63, 32, 33, 34, 51, 36, 51, 52, 37, 38, 39, 56><<<num_blocks, threads_per_block>>>(unchecked_hashes, bitsplitted_keys);
}
// Auto-generated host launcher: instantiates the des_25_encrypt kernel with the
// 48 compile-time bit-index template arguments for salt 1840 (indices presumably
// encode the salt-dependent DES bit swaps — verify against the generator).
void des_25_encrypt_salt1840(const size_t num_blocks, const size_t threads_per_block, vtype* const unchecked_hashes, const vtype* const bitsplitted_keys)
{
des_25_encrypt<31, 0, 1, 2, 19, 20, 3, 4, 21, 22, 23, 8, 15, 16, 17, 18, 3, 4, 19, 20, 5, 6, 7, 24, 63, 32, 33, 34, 51, 52, 35, 36, 53, 54, 55, 40, 47, 48, 49, 50, 35, 36, 51, 52, 37, 38, 39, 56><<<num_blocks, threads_per_block>>>(unchecked_hashes, bitsplitted_keys);
}
// Auto-generated host launcher: instantiates the des_25_encrypt kernel with the
// 48 compile-time bit-index template arguments for salt 1841 (indices presumably
// encode the salt-dependent DES bit swaps — verify against the generator).
void des_25_encrypt_salt1841(const size_t num_blocks, const size_t threads_per_block, vtype* const unchecked_hashes, const vtype* const bitsplitted_keys)
{
des_25_encrypt<15, 0, 1, 2, 19, 20, 3, 4, 21, 22, 23, 8, 31, 16, 17, 18, 3, 4, 19, 20, 5, 6, 7, 24, 47, 32, 33, 34, 51, 52, 35, 36, 53, 54, 55, 40, 63, 48, 49, 50, 35, 36, 51, 52, 37, 38, 39, 56><<<num_blocks, threads_per_block>>>(unchecked_hashes, bitsplitted_keys);
}
// Auto-generated host launcher: instantiates the des_25_encrypt kernel with the
// 48 compile-time bit-index template arguments for salt 1842 (indices presumably
// encode the salt-dependent DES bit swaps — verify against the generator).
void des_25_encrypt_salt1842(const size_t num_blocks, const size_t threads_per_block, vtype* const unchecked_hashes, const vtype* const bitsplitted_keys)
{
des_25_encrypt<31, 16, 1, 2, 19, 20, 3, 4, 21, 22, 23, 8, 15, 0, 17, 18, 3, 4, 19, 20, 5, 6, 7, 24, 63, 48, 33, 34, 51, 52, 35, 36, 53, 54, 55, 40, 47, 32, 49, 50, 35, 36, 51, 52, 37, 38, 39, 56><<<num_blocks, threads_per_block>>>(unchecked_hashes, bitsplitted_keys);
}
// Auto-generated host launcher: instantiates the des_25_encrypt kernel with the
// 48 compile-time bit-index template arguments for salt 1843 (indices presumably
// encode the salt-dependent DES bit swaps — verify against the generator).
void des_25_encrypt_salt1843(const size_t num_blocks, const size_t threads_per_block, vtype* const unchecked_hashes, const vtype* const bitsplitted_keys)
{
des_25_encrypt<15, 16, 1, 2, 19, 20, 3, 4, 21, 22, 23, 8, 31, 0, 17, 18, 3, 4, 19, 20, 5, 6, 7, 24, 47, 48, 33, 34, 51, 52, 35, 36, 53, 54, 55, 40, 63, 32, 49, 50, 35, 36, 51, 52, 37, 38, 39, 56><<<num_blocks, threads_per_block>>>(unchecked_hashes, bitsplitted_keys);
}
// Auto-generated host launcher: instantiates the des_25_encrypt kernel with the
// 48 compile-time bit-index template arguments for salt 1844 (indices presumably
// encode the salt-dependent DES bit swaps — verify against the generator).
void des_25_encrypt_salt1844(const size_t num_blocks, const size_t threads_per_block, vtype* const unchecked_hashes, const vtype* const bitsplitted_keys)
{
des_25_encrypt<31, 0, 17, 2, 19, 20, 3, 4, 21, 22, 23, 8, 15, 16, 1, 18, 3, 4, 19, 20, 5, 6, 7, 24, 63, 32, 49, 34, 51, 52, 35, 36, 53, 54, 55, 40, 47, 48, 33, 50, 35, 36, 51, 52, 37, 38, 39, 56><<<num_blocks, threads_per_block>>>(unchecked_hashes, bitsplitted_keys);
}
// Auto-generated host launcher: instantiates the des_25_encrypt kernel with the
// 48 compile-time bit-index template arguments for salt 1845 (indices presumably
// encode the salt-dependent DES bit swaps — verify against the generator).
void des_25_encrypt_salt1845(const size_t num_blocks, const size_t threads_per_block, vtype* const unchecked_hashes, const vtype* const bitsplitted_keys)
{
des_25_encrypt<15, 0, 17, 2, 19, 20, 3, 4, 21, 22, 23, 8, 31, 16, 1, 18, 3, 4, 19, 20, 5, 6, 7, 24, 47, 32, 49, 34, 51, 52, 35, 36, 53, 54, 55, 40, 63, 48, 33, 50, 35, 36, 51, 52, 37, 38, 39, 56><<<num_blocks, threads_per_block>>>(unchecked_hashes, bitsplitted_keys);
}
// Auto-generated host launcher: instantiates the des_25_encrypt kernel with the
// 48 compile-time bit-index template arguments for salt 1846 (indices presumably
// encode the salt-dependent DES bit swaps — verify against the generator).
void des_25_encrypt_salt1846(const size_t num_blocks, const size_t threads_per_block, vtype* const unchecked_hashes, const vtype* const bitsplitted_keys)
{
des_25_encrypt<31, 16, 17, 2, 19, 20, 3, 4, 21, 22, 23, 8, 15, 0, 1, 18, 3, 4, 19, 20, 5, 6, 7, 24, 63, 48, 49, 34, 51, 52, 35, 36, 53, 54, 55, 40, 47, 32, 33, 50, 35, 36, 51, 52, 37, 38, 39, 56><<<num_blocks, threads_per_block>>>(unchecked_hashes, bitsplitted_keys);
}
// Auto-generated host launcher: instantiates the des_25_encrypt kernel with the
// 48 compile-time bit-index template arguments for salt 1847 (indices presumably
// encode the salt-dependent DES bit swaps — verify against the generator).
void des_25_encrypt_salt1847(const size_t num_blocks, const size_t threads_per_block, vtype* const unchecked_hashes, const vtype* const bitsplitted_keys)
{
des_25_encrypt<15, 16, 17, 2, 19, 20, 3, 4, 21, 22, 23, 8, 31, 0, 1, 18, 3, 4, 19, 20, 5, 6, 7, 24, 47, 48, 49, 34, 51, 52, 35, 36, 53, 54, 55, 40, 63, 32, 33, 50, 35, 36, 51, 52, 37, 38, 39, 56><<<num_blocks, threads_per_block>>>(unchecked_hashes, bitsplitted_keys);
}
// Auto-generated host launcher: instantiates the des_25_encrypt kernel with the
// 48 compile-time bit-index template arguments for salt 1848 (indices presumably
// encode the salt-dependent DES bit swaps — verify against the generator).
void des_25_encrypt_salt1848(const size_t num_blocks, const size_t threads_per_block, vtype* const unchecked_hashes, const vtype* const bitsplitted_keys)
{
des_25_encrypt<31, 0, 1, 18, 19, 20, 3, 4, 21, 22, 23, 8, 15, 16, 17, 2, 3, 4, 19, 20, 5, 6, 7, 24, 63, 32, 33, 50, 51, 52, 35, 36, 53, 54, 55, 40, 47, 48, 49, 34, 35, 36, 51, 52, 37, 38, 39, 56><<<num_blocks, threads_per_block>>>(unchecked_hashes, bitsplitted_keys);
}
// Auto-generated host launcher: instantiates the des_25_encrypt kernel with the
// 48 compile-time bit-index template arguments for salt 1849 (indices presumably
// encode the salt-dependent DES bit swaps — verify against the generator).
void des_25_encrypt_salt1849(const size_t num_blocks, const size_t threads_per_block, vtype* const unchecked_hashes, const vtype* const bitsplitted_keys)
{
des_25_encrypt<15, 0, 1, 18, 19, 20, 3, 4, 21, 22, 23, 8, 31, 16, 17, 2, 3, 4, 19, 20, 5, 6, 7, 24, 47, 32, 33, 50, 51, 52, 35, 36, 53, 54, 55, 40, 63, 48, 49, 34, 35, 36, 51, 52, 37, 38, 39, 56><<<num_blocks, threads_per_block>>>(unchecked_hashes, bitsplitted_keys);
}
// Auto-generated host launcher: instantiates the des_25_encrypt kernel with the
// 48 compile-time bit-index template arguments for salt 1850 (indices presumably
// encode the salt-dependent DES bit swaps — verify against the generator).
void des_25_encrypt_salt1850(const size_t num_blocks, const size_t threads_per_block, vtype* const unchecked_hashes, const vtype* const bitsplitted_keys)
{
des_25_encrypt<31, 16, 1, 18, 19, 20, 3, 4, 21, 22, 23, 8, 15, 0, 17, 2, 3, 4, 19, 20, 5, 6, 7, 24, 63, 48, 33, 50, 51, 52, 35, 36, 53, 54, 55, 40, 47, 32, 49, 34, 35, 36, 51, 52, 37, 38, 39, 56><<<num_blocks, threads_per_block>>>(unchecked_hashes, bitsplitted_keys);
}
// Auto-generated host launcher: instantiates the des_25_encrypt kernel with the
// 48 compile-time bit-index template arguments for salt 1851 (indices presumably
// encode the salt-dependent DES bit swaps — verify against the generator).
void des_25_encrypt_salt1851(const size_t num_blocks, const size_t threads_per_block, vtype* const unchecked_hashes, const vtype* const bitsplitted_keys)
{
des_25_encrypt<15, 16, 1, 18, 19, 20, 3, 4, 21, 22, 23, 8, 31, 0, 17, 2, 3, 4, 19, 20, 5, 6, 7, 24, 47, 48, 33, 50, 51, 52, 35, 36, 53, 54, 55, 40, 63, 32, 49, 34, 35, 36, 51, 52, 37, 38, 39, 56><<<num_blocks, threads_per_block>>>(unchecked_hashes, bitsplitted_keys);
}
// Auto-generated host launcher: instantiates the des_25_encrypt kernel with the
// 48 compile-time bit-index template arguments for salt 1852 (indices presumably
// encode the salt-dependent DES bit swaps — verify against the generator).
void des_25_encrypt_salt1852(const size_t num_blocks, const size_t threads_per_block, vtype* const unchecked_hashes, const vtype* const bitsplitted_keys)
{
des_25_encrypt<31, 0, 17, 18, 19, 20, 3, 4, 21, 22, 23, 8, 15, 16, 1, 2, 3, 4, 19, 20, 5, 6, 7, 24, 63, 32, 49, 50, 51, 52, 35, 36, 53, 54, 55, 40, 47, 48, 33, 34, 35, 36, 51, 52, 37, 38, 39, 56><<<num_blocks, threads_per_block>>>(unchecked_hashes, bitsplitted_keys);
}
// Auto-generated host launcher: instantiates the des_25_encrypt kernel with the
// 48 compile-time bit-index template arguments for salt 1853 (indices presumably
// encode the salt-dependent DES bit swaps — verify against the generator).
void des_25_encrypt_salt1853(const size_t num_blocks, const size_t threads_per_block, vtype* const unchecked_hashes, const vtype* const bitsplitted_keys)
{
des_25_encrypt<15, 0, 17, 18, 19, 20, 3, 4, 21, 22, 23, 8, 31, 16, 1, 2, 3, 4, 19, 20, 5, 6, 7, 24, 47, 32, 49, 50, 51, 52, 35, 36, 53, 54, 55, 40, 63, 48, 33, 34, 35, 36, 51, 52, 37, 38, 39, 56><<<num_blocks, threads_per_block>>>(unchecked_hashes, bitsplitted_keys);
}
// Auto-generated host launcher: instantiates the des_25_encrypt kernel with the
// 48 compile-time bit-index template arguments for salt 1854 (indices presumably
// encode the salt-dependent DES bit swaps — verify against the generator).
void des_25_encrypt_salt1854(const size_t num_blocks, const size_t threads_per_block, vtype* const unchecked_hashes, const vtype* const bitsplitted_keys)
{
des_25_encrypt<31, 16, 17, 18, 19, 20, 3, 4, 21, 22, 23, 8, 15, 0, 1, 2, 3, 4, 19, 20, 5, 6, 7, 24, 63, 48, 49, 50, 51, 52, 35, 36, 53, 54, 55, 40, 47, 32, 33, 34, 35, 36, 51, 52, 37, 38, 39, 56><<<num_blocks, threads_per_block>>>(unchecked_hashes, bitsplitted_keys);
}
// Auto-generated host launcher: instantiates the des_25_encrypt kernel with the
// 48 compile-time bit-index template arguments for salt 1855 (indices presumably
// encode the salt-dependent DES bit swaps — verify against the generator).
void des_25_encrypt_salt1855(const size_t num_blocks, const size_t threads_per_block, vtype* const unchecked_hashes, const vtype* const bitsplitted_keys)
{
des_25_encrypt<15, 16, 17, 18, 19, 20, 3, 4, 21, 22, 23, 8, 31, 0, 1, 2, 3, 4, 19, 20, 5, 6, 7, 24, 47, 48, 49, 50, 51, 52, 35, 36, 53, 54, 55, 40, 63, 32, 33, 34, 35, 36, 51, 52, 37, 38, 39, 56><<<num_blocks, threads_per_block>>>(unchecked_hashes, bitsplitted_keys);
}
// Auto-generated host launcher: instantiates the des_25_encrypt kernel with the
// 48 compile-time bit-index template arguments for salt 1856 (indices presumably
// encode the salt-dependent DES bit swaps — verify against the generator).
void des_25_encrypt_salt1856(const size_t num_blocks, const size_t threads_per_block, vtype* const unchecked_hashes, const vtype* const bitsplitted_keys)
{
des_25_encrypt<31, 0, 1, 2, 3, 4, 19, 4, 21, 22, 23, 8, 15, 16, 17, 18, 19, 20, 3, 20, 5, 6, 7, 24, 63, 32, 33, 34, 35, 36, 51, 36, 53, 54, 55, 40, 47, 48, 49, 50, 51, 52, 35, 52, 37, 38, 39, 56><<<num_blocks, threads_per_block>>>(unchecked_hashes, bitsplitted_keys);
}
// Auto-generated host launcher: instantiates the des_25_encrypt kernel with the
// 48 compile-time bit-index template arguments for salt 1857 (indices presumably
// encode the salt-dependent DES bit swaps — verify against the generator).
void des_25_encrypt_salt1857(const size_t num_blocks, const size_t threads_per_block, vtype* const unchecked_hashes, const vtype* const bitsplitted_keys)
{
des_25_encrypt<15, 0, 1, 2, 3, 4, 19, 4, 21, 22, 23, 8, 31, 16, 17, 18, 19, 20, 3, 20, 5, 6, 7, 24, 47, 32, 33, 34, 35, 36, 51, 36, 53, 54, 55, 40, 63, 48, 49, 50, 51, 52, 35, 52, 37, 38, 39, 56><<<num_blocks, threads_per_block>>>(unchecked_hashes, bitsplitted_keys);
}
// Auto-generated host launcher: instantiates the des_25_encrypt kernel with the
// 48 compile-time bit-index template arguments for salt 1858 (indices presumably
// encode the salt-dependent DES bit swaps — verify against the generator).
void des_25_encrypt_salt1858(const size_t num_blocks, const size_t threads_per_block, vtype* const unchecked_hashes, const vtype* const bitsplitted_keys)
{
des_25_encrypt<31, 16, 1, 2, 3, 4, 19, 4, 21, 22, 23, 8, 15, 0, 17, 18, 19, 20, 3, 20, 5, 6, 7, 24, 63, 48, 33, 34, 35, 36, 51, 36, 53, 54, 55, 40, 47, 32, 49, 50, 51, 52, 35, 52, 37, 38, 39, 56><<<num_blocks, threads_per_block>>>(unchecked_hashes, bitsplitted_keys);
}
// Auto-generated host launcher: instantiates the des_25_encrypt kernel with the
// 48 compile-time bit-index template arguments for salt 1859 (indices presumably
// encode the salt-dependent DES bit swaps — verify against the generator).
void des_25_encrypt_salt1859(const size_t num_blocks, const size_t threads_per_block, vtype* const unchecked_hashes, const vtype* const bitsplitted_keys)
{
des_25_encrypt<15, 16, 1, 2, 3, 4, 19, 4, 21, 22, 23, 8, 31, 0, 17, 18, 19, 20, 3, 20, 5, 6, 7, 24, 47, 48, 33, 34, 35, 36, 51, 36, 53, 54, 55, 40, 63, 32, 49, 50, 51, 52, 35, 52, 37, 38, 39, 56><<<num_blocks, threads_per_block>>>(unchecked_hashes, bitsplitted_keys);
}
// Auto-generated host launcher: instantiates the des_25_encrypt kernel with the
// 48 compile-time bit-index template arguments for salt 1860 (indices presumably
// encode the salt-dependent DES bit swaps — verify against the generator).
void des_25_encrypt_salt1860(const size_t num_blocks, const size_t threads_per_block, vtype* const unchecked_hashes, const vtype* const bitsplitted_keys)
{
des_25_encrypt<31, 0, 17, 2, 3, 4, 19, 4, 21, 22, 23, 8, 15, 16, 1, 18, 19, 20, 3, 20, 5, 6, 7, 24, 63, 32, 49, 34, 35, 36, 51, 36, 53, 54, 55, 40, 47, 48, 33, 50, 51, 52, 35, 52, 37, 38, 39, 56><<<num_blocks, threads_per_block>>>(unchecked_hashes, bitsplitted_keys);
}
// Auto-generated host launcher: instantiates the des_25_encrypt kernel with the
// 48 compile-time bit-index template arguments for salt 1861 (indices presumably
// encode the salt-dependent DES bit swaps — verify against the generator).
void des_25_encrypt_salt1861(const size_t num_blocks, const size_t threads_per_block, vtype* const unchecked_hashes, const vtype* const bitsplitted_keys)
{
des_25_encrypt<15, 0, 17, 2, 3, 4, 19, 4, 21, 22, 23, 8, 31, 16, 1, 18, 19, 20, 3, 20, 5, 6, 7, 24, 47, 32, 49, 34, 35, 36, 51, 36, 53, 54, 55, 40, 63, 48, 33, 50, 51, 52, 35, 52, 37, 38, 39, 56><<<num_blocks, threads_per_block>>>(unchecked_hashes, bitsplitted_keys);
}
// Auto-generated host launcher: instantiates the des_25_encrypt kernel with the
// 48 compile-time bit-index template arguments for salt 1862 (indices presumably
// encode the salt-dependent DES bit swaps — verify against the generator).
void des_25_encrypt_salt1862(const size_t num_blocks, const size_t threads_per_block, vtype* const unchecked_hashes, const vtype* const bitsplitted_keys)
{
des_25_encrypt<31, 16, 17, 2, 3, 4, 19, 4, 21, 22, 23, 8, 15, 0, 1, 18, 19, 20, 3, 20, 5, 6, 7, 24, 63, 48, 49, 34, 35, 36, 51, 36, 53, 54, 55, 40, 47, 32, 33, 50, 51, 52, 35, 52, 37, 38, 39, 56><<<num_blocks, threads_per_block>>>(unchecked_hashes, bitsplitted_keys);
}
// Auto-generated host launcher: instantiates the des_25_encrypt kernel with the
// 48 compile-time bit-index template arguments for salt 1863 (indices presumably
// encode the salt-dependent DES bit swaps — verify against the generator).
void des_25_encrypt_salt1863(const size_t num_blocks, const size_t threads_per_block, vtype* const unchecked_hashes, const vtype* const bitsplitted_keys)
{
des_25_encrypt<15, 16, 17, 2, 3, 4, 19, 4, 21, 22, 23, 8, 31, 0, 1, 18, 19, 20, 3, 20, 5, 6, 7, 24, 47, 48, 49, 34, 35, 36, 51, 36, 53, 54, 55, 40, 63, 32, 33, 50, 51, 52, 35, 52, 37, 38, 39, 56><<<num_blocks, threads_per_block>>>(unchecked_hashes, bitsplitted_keys);
}
// Auto-generated host launcher: instantiates the des_25_encrypt kernel with the
// 48 compile-time bit-index template arguments for salt 1864 (indices presumably
// encode the salt-dependent DES bit swaps — verify against the generator).
void des_25_encrypt_salt1864(const size_t num_blocks, const size_t threads_per_block, vtype* const unchecked_hashes, const vtype* const bitsplitted_keys)
{
des_25_encrypt<31, 0, 1, 18, 3, 4, 19, 4, 21, 22, 23, 8, 15, 16, 17, 2, 19, 20, 3, 20, 5, 6, 7, 24, 63, 32, 33, 50, 35, 36, 51, 36, 53, 54, 55, 40, 47, 48, 49, 34, 51, 52, 35, 52, 37, 38, 39, 56><<<num_blocks, threads_per_block>>>(unchecked_hashes, bitsplitted_keys);
}
// Auto-generated host launcher: instantiates the des_25_encrypt kernel with the
// 48 compile-time bit-index template arguments for salt 1865 (indices presumably
// encode the salt-dependent DES bit swaps — verify against the generator).
void des_25_encrypt_salt1865(const size_t num_blocks, const size_t threads_per_block, vtype* const unchecked_hashes, const vtype* const bitsplitted_keys)
{
des_25_encrypt<15, 0, 1, 18, 3, 4, 19, 4, 21, 22, 23, 8, 31, 16, 17, 2, 19, 20, 3, 20, 5, 6, 7, 24, 47, 32, 33, 50, 35, 36, 51, 36, 53, 54, 55, 40, 63, 48, 49, 34, 51, 52, 35, 52, 37, 38, 39, 56><<<num_blocks, threads_per_block>>>(unchecked_hashes, bitsplitted_keys);
}
// Auto-generated host launcher: instantiates the des_25_encrypt kernel with the
// 48 compile-time bit-index template arguments for salt 1866 (indices presumably
// encode the salt-dependent DES bit swaps — verify against the generator).
void des_25_encrypt_salt1866(const size_t num_blocks, const size_t threads_per_block, vtype* const unchecked_hashes, const vtype* const bitsplitted_keys)
{
des_25_encrypt<31, 16, 1, 18, 3, 4, 19, 4, 21, 22, 23, 8, 15, 0, 17, 2, 19, 20, 3, 20, 5, 6, 7, 24, 63, 48, 33, 50, 35, 36, 51, 36, 53, 54, 55, 40, 47, 32, 49, 34, 51, 52, 35, 52, 37, 38, 39, 56><<<num_blocks, threads_per_block>>>(unchecked_hashes, bitsplitted_keys);
}
// Auto-generated host launcher: instantiates the des_25_encrypt kernel with the
// 48 compile-time bit-index template arguments for salt 1867 (indices presumably
// encode the salt-dependent DES bit swaps — verify against the generator).
void des_25_encrypt_salt1867(const size_t num_blocks, const size_t threads_per_block, vtype* const unchecked_hashes, const vtype* const bitsplitted_keys)
{
des_25_encrypt<15, 16, 1, 18, 3, 4, 19, 4, 21, 22, 23, 8, 31, 0, 17, 2, 19, 20, 3, 20, 5, 6, 7, 24, 47, 48, 33, 50, 35, 36, 51, 36, 53, 54, 55, 40, 63, 32, 49, 34, 51, 52, 35, 52, 37, 38, 39, 56><<<num_blocks, threads_per_block>>>(unchecked_hashes, bitsplitted_keys);
}
// Auto-generated host launcher: instantiates the des_25_encrypt kernel with the
// 48 compile-time bit-index template arguments for salt 1868 (indices presumably
// encode the salt-dependent DES bit swaps — verify against the generator).
void des_25_encrypt_salt1868(const size_t num_blocks, const size_t threads_per_block, vtype* const unchecked_hashes, const vtype* const bitsplitted_keys)
{
des_25_encrypt<31, 0, 17, 18, 3, 4, 19, 4, 21, 22, 23, 8, 15, 16, 1, 2, 19, 20, 3, 20, 5, 6, 7, 24, 63, 32, 49, 50, 35, 36, 51, 36, 53, 54, 55, 40, 47, 48, 33, 34, 51, 52, 35, 52, 37, 38, 39, 56><<<num_blocks, threads_per_block>>>(unchecked_hashes, bitsplitted_keys);
}
// Auto-generated host launcher: instantiates the des_25_encrypt kernel with the
// 48 compile-time bit-index template arguments for salt 1869 (indices presumably
// encode the salt-dependent DES bit swaps — verify against the generator).
void des_25_encrypt_salt1869(const size_t num_blocks, const size_t threads_per_block, vtype* const unchecked_hashes, const vtype* const bitsplitted_keys)
{
des_25_encrypt<15, 0, 17, 18, 3, 4, 19, 4, 21, 22, 23, 8, 31, 16, 1, 2, 19, 20, 3, 20, 5, 6, 7, 24, 47, 32, 49, 50, 35, 36, 51, 36, 53, 54, 55, 40, 63, 48, 33, 34, 51, 52, 35, 52, 37, 38, 39, 56><<<num_blocks, threads_per_block>>>(unchecked_hashes, bitsplitted_keys);
}
// Auto-generated host launcher: instantiates the des_25_encrypt kernel with the
// 48 compile-time bit-index template arguments for salt 1870 (indices presumably
// encode the salt-dependent DES bit swaps — verify against the generator).
void des_25_encrypt_salt1870(const size_t num_blocks, const size_t threads_per_block, vtype* const unchecked_hashes, const vtype* const bitsplitted_keys)
{
des_25_encrypt<31, 16, 17, 18, 3, 4, 19, 4, 21, 22, 23, 8, 15, 0, 1, 2, 19, 20, 3, 20, 5, 6, 7, 24, 63, 48, 49, 50, 35, 36, 51, 36, 53, 54, 55, 40, 47, 32, 33, 34, 51, 52, 35, 52, 37, 38, 39, 56><<<num_blocks, threads_per_block>>>(unchecked_hashes, bitsplitted_keys);
}
// Auto-generated host launcher: instantiates the des_25_encrypt kernel with the
// 48 compile-time bit-index template arguments for salt 1871 (indices presumably
// encode the salt-dependent DES bit swaps — verify against the generator).
void des_25_encrypt_salt1871(const size_t num_blocks, const size_t threads_per_block, vtype* const unchecked_hashes, const vtype* const bitsplitted_keys)
{
des_25_encrypt<15, 16, 17, 18, 3, 4, 19, 4, 21, 22, 23, 8, 31, 0, 1, 2, 19, 20, 3, 20, 5, 6, 7, 24, 47, 48, 49, 50, 35, 36, 51, 36, 53, 54, 55, 40, 63, 32, 33, 34, 51, 52, 35, 52, 37, 38, 39, 56><<<num_blocks, threads_per_block>>>(unchecked_hashes, bitsplitted_keys);
}
// Auto-generated host launcher: instantiates the des_25_encrypt kernel with the
// 48 compile-time bit-index template arguments for salt 1872 (indices presumably
// encode the salt-dependent DES bit swaps — verify against the generator).
void des_25_encrypt_salt1872(const size_t num_blocks, const size_t threads_per_block, vtype* const unchecked_hashes, const vtype* const bitsplitted_keys)
{
des_25_encrypt<31, 0, 1, 2, 19, 4, 19, 4, 21, 22, 23, 8, 15, 16, 17, 18, 3, 20, 3, 20, 5, 6, 7, 24, 63, 32, 33, 34, 51, 36, 51, 36, 53, 54, 55, 40, 47, 48, 49, 50, 35, 52, 35, 52, 37, 38, 39, 56><<<num_blocks, threads_per_block>>>(unchecked_hashes, bitsplitted_keys);
}
// Auto-generated host launcher: instantiates the des_25_encrypt kernel with the
// 48 compile-time bit-index template arguments for salt 1873 (indices presumably
// encode the salt-dependent DES bit swaps — verify against the generator).
void des_25_encrypt_salt1873(const size_t num_blocks, const size_t threads_per_block, vtype* const unchecked_hashes, const vtype* const bitsplitted_keys)
{
des_25_encrypt<15, 0, 1, 2, 19, 4, 19, 4, 21, 22, 23, 8, 31, 16, 17, 18, 3, 20, 3, 20, 5, 6, 7, 24, 47, 32, 33, 34, 51, 36, 51, 36, 53, 54, 55, 40, 63, 48, 49, 50, 35, 52, 35, 52, 37, 38, 39, 56><<<num_blocks, threads_per_block>>>(unchecked_hashes, bitsplitted_keys);
}
// Auto-generated host launcher: instantiates the des_25_encrypt kernel with the
// 48 compile-time bit-index template arguments for salt 1874 (indices presumably
// encode the salt-dependent DES bit swaps — verify against the generator).
void des_25_encrypt_salt1874(const size_t num_blocks, const size_t threads_per_block, vtype* const unchecked_hashes, const vtype* const bitsplitted_keys)
{
des_25_encrypt<31, 16, 1, 2, 19, 4, 19, 4, 21, 22, 23, 8, 15, 0, 17, 18, 3, 20, 3, 20, 5, 6, 7, 24, 63, 48, 33, 34, 51, 36, 51, 36, 53, 54, 55, 40, 47, 32, 49, 50, 35, 52, 35, 52, 37, 38, 39, 56><<<num_blocks, threads_per_block>>>(unchecked_hashes, bitsplitted_keys);
}
// Auto-generated host launcher: instantiates the des_25_encrypt kernel with the
// 48 compile-time bit-index template arguments for salt 1875 (indices presumably
// encode the salt-dependent DES bit swaps — verify against the generator).
void des_25_encrypt_salt1875(const size_t num_blocks, const size_t threads_per_block, vtype* const unchecked_hashes, const vtype* const bitsplitted_keys)
{
des_25_encrypt<15, 16, 1, 2, 19, 4, 19, 4, 21, 22, 23, 8, 31, 0, 17, 18, 3, 20, 3, 20, 5, 6, 7, 24, 47, 48, 33, 34, 51, 36, 51, 36, 53, 54, 55, 40, 63, 32, 49, 50, 35, 52, 35, 52, 37, 38, 39, 56><<<num_blocks, threads_per_block>>>(unchecked_hashes, bitsplitted_keys);
}
// Auto-generated host launcher: instantiates the des_25_encrypt kernel with the
// 48 compile-time bit-index template arguments for salt 1876 (indices presumably
// encode the salt-dependent DES bit swaps — verify against the generator).
void des_25_encrypt_salt1876(const size_t num_blocks, const size_t threads_per_block, vtype* const unchecked_hashes, const vtype* const bitsplitted_keys)
{
des_25_encrypt<31, 0, 17, 2, 19, 4, 19, 4, 21, 22, 23, 8, 15, 16, 1, 18, 3, 20, 3, 20, 5, 6, 7, 24, 63, 32, 49, 34, 51, 36, 51, 36, 53, 54, 55, 40, 47, 48, 33, 50, 35, 52, 35, 52, 37, 38, 39, 56><<<num_blocks, threads_per_block>>>(unchecked_hashes, bitsplitted_keys);
}
// Auto-generated host launcher: instantiates the des_25_encrypt kernel with the
// 48 compile-time bit-index template arguments for salt 1877 (indices presumably
// encode the salt-dependent DES bit swaps — verify against the generator).
void des_25_encrypt_salt1877(const size_t num_blocks, const size_t threads_per_block, vtype* const unchecked_hashes, const vtype* const bitsplitted_keys)
{
des_25_encrypt<15, 0, 17, 2, 19, 4, 19, 4, 21, 22, 23, 8, 31, 16, 1, 18, 3, 20, 3, 20, 5, 6, 7, 24, 47, 32, 49, 34, 51, 36, 51, 36, 53, 54, 55, 40, 63, 48, 33, 50, 35, 52, 35, 52, 37, 38, 39, 56><<<num_blocks, threads_per_block>>>(unchecked_hashes, bitsplitted_keys);
}
// Auto-generated host launcher: instantiates the des_25_encrypt kernel with the
// 48 compile-time bit-index template arguments for salt 1878 (indices presumably
// encode the salt-dependent DES bit swaps — verify against the generator).
void des_25_encrypt_salt1878(const size_t num_blocks, const size_t threads_per_block, vtype* const unchecked_hashes, const vtype* const bitsplitted_keys)
{
des_25_encrypt<31, 16, 17, 2, 19, 4, 19, 4, 21, 22, 23, 8, 15, 0, 1, 18, 3, 20, 3, 20, 5, 6, 7, 24, 63, 48, 49, 34, 51, 36, 51, 36, 53, 54, 55, 40, 47, 32, 33, 50, 35, 52, 35, 52, 37, 38, 39, 56><<<num_blocks, threads_per_block>>>(unchecked_hashes, bitsplitted_keys);
}
// Auto-generated host launcher: instantiates the des_25_encrypt kernel with the
// 48 compile-time bit-index template arguments for salt 1879 (indices presumably
// encode the salt-dependent DES bit swaps — verify against the generator).
void des_25_encrypt_salt1879(const size_t num_blocks, const size_t threads_per_block, vtype* const unchecked_hashes, const vtype* const bitsplitted_keys)
{
des_25_encrypt<15, 16, 17, 2, 19, 4, 19, 4, 21, 22, 23, 8, 31, 0, 1, 18, 3, 20, 3, 20, 5, 6, 7, 24, 47, 48, 49, 34, 51, 36, 51, 36, 53, 54, 55, 40, 63, 32, 33, 50, 35, 52, 35, 52, 37, 38, 39, 56><<<num_blocks, threads_per_block>>>(unchecked_hashes, bitsplitted_keys);
}
// Auto-generated host launcher: instantiates the des_25_encrypt kernel with the
// 48 compile-time bit-index template arguments for salt 1880 (indices presumably
// encode the salt-dependent DES bit swaps — verify against the generator).
void des_25_encrypt_salt1880(const size_t num_blocks, const size_t threads_per_block, vtype* const unchecked_hashes, const vtype* const bitsplitted_keys)
{
des_25_encrypt<31, 0, 1, 18, 19, 4, 19, 4, 21, 22, 23, 8, 15, 16, 17, 2, 3, 20, 3, 20, 5, 6, 7, 24, 63, 32, 33, 50, 51, 36, 51, 36, 53, 54, 55, 40, 47, 48, 49, 34, 35, 52, 35, 52, 37, 38, 39, 56><<<num_blocks, threads_per_block>>>(unchecked_hashes, bitsplitted_keys);
}
// Auto-generated host launcher: instantiates the des_25_encrypt kernel with the
// 48 compile-time bit-index template arguments for salt 1881 (indices presumably
// encode the salt-dependent DES bit swaps — verify against the generator).
void des_25_encrypt_salt1881(const size_t num_blocks, const size_t threads_per_block, vtype* const unchecked_hashes, const vtype* const bitsplitted_keys)
{
des_25_encrypt<15, 0, 1, 18, 19, 4, 19, 4, 21, 22, 23, 8, 31, 16, 17, 2, 3, 20, 3, 20, 5, 6, 7, 24, 47, 32, 33, 50, 51, 36, 51, 36, 53, 54, 55, 40, 63, 48, 49, 34, 35, 52, 35, 52, 37, 38, 39, 56><<<num_blocks, threads_per_block>>>(unchecked_hashes, bitsplitted_keys);
}
// Auto-generated host launcher: instantiates the des_25_encrypt kernel with the
// 48 compile-time bit-index template arguments for salt 1882 (indices presumably
// encode the salt-dependent DES bit swaps — verify against the generator).
void des_25_encrypt_salt1882(const size_t num_blocks, const size_t threads_per_block, vtype* const unchecked_hashes, const vtype* const bitsplitted_keys)
{
des_25_encrypt<31, 16, 1, 18, 19, 4, 19, 4, 21, 22, 23, 8, 15, 0, 17, 2, 3, 20, 3, 20, 5, 6, 7, 24, 63, 48, 33, 50, 51, 36, 51, 36, 53, 54, 55, 40, 47, 32, 49, 34, 35, 52, 35, 52, 37, 38, 39, 56><<<num_blocks, threads_per_block>>>(unchecked_hashes, bitsplitted_keys);
}
// Auto-generated host launcher: instantiates the des_25_encrypt kernel with the
// 48 compile-time bit-index template arguments for salt 1883 (indices presumably
// encode the salt-dependent DES bit swaps — verify against the generator).
void des_25_encrypt_salt1883(const size_t num_blocks, const size_t threads_per_block, vtype* const unchecked_hashes, const vtype* const bitsplitted_keys)
{
des_25_encrypt<15, 16, 1, 18, 19, 4, 19, 4, 21, 22, 23, 8, 31, 0, 17, 2, 3, 20, 3, 20, 5, 6, 7, 24, 47, 48, 33, 50, 51, 36, 51, 36, 53, 54, 55, 40, 63, 32, 49, 34, 35, 52, 35, 52, 37, 38, 39, 56><<<num_blocks, threads_per_block>>>(unchecked_hashes, bitsplitted_keys);
}
// Auto-generated host launcher: instantiates the des_25_encrypt kernel with the
// 48 compile-time bit-index template arguments for salt 1884 (indices presumably
// encode the salt-dependent DES bit swaps — verify against the generator).
void des_25_encrypt_salt1884(const size_t num_blocks, const size_t threads_per_block, vtype* const unchecked_hashes, const vtype* const bitsplitted_keys)
{
des_25_encrypt<31, 0, 17, 18, 19, 4, 19, 4, 21, 22, 23, 8, 15, 16, 1, 2, 3, 20, 3, 20, 5, 6, 7, 24, 63, 32, 49, 50, 51, 36, 51, 36, 53, 54, 55, 40, 47, 48, 33, 34, 35, 52, 35, 52, 37, 38, 39, 56><<<num_blocks, threads_per_block>>>(unchecked_hashes, bitsplitted_keys);
}
// Auto-generated host launcher: instantiates the des_25_encrypt kernel with the
// 48 compile-time bit-index template arguments for salt 1885 (indices presumably
// encode the salt-dependent DES bit swaps — verify against the generator).
void des_25_encrypt_salt1885(const size_t num_blocks, const size_t threads_per_block, vtype* const unchecked_hashes, const vtype* const bitsplitted_keys)
{
des_25_encrypt<15, 0, 17, 18, 19, 4, 19, 4, 21, 22, 23, 8, 31, 16, 1, 2, 3, 20, 3, 20, 5, 6, 7, 24, 47, 32, 49, 50, 51, 36, 51, 36, 53, 54, 55, 40, 63, 48, 33, 34, 35, 52, 35, 52, 37, 38, 39, 56><<<num_blocks, threads_per_block>>>(unchecked_hashes, bitsplitted_keys);
}
// Auto-generated host launcher: instantiates the des_25_encrypt kernel with the
// 48 compile-time bit-index template arguments for salt 1886 (indices presumably
// encode the salt-dependent DES bit swaps — verify against the generator).
void des_25_encrypt_salt1886(const size_t num_blocks, const size_t threads_per_block, vtype* const unchecked_hashes, const vtype* const bitsplitted_keys)
{
des_25_encrypt<31, 16, 17, 18, 19, 4, 19, 4, 21, 22, 23, 8, 15, 0, 1, 2, 3, 20, 3, 20, 5, 6, 7, 24, 63, 48, 49, 50, 51, 36, 51, 36, 53, 54, 55, 40, 47, 32, 33, 34, 35, 52, 35, 52, 37, 38, 39, 56><<<num_blocks, threads_per_block>>>(unchecked_hashes, bitsplitted_keys);
}
// Auto-generated host launcher: instantiates the des_25_encrypt kernel with the
// 48 compile-time bit-index template arguments for salt 1887 (indices presumably
// encode the salt-dependent DES bit swaps — verify against the generator).
void des_25_encrypt_salt1887(const size_t num_blocks, const size_t threads_per_block, vtype* const unchecked_hashes, const vtype* const bitsplitted_keys)
{
des_25_encrypt<15, 16, 17, 18, 19, 4, 19, 4, 21, 22, 23, 8, 31, 0, 1, 2, 3, 20, 3, 20, 5, 6, 7, 24, 47, 48, 49, 50, 51, 36, 51, 36, 53, 54, 55, 40, 63, 32, 33, 34, 35, 52, 35, 52, 37, 38, 39, 56><<<num_blocks, threads_per_block>>>(unchecked_hashes, bitsplitted_keys);
}
// Auto-generated host launcher: instantiates the des_25_encrypt kernel with the
// 48 compile-time bit-index template arguments for salt 1888 (indices presumably
// encode the salt-dependent DES bit swaps — verify against the generator).
void des_25_encrypt_salt1888(const size_t num_blocks, const size_t threads_per_block, vtype* const unchecked_hashes, const vtype* const bitsplitted_keys)
{
des_25_encrypt<31, 0, 1, 2, 3, 20, 19, 4, 21, 22, 23, 8, 15, 16, 17, 18, 19, 4, 3, 20, 5, 6, 7, 24, 63, 32, 33, 34, 35, 52, 51, 36, 53, 54, 55, 40, 47, 48, 49, 50, 51, 36, 35, 52, 37, 38, 39, 56><<<num_blocks, threads_per_block>>>(unchecked_hashes, bitsplitted_keys);
}
// Auto-generated host launcher: instantiates the des_25_encrypt kernel with the
// 48 compile-time bit-index template arguments for salt 1889 (indices presumably
// encode the salt-dependent DES bit swaps — verify against the generator).
void des_25_encrypt_salt1889(const size_t num_blocks, const size_t threads_per_block, vtype* const unchecked_hashes, const vtype* const bitsplitted_keys)
{
des_25_encrypt<15, 0, 1, 2, 3, 20, 19, 4, 21, 22, 23, 8, 31, 16, 17, 18, 19, 4, 3, 20, 5, 6, 7, 24, 47, 32, 33, 34, 35, 52, 51, 36, 53, 54, 55, 40, 63, 48, 49, 50, 51, 36, 35, 52, 37, 38, 39, 56><<<num_blocks, threads_per_block>>>(unchecked_hashes, bitsplitted_keys);
}
// Auto-generated host launcher: instantiates the des_25_encrypt kernel with the
// 48 compile-time bit-index template arguments for salt 1890 (indices presumably
// encode the salt-dependent DES bit swaps — verify against the generator).
void des_25_encrypt_salt1890(const size_t num_blocks, const size_t threads_per_block, vtype* const unchecked_hashes, const vtype* const bitsplitted_keys)
{
des_25_encrypt<31, 16, 1, 2, 3, 20, 19, 4, 21, 22, 23, 8, 15, 0, 17, 18, 19, 4, 3, 20, 5, 6, 7, 24, 63, 48, 33, 34, 35, 52, 51, 36, 53, 54, 55, 40, 47, 32, 49, 50, 51, 36, 35, 52, 37, 38, 39, 56><<<num_blocks, threads_per_block>>>(unchecked_hashes, bitsplitted_keys);
}
// Auto-generated host launcher: instantiates the des_25_encrypt kernel with the
// 48 compile-time bit-index template arguments for salt 1891 (indices presumably
// encode the salt-dependent DES bit swaps — verify against the generator).
void des_25_encrypt_salt1891(const size_t num_blocks, const size_t threads_per_block, vtype* const unchecked_hashes, const vtype* const bitsplitted_keys)
{
des_25_encrypt<15, 16, 1, 2, 3, 20, 19, 4, 21, 22, 23, 8, 31, 0, 17, 18, 19, 4, 3, 20, 5, 6, 7, 24, 47, 48, 33, 34, 35, 52, 51, 36, 53, 54, 55, 40, 63, 32, 49, 50, 51, 36, 35, 52, 37, 38, 39, 56><<<num_blocks, threads_per_block>>>(unchecked_hashes, bitsplitted_keys);
}
void des_25_encrypt_salt1892(const size_t num_blocks, const size_t threads_per_block, vtype* const unchecked_hashes, const vtype* const bitsplitted_keys)
{
des_25_encrypt<31, 0, 17, 2, 3, 20, 19, 4, 21, 22, 23, 8, 15, 16, 1, 18, 19, 4, 3, 20, 5, 6, 7, 24, 63, 32, 49, 34, 35, 52, 51, 36, 53, 54, 55, 40, 47, 48, 33, 50, 51, 36, 35, 52, 37, 38, 39, 56><<<num_blocks, threads_per_block>>>(unchecked_hashes, bitsplitted_keys);
}
void des_25_encrypt_salt1893(const size_t num_blocks, const size_t threads_per_block, vtype* const unchecked_hashes, const vtype* const bitsplitted_keys)
{
des_25_encrypt<15, 0, 17, 2, 3, 20, 19, 4, 21, 22, 23, 8, 31, 16, 1, 18, 19, 4, 3, 20, 5, 6, 7, 24, 47, 32, 49, 34, 35, 52, 51, 36, 53, 54, 55, 40, 63, 48, 33, 50, 51, 36, 35, 52, 37, 38, 39, 56><<<num_blocks, threads_per_block>>>(unchecked_hashes, bitsplitted_keys);
}
void des_25_encrypt_salt1894(const size_t num_blocks, const size_t threads_per_block, vtype* const unchecked_hashes, const vtype* const bitsplitted_keys)
{
des_25_encrypt<31, 16, 17, 2, 3, 20, 19, 4, 21, 22, 23, 8, 15, 0, 1, 18, 19, 4, 3, 20, 5, 6, 7, 24, 63, 48, 49, 34, 35, 52, 51, 36, 53, 54, 55, 40, 47, 32, 33, 50, 51, 36, 35, 52, 37, 38, 39, 56><<<num_blocks, threads_per_block>>>(unchecked_hashes, bitsplitted_keys);
}
void des_25_encrypt_salt1895(const size_t num_blocks, const size_t threads_per_block, vtype* const unchecked_hashes, const vtype* const bitsplitted_keys)
{
des_25_encrypt<15, 16, 17, 2, 3, 20, 19, 4, 21, 22, 23, 8, 31, 0, 1, 18, 19, 4, 3, 20, 5, 6, 7, 24, 47, 48, 49, 34, 35, 52, 51, 36, 53, 54, 55, 40, 63, 32, 33, 50, 51, 36, 35, 52, 37, 38, 39, 56><<<num_blocks, threads_per_block>>>(unchecked_hashes, bitsplitted_keys);
}
void des_25_encrypt_salt1896(const size_t num_blocks, const size_t threads_per_block, vtype* const unchecked_hashes, const vtype* const bitsplitted_keys)
{
des_25_encrypt<31, 0, 1, 18, 3, 20, 19, 4, 21, 22, 23, 8, 15, 16, 17, 2, 19, 4, 3, 20, 5, 6, 7, 24, 63, 32, 33, 50, 35, 52, 51, 36, 53, 54, 55, 40, 47, 48, 49, 34, 51, 36, 35, 52, 37, 38, 39, 56><<<num_blocks, threads_per_block>>>(unchecked_hashes, bitsplitted_keys);
}
void des_25_encrypt_salt1897(const size_t num_blocks, const size_t threads_per_block, vtype* const unchecked_hashes, const vtype* const bitsplitted_keys)
{
des_25_encrypt<15, 0, 1, 18, 3, 20, 19, 4, 21, 22, 23, 8, 31, 16, 17, 2, 19, 4, 3, 20, 5, 6, 7, 24, 47, 32, 33, 50, 35, 52, 51, 36, 53, 54, 55, 40, 63, 48, 49, 34, 51, 36, 35, 52, 37, 38, 39, 56><<<num_blocks, threads_per_block>>>(unchecked_hashes, bitsplitted_keys);
}
void des_25_encrypt_salt1898(const size_t num_blocks, const size_t threads_per_block, vtype* const unchecked_hashes, const vtype* const bitsplitted_keys)
{
des_25_encrypt<31, 16, 1, 18, 3, 20, 19, 4, 21, 22, 23, 8, 15, 0, 17, 2, 19, 4, 3, 20, 5, 6, 7, 24, 63, 48, 33, 50, 35, 52, 51, 36, 53, 54, 55, 40, 47, 32, 49, 34, 51, 36, 35, 52, 37, 38, 39, 56><<<num_blocks, threads_per_block>>>(unchecked_hashes, bitsplitted_keys);
}
void des_25_encrypt_salt1899(const size_t num_blocks, const size_t threads_per_block, vtype* const unchecked_hashes, const vtype* const bitsplitted_keys)
{
des_25_encrypt<15, 16, 1, 18, 3, 20, 19, 4, 21, 22, 23, 8, 31, 0, 17, 2, 19, 4, 3, 20, 5, 6, 7, 24, 47, 48, 33, 50, 35, 52, 51, 36, 53, 54, 55, 40, 63, 32, 49, 34, 51, 36, 35, 52, 37, 38, 39, 56><<<num_blocks, threads_per_block>>>(unchecked_hashes, bitsplitted_keys);
}
void des_25_encrypt_salt1900(const size_t num_blocks, const size_t threads_per_block, vtype* const unchecked_hashes, const vtype* const bitsplitted_keys)
{
des_25_encrypt<31, 0, 17, 18, 3, 20, 19, 4, 21, 22, 23, 8, 15, 16, 1, 2, 19, 4, 3, 20, 5, 6, 7, 24, 63, 32, 49, 50, 35, 52, 51, 36, 53, 54, 55, 40, 47, 48, 33, 34, 51, 36, 35, 52, 37, 38, 39, 56><<<num_blocks, threads_per_block>>>(unchecked_hashes, bitsplitted_keys);
}
void des_25_encrypt_salt1901(const size_t num_blocks, const size_t threads_per_block, vtype* const unchecked_hashes, const vtype* const bitsplitted_keys)
{
des_25_encrypt<15, 0, 17, 18, 3, 20, 19, 4, 21, 22, 23, 8, 31, 16, 1, 2, 19, 4, 3, 20, 5, 6, 7, 24, 47, 32, 49, 50, 35, 52, 51, 36, 53, 54, 55, 40, 63, 48, 33, 34, 51, 36, 35, 52, 37, 38, 39, 56><<<num_blocks, threads_per_block>>>(unchecked_hashes, bitsplitted_keys);
}
void des_25_encrypt_salt1902(const size_t num_blocks, const size_t threads_per_block, vtype* const unchecked_hashes, const vtype* const bitsplitted_keys)
{
des_25_encrypt<31, 16, 17, 18, 3, 20, 19, 4, 21, 22, 23, 8, 15, 0, 1, 2, 19, 4, 3, 20, 5, 6, 7, 24, 63, 48, 49, 50, 35, 52, 51, 36, 53, 54, 55, 40, 47, 32, 33, 34, 51, 36, 35, 52, 37, 38, 39, 56><<<num_blocks, threads_per_block>>>(unchecked_hashes, bitsplitted_keys);
}
void des_25_encrypt_salt1903(const size_t num_blocks, const size_t threads_per_block, vtype* const unchecked_hashes, const vtype* const bitsplitted_keys)
{
des_25_encrypt<15, 16, 17, 18, 3, 20, 19, 4, 21, 22, 23, 8, 31, 0, 1, 2, 19, 4, 3, 20, 5, 6, 7, 24, 47, 48, 49, 50, 35, 52, 51, 36, 53, 54, 55, 40, 63, 32, 33, 34, 51, 36, 35, 52, 37, 38, 39, 56><<<num_blocks, threads_per_block>>>(unchecked_hashes, bitsplitted_keys);
}
void des_25_encrypt_salt1904(const size_t num_blocks, const size_t threads_per_block, vtype* const unchecked_hashes, const vtype* const bitsplitted_keys)
{
des_25_encrypt<31, 0, 1, 2, 19, 20, 19, 4, 21, 22, 23, 8, 15, 16, 17, 18, 3, 4, 3, 20, 5, 6, 7, 24, 63, 32, 33, 34, 51, 52, 51, 36, 53, 54, 55, 40, 47, 48, 49, 50, 35, 36, 35, 52, 37, 38, 39, 56><<<num_blocks, threads_per_block>>>(unchecked_hashes, bitsplitted_keys);
}
void des_25_encrypt_salt1905(const size_t num_blocks, const size_t threads_per_block, vtype* const unchecked_hashes, const vtype* const bitsplitted_keys)
{
des_25_encrypt<15, 0, 1, 2, 19, 20, 19, 4, 21, 22, 23, 8, 31, 16, 17, 18, 3, 4, 3, 20, 5, 6, 7, 24, 47, 32, 33, 34, 51, 52, 51, 36, 53, 54, 55, 40, 63, 48, 49, 50, 35, 36, 35, 52, 37, 38, 39, 56><<<num_blocks, threads_per_block>>>(unchecked_hashes, bitsplitted_keys);
}
void des_25_encrypt_salt1906(const size_t num_blocks, const size_t threads_per_block, vtype* const unchecked_hashes, const vtype* const bitsplitted_keys)
{
des_25_encrypt<31, 16, 1, 2, 19, 20, 19, 4, 21, 22, 23, 8, 15, 0, 17, 18, 3, 4, 3, 20, 5, 6, 7, 24, 63, 48, 33, 34, 51, 52, 51, 36, 53, 54, 55, 40, 47, 32, 49, 50, 35, 36, 35, 52, 37, 38, 39, 56><<<num_blocks, threads_per_block>>>(unchecked_hashes, bitsplitted_keys);
}
void des_25_encrypt_salt1907(const size_t num_blocks, const size_t threads_per_block, vtype* const unchecked_hashes, const vtype* const bitsplitted_keys)
{
des_25_encrypt<15, 16, 1, 2, 19, 20, 19, 4, 21, 22, 23, 8, 31, 0, 17, 18, 3, 4, 3, 20, 5, 6, 7, 24, 47, 48, 33, 34, 51, 52, 51, 36, 53, 54, 55, 40, 63, 32, 49, 50, 35, 36, 35, 52, 37, 38, 39, 56><<<num_blocks, threads_per_block>>>(unchecked_hashes, bitsplitted_keys);
}
void des_25_encrypt_salt1908(const size_t num_blocks, const size_t threads_per_block, vtype* const unchecked_hashes, const vtype* const bitsplitted_keys)
{
des_25_encrypt<31, 0, 17, 2, 19, 20, 19, 4, 21, 22, 23, 8, 15, 16, 1, 18, 3, 4, 3, 20, 5, 6, 7, 24, 63, 32, 49, 34, 51, 52, 51, 36, 53, 54, 55, 40, 47, 48, 33, 50, 35, 36, 35, 52, 37, 38, 39, 56><<<num_blocks, threads_per_block>>>(unchecked_hashes, bitsplitted_keys);
}
void des_25_encrypt_salt1909(const size_t num_blocks, const size_t threads_per_block, vtype* const unchecked_hashes, const vtype* const bitsplitted_keys)
{
des_25_encrypt<15, 0, 17, 2, 19, 20, 19, 4, 21, 22, 23, 8, 31, 16, 1, 18, 3, 4, 3, 20, 5, 6, 7, 24, 47, 32, 49, 34, 51, 52, 51, 36, 53, 54, 55, 40, 63, 48, 33, 50, 35, 36, 35, 52, 37, 38, 39, 56><<<num_blocks, threads_per_block>>>(unchecked_hashes, bitsplitted_keys);
}
void des_25_encrypt_salt1910(const size_t num_blocks, const size_t threads_per_block, vtype* const unchecked_hashes, const vtype* const bitsplitted_keys)
{
des_25_encrypt<31, 16, 17, 2, 19, 20, 19, 4, 21, 22, 23, 8, 15, 0, 1, 18, 3, 4, 3, 20, 5, 6, 7, 24, 63, 48, 49, 34, 51, 52, 51, 36, 53, 54, 55, 40, 47, 32, 33, 50, 35, 36, 35, 52, 37, 38, 39, 56><<<num_blocks, threads_per_block>>>(unchecked_hashes, bitsplitted_keys);
}
void des_25_encrypt_salt1911(const size_t num_blocks, const size_t threads_per_block, vtype* const unchecked_hashes, const vtype* const bitsplitted_keys)
{
des_25_encrypt<15, 16, 17, 2, 19, 20, 19, 4, 21, 22, 23, 8, 31, 0, 1, 18, 3, 4, 3, 20, 5, 6, 7, 24, 47, 48, 49, 34, 51, 52, 51, 36, 53, 54, 55, 40, 63, 32, 33, 50, 35, 36, 35, 52, 37, 38, 39, 56><<<num_blocks, threads_per_block>>>(unchecked_hashes, bitsplitted_keys);
}
void des_25_encrypt_salt1912(const size_t num_blocks, const size_t threads_per_block, vtype* const unchecked_hashes, const vtype* const bitsplitted_keys)
{
des_25_encrypt<31, 0, 1, 18, 19, 20, 19, 4, 21, 22, 23, 8, 15, 16, 17, 2, 3, 4, 3, 20, 5, 6, 7, 24, 63, 32, 33, 50, 51, 52, 51, 36, 53, 54, 55, 40, 47, 48, 49, 34, 35, 36, 35, 52, 37, 38, 39, 56><<<num_blocks, threads_per_block>>>(unchecked_hashes, bitsplitted_keys);
}
void des_25_encrypt_salt1913(const size_t num_blocks, const size_t threads_per_block, vtype* const unchecked_hashes, const vtype* const bitsplitted_keys)
{
des_25_encrypt<15, 0, 1, 18, 19, 20, 19, 4, 21, 22, 23, 8, 31, 16, 17, 2, 3, 4, 3, 20, 5, 6, 7, 24, 47, 32, 33, 50, 51, 52, 51, 36, 53, 54, 55, 40, 63, 48, 49, 34, 35, 36, 35, 52, 37, 38, 39, 56><<<num_blocks, threads_per_block>>>(unchecked_hashes, bitsplitted_keys);
}
void des_25_encrypt_salt1914(const size_t num_blocks, const size_t threads_per_block, vtype* const unchecked_hashes, const vtype* const bitsplitted_keys)
{
des_25_encrypt<31, 16, 1, 18, 19, 20, 19, 4, 21, 22, 23, 8, 15, 0, 17, 2, 3, 4, 3, 20, 5, 6, 7, 24, 63, 48, 33, 50, 51, 52, 51, 36, 53, 54, 55, 40, 47, 32, 49, 34, 35, 36, 35, 52, 37, 38, 39, 56><<<num_blocks, threads_per_block>>>(unchecked_hashes, bitsplitted_keys);
}
void des_25_encrypt_salt1915(const size_t num_blocks, const size_t threads_per_block, vtype* const unchecked_hashes, const vtype* const bitsplitted_keys)
{
des_25_encrypt<15, 16, 1, 18, 19, 20, 19, 4, 21, 22, 23, 8, 31, 0, 17, 2, 3, 4, 3, 20, 5, 6, 7, 24, 47, 48, 33, 50, 51, 52, 51, 36, 53, 54, 55, 40, 63, 32, 49, 34, 35, 36, 35, 52, 37, 38, 39, 56><<<num_blocks, threads_per_block>>>(unchecked_hashes, bitsplitted_keys);
}
void des_25_encrypt_salt1916(const size_t num_blocks, const size_t threads_per_block, vtype* const unchecked_hashes, const vtype* const bitsplitted_keys)
{
des_25_encrypt<31, 0, 17, 18, 19, 20, 19, 4, 21, 22, 23, 8, 15, 16, 1, 2, 3, 4, 3, 20, 5, 6, 7, 24, 63, 32, 49, 50, 51, 52, 51, 36, 53, 54, 55, 40, 47, 48, 33, 34, 35, 36, 35, 52, 37, 38, 39, 56><<<num_blocks, threads_per_block>>>(unchecked_hashes, bitsplitted_keys);
}
void des_25_encrypt_salt1917(const size_t num_blocks, const size_t threads_per_block, vtype* const unchecked_hashes, const vtype* const bitsplitted_keys)
{
des_25_encrypt<15, 0, 17, 18, 19, 20, 19, 4, 21, 22, 23, 8, 31, 16, 1, 2, 3, 4, 3, 20, 5, 6, 7, 24, 47, 32, 49, 50, 51, 52, 51, 36, 53, 54, 55, 40, 63, 48, 33, 34, 35, 36, 35, 52, 37, 38, 39, 56><<<num_blocks, threads_per_block>>>(unchecked_hashes, bitsplitted_keys);
}
void des_25_encrypt_salt1918(const size_t num_blocks, const size_t threads_per_block, vtype* const unchecked_hashes, const vtype* const bitsplitted_keys)
{
des_25_encrypt<31, 16, 17, 18, 19, 20, 19, 4, 21, 22, 23, 8, 15, 0, 1, 2, 3, 4, 3, 20, 5, 6, 7, 24, 63, 48, 49, 50, 51, 52, 51, 36, 53, 54, 55, 40, 47, 32, 33, 34, 35, 36, 35, 52, 37, 38, 39, 56><<<num_blocks, threads_per_block>>>(unchecked_hashes, bitsplitted_keys);
}
void des_25_encrypt_salt1919(const size_t num_blocks, const size_t threads_per_block, vtype* const unchecked_hashes, const vtype* const bitsplitted_keys)
{
des_25_encrypt<15, 16, 17, 18, 19, 20, 19, 4, 21, 22, 23, 8, 31, 0, 1, 2, 3, 4, 3, 20, 5, 6, 7, 24, 47, 48, 49, 50, 51, 52, 51, 36, 53, 54, 55, 40, 63, 32, 33, 34, 35, 36, 35, 52, 37, 38, 39, 56><<<num_blocks, threads_per_block>>>(unchecked_hashes, bitsplitted_keys);
}
#endif // DESGPU_COMPILE_ALL_SALTS
|
6d30bfb1ddeaa7d04ace7e7b898a896554995827.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
#include <hip/hip_vector_types.h>
#include <optix_device.h>
#include <optix.h>
#include "OptiX7Craft.h"
#include "helpers.h"
extern "C" {
__constant__ Params params;
}
__inline__ __device__ float3 tonemap(const float3 in)
{
// hard coded exposure for sun/sky
const float exposure = 1.0f / 30.0f;
float3 x = exposure * in;
// "filmic" map from a GDC talk by John Hable. This includes 1/gamma.
x = fmaxf(x - make_float3(0.004f), make_float3(0.0f));
float3 ret = (x * (6.2f * x + make_float3(.5f))) / (x * (6.2f * x + make_float3(1.7f)) + make_float3(0.06f));
return ret;
}
static __device__ __inline__ float3 querySkyModel( bool CEL, const float3& direction )
{
PreethamSunSky sky = params.sky;
float3 overcast_sky_color = make_float3( 0.0f );
float3 sunlit_sky_color = make_float3( 0.0f );
// Preetham skylight model
if( sky.m_overcast < 1.0f ) {
float3 ray_direction = direction;
if( CEL && dot( ray_direction, sky.m_sun_dir) > 94.0f / sqrtf( 94.0f*94.0f + 0.45f*0.45f) ) {
sunlit_sky_color = sky.m_sun_color;
} else {
float inv_dir_dot_up = 1.f / dot( ray_direction, sky.m_up);
if(inv_dir_dot_up < 0.f) {
ray_direction = reflect(ray_direction, sky.m_up);
inv_dir_dot_up = -inv_dir_dot_up;
}
float gamma = dot(sky.m_sun_dir, ray_direction);
float acos_gamma = acosf(gamma);
float3 A = sky.m_c1 * inv_dir_dot_up;
float3 B = sky.m_c3 * acos_gamma;
float3 color_Yxy = ( make_float3( 1.0f ) + sky.m_c0*make_float3( expf( A.x ),expf( A.y ),expf( A.z ) ) ) *
( make_float3( 1.0f ) + sky.m_c2*make_float3( expf( B.x ),expf( B.y ),expf( B.z ) ) + sky.m_c4*gamma*gamma );
color_Yxy *= sky.m_inv_divisor_Yxy;
color_Yxy.y = 0.33f + 1.2f * ( color_Yxy.y - 0.33f ); // Pump up chromaticity a bit
color_Yxy.z = 0.33f + 1.2f * ( color_Yxy.z - 0.33f ); //
float3 color_XYZ = sky.Yxy2XYZ( color_Yxy );
sunlit_sky_color = sky.XYZ2rgb( color_XYZ );
sunlit_sky_color /= 1000.0f; // We are choosing to return kilo-candellas / meter^2
}
}
// CIE standard overcast sky model
float Y = 15.0f;
overcast_sky_color = make_float3( ( 1.0f + 2.0f * fabsf( direction.y ) ) / 3.0f * Y );
// return linear combo of the two
return lerp( sunlit_sky_color, overcast_sky_color, sky.m_overcast );
}
extern "C" __global__ void __miss__bg()
{
const MissData* sbt_data = (MissData*)optixGetSbtDataPointer();
SunPRD *prd = getPRD<SunPRD>();
const bool show_sun = (prd->depth == 0);
const float3 ray_dir = optixGetWorldRayDirection();
const float3 ray_orig = optixGetWorldRayOrigin();
const float3 ray_hit = ray_orig + ray_dir;
const float3 orig = make_float3(ray_orig.x, ray_orig.y - 0.6f, ray_orig.z);
float radius = 1.0f;
float3 texcoord = normalize(ray_hit-orig)/2;
float circle = params.circle;
float game_time = fmod(params.game_time,circle);
if (game_time >= circle /2.02) {
float3 skybox = make_float3(tex2D<float4>(sbt_data->night_map, texcoord.x + 0.5f, texcoord.z + 0.5f));
prd->radiance = skybox * params.ambient_light_color * 2.0f;
}
else if (game_time >= circle / 2.2) {
float3 skybox = make_float3(tex2D<float4>(sbt_data->night_map, texcoord.x + 0.5f, texcoord.z + 0.5f));
float3 color1 = skybox * params.ambient_light_color * 2.0f;
skybox = make_float3(tex2D<float4>(sbt_data->noon_map, texcoord.x + 0.5f, texcoord.z + 0.5f));
float3 color2 = skybox * params.ambient_light_color * 0.2f + tonemap(querySkyModel(show_sun, texcoord));
prd->radiance = lerp(color2, color1, (game_time - circle / 2.2) / (circle / 2.02 - circle / 2.2));
}
else if (game_time <= circle / 4 && game_time >= circle/20) {
float3 skybox = make_float3(tex2D<float4>(sbt_data->morning_map, texcoord.x + 0.5f, texcoord.z + 0.5f));
prd->radiance = skybox;
}
else if (game_time < circle / 20) {
float3 skybox = make_float3(tex2D<float4>(sbt_data->morning_map, texcoord.x + 0.5f, texcoord.z + 0.5f));
float3 color1 = skybox;
skybox = make_float3(tex2D<float4>(sbt_data->night_map, texcoord.x + 0.5f, texcoord.z + 0.5f));
float3 color2 = skybox * params.ambient_light_color * 2.0f;
prd->radiance = lerp(color2, color1, game_time / (circle / 20));
}
else if (game_time < circle / 3.5) {
float3 skybox = make_float3(tex2D<float4>(sbt_data->morning_map, texcoord.x + 0.5f, texcoord.z + 0.5f));
float3 color1 = skybox;
skybox = make_float3(tex2D<float4>(sbt_data->noon_map, texcoord.x + 0.5f, texcoord.z + 0.5f));
float3 color2 = skybox * params.ambient_light_color * 0.2f + tonemap(querySkyModel(show_sun, texcoord));
prd->radiance = lerp(color1, color2, (game_time-circle/4) / (circle /3.5 - circle/4 ));
}
else {
float3 skybox = make_float3(tex2D<float4>(sbt_data->noon_map, texcoord.x + 0.5f, texcoord.z + 0.5f));
prd->radiance = skybox * params.ambient_light_color * 0.2f + tonemap(querySkyModel(show_sun, ray_dir));
}
prd->done = true;
unsigned int u0, u1;
packPointer(&prd, u0, u1);
optixSetPayload_0(u0);
optixSetPayload_1(u1);
}
| 6d30bfb1ddeaa7d04ace7e7b898a896554995827.cu | #include <vector_types.h>
#include <optix_device.h>
#include <optix.h>
#include "OptiX7Craft.h"
#include "helpers.h"
extern "C" {
__constant__ Params params;
}
__inline__ __device__ float3 tonemap(const float3 in)
{
// hard coded exposure for sun/sky
const float exposure = 1.0f / 30.0f;
float3 x = exposure * in;
// "filmic" map from a GDC talk by John Hable. This includes 1/gamma.
x = fmaxf(x - make_float3(0.004f), make_float3(0.0f));
float3 ret = (x * (6.2f * x + make_float3(.5f))) / (x * (6.2f * x + make_float3(1.7f)) + make_float3(0.06f));
return ret;
}
static __device__ __inline__ float3 querySkyModel( bool CEL, const float3& direction )
{
PreethamSunSky sky = params.sky;
float3 overcast_sky_color = make_float3( 0.0f );
float3 sunlit_sky_color = make_float3( 0.0f );
// Preetham skylight model
if( sky.m_overcast < 1.0f ) {
float3 ray_direction = direction;
if( CEL && dot( ray_direction, sky.m_sun_dir) > 94.0f / sqrtf( 94.0f*94.0f + 0.45f*0.45f) ) {
sunlit_sky_color = sky.m_sun_color;
} else {
float inv_dir_dot_up = 1.f / dot( ray_direction, sky.m_up);
if(inv_dir_dot_up < 0.f) {
ray_direction = reflect(ray_direction, sky.m_up);
inv_dir_dot_up = -inv_dir_dot_up;
}
float gamma = dot(sky.m_sun_dir, ray_direction);
float acos_gamma = acosf(gamma);
float3 A = sky.m_c1 * inv_dir_dot_up;
float3 B = sky.m_c3 * acos_gamma;
float3 color_Yxy = ( make_float3( 1.0f ) + sky.m_c0*make_float3( expf( A.x ),expf( A.y ),expf( A.z ) ) ) *
( make_float3( 1.0f ) + sky.m_c2*make_float3( expf( B.x ),expf( B.y ),expf( B.z ) ) + sky.m_c4*gamma*gamma );
color_Yxy *= sky.m_inv_divisor_Yxy;
color_Yxy.y = 0.33f + 1.2f * ( color_Yxy.y - 0.33f ); // Pump up chromaticity a bit
color_Yxy.z = 0.33f + 1.2f * ( color_Yxy.z - 0.33f ); //
float3 color_XYZ = sky.Yxy2XYZ( color_Yxy );
sunlit_sky_color = sky.XYZ2rgb( color_XYZ );
sunlit_sky_color /= 1000.0f; // We are choosing to return kilo-candellas / meter^2
}
}
// CIE standard overcast sky model
float Y = 15.0f;
overcast_sky_color = make_float3( ( 1.0f + 2.0f * fabsf( direction.y ) ) / 3.0f * Y );
// return linear combo of the two
return lerp( sunlit_sky_color, overcast_sky_color, sky.m_overcast );
}
extern "C" __global__ void __miss__bg()
{
const MissData* sbt_data = (MissData*)optixGetSbtDataPointer();
SunPRD *prd = getPRD<SunPRD>();
const bool show_sun = (prd->depth == 0);
const float3 ray_dir = optixGetWorldRayDirection();
const float3 ray_orig = optixGetWorldRayOrigin();
const float3 ray_hit = ray_orig + ray_dir;
const float3 orig = make_float3(ray_orig.x, ray_orig.y - 0.6f, ray_orig.z);
float radius = 1.0f;
float3 texcoord = normalize(ray_hit-orig)/2;
float circle = params.circle;
float game_time = fmod(params.game_time,circle);
if (game_time >= circle /2.02) {
float3 skybox = make_float3(tex2D<float4>(sbt_data->night_map, texcoord.x + 0.5f, texcoord.z + 0.5f));
prd->radiance = skybox * params.ambient_light_color * 2.0f;
}
else if (game_time >= circle / 2.2) {
float3 skybox = make_float3(tex2D<float4>(sbt_data->night_map, texcoord.x + 0.5f, texcoord.z + 0.5f));
float3 color1 = skybox * params.ambient_light_color * 2.0f;
skybox = make_float3(tex2D<float4>(sbt_data->noon_map, texcoord.x + 0.5f, texcoord.z + 0.5f));
float3 color2 = skybox * params.ambient_light_color * 0.2f + tonemap(querySkyModel(show_sun, texcoord));
prd->radiance = lerp(color2, color1, (game_time - circle / 2.2) / (circle / 2.02 - circle / 2.2));
}
else if (game_time <= circle / 4 && game_time >= circle/20) {
float3 skybox = make_float3(tex2D<float4>(sbt_data->morning_map, texcoord.x + 0.5f, texcoord.z + 0.5f));
prd->radiance = skybox;
}
else if (game_time < circle / 20) {
float3 skybox = make_float3(tex2D<float4>(sbt_data->morning_map, texcoord.x + 0.5f, texcoord.z + 0.5f));
float3 color1 = skybox;
skybox = make_float3(tex2D<float4>(sbt_data->night_map, texcoord.x + 0.5f, texcoord.z + 0.5f));
float3 color2 = skybox * params.ambient_light_color * 2.0f;
prd->radiance = lerp(color2, color1, game_time / (circle / 20));
}
else if (game_time < circle / 3.5) {
float3 skybox = make_float3(tex2D<float4>(sbt_data->morning_map, texcoord.x + 0.5f, texcoord.z + 0.5f));
float3 color1 = skybox;
skybox = make_float3(tex2D<float4>(sbt_data->noon_map, texcoord.x + 0.5f, texcoord.z + 0.5f));
float3 color2 = skybox * params.ambient_light_color * 0.2f + tonemap(querySkyModel(show_sun, texcoord));
prd->radiance = lerp(color1, color2, (game_time-circle/4) / (circle /3.5 - circle/4 ));
}
else {
float3 skybox = make_float3(tex2D<float4>(sbt_data->noon_map, texcoord.x + 0.5f, texcoord.z + 0.5f));
prd->radiance = skybox * params.ambient_light_color * 0.2f + tonemap(querySkyModel(show_sun, ray_dir));
}
prd->done = true;
unsigned int u0, u1;
packPointer(&prd, u0, u1);
optixSetPayload_0(u0);
optixSetPayload_1(u1);
}
|
0ff1513d08defda119912f763c877b445d547cb8.hip | // !!! This is a file automatically generated by hipify!!!
#if !MEGDNN_TEGRA_X1
// generated by gen_cuda_conv_bias_kern_impls.py
// ignore warning of cutlass
#pragma GCC diagnostic push
#pragma GCC diagnostic ignored "-Wunused-parameter"
#pragma GCC diagnostic ignored "-Wstrict-aliasing"
#include "src/cuda/conv_bias/int8/conv_bias_int8_implicit_gemm_cutlass_wrapper.cuinl"
using LayoutSrc = cutlass::layout::TensorNCxHWx<4>;
using LayoutFilter = cutlass::layout::TensorCxRSKx<4>;
using ThreadBlockShape = cutlass::gemm::GemmShape<128, 32, 32>;
using WarpShape = cutlass::gemm::GemmShape<64, 32, 32>;
using InstructionShape = cutlass::gemm::GemmShape<1, 1, 4>;
using EpilogueOp = cutlass::epilogue::thread::BiasAddLinearCombinationClamp<
int8_t, 4, int32_t, int32_t, float>;
using Convolution = cutlass::convolution::device::Convolution<
int8_t, LayoutSrc, int8_t, LayoutFilter, int8_t,
LayoutSrc, int32_t, LayoutSrc, int32_t,
cutlass::convolution::ConvType::kConvolution, cutlass::arch::OpClassSimt, cutlass::arch::Sm61,
ThreadBlockShape, WarpShape, InstructionShape, EpilogueOp,
cutlass::convolution::threadblock::ConvolutionNCxHWxThreadblockSwizzle<
cutlass::convolution::ConvType::kConvolution>,
2, 4, 16, false>;
template void megdnn::cuda::cutlass_wrapper::cutlass_convolution_wrapper<Convolution>(
const int8_t* d_src,
const int8_t* d_filter,
const int32_t* d_bias,
const int8_t* d_z,
int8_t* d_dst,
int* workspace,
typename Convolution::ConvolutionParameter const& conv_param,
typename Convolution::EpilogueOutputOp::Params const& epilogue,
hipStream_t stream);
#pragma GCC diagnostic pop
#endif
| 0ff1513d08defda119912f763c877b445d547cb8.cu | #if !MEGDNN_TEGRA_X1
// generated by gen_cuda_conv_bias_kern_impls.py
// ignore warning of cutlass
#pragma GCC diagnostic push
#pragma GCC diagnostic ignored "-Wunused-parameter"
#pragma GCC diagnostic ignored "-Wstrict-aliasing"
#include "src/cuda/conv_bias/int8/conv_bias_int8_implicit_gemm_cutlass_wrapper.cuinl"
using LayoutSrc = cutlass::layout::TensorNCxHWx<4>;
using LayoutFilter = cutlass::layout::TensorCxRSKx<4>;
using ThreadBlockShape = cutlass::gemm::GemmShape<128, 32, 32>;
using WarpShape = cutlass::gemm::GemmShape<64, 32, 32>;
using InstructionShape = cutlass::gemm::GemmShape<1, 1, 4>;
using EpilogueOp = cutlass::epilogue::thread::BiasAddLinearCombinationClamp<
int8_t, 4, int32_t, int32_t, float>;
using Convolution = cutlass::convolution::device::Convolution<
int8_t, LayoutSrc, int8_t, LayoutFilter, int8_t,
LayoutSrc, int32_t, LayoutSrc, int32_t,
cutlass::convolution::ConvType::kConvolution, cutlass::arch::OpClassSimt, cutlass::arch::Sm61,
ThreadBlockShape, WarpShape, InstructionShape, EpilogueOp,
cutlass::convolution::threadblock::ConvolutionNCxHWxThreadblockSwizzle<
cutlass::convolution::ConvType::kConvolution>,
2, 4, 16, false>;
template void megdnn::cuda::cutlass_wrapper::cutlass_convolution_wrapper<Convolution>(
const int8_t* d_src,
const int8_t* d_filter,
const int32_t* d_bias,
const int8_t* d_z,
int8_t* d_dst,
int* workspace,
typename Convolution::ConvolutionParameter const& conv_param,
typename Convolution::EpilogueOutputOp::Params const& epilogue,
cudaStream_t stream);
#pragma GCC diagnostic pop
#endif
|
6a256e8309ae04684b44fee5a17eea7121806a73.hip | // !!! This is a file automatically generated by hipify!!!
#include <string>
#include <algorithm>
#include <math.h>
#include <stdio.h>
#include <vector>
#include <hip/hip_runtime.h>
#include <hip/hip_runtime.h>
#include <driver_functions.h>
#include "cudaRenderer.h"
#include "image.h"
#include "noise.h"
#include "sceneLoader.h"
#include "util.h"
#include "circleBoxTest.cu_inl"
////////////////////////////////////////////////////////////////////////////////////////
// Putting all the cuda kernels here
///////////////////////////////////////////////////////////////////////////////////////
struct GlobalConstants {
SceneName sceneName;
int numCircles;
float* position;
float* velocity;
float* color;
float* radius;
int imageWidth;
int imageHeight;
float* imageData;
};
// Global variable that is in scope, but read-only, for all cuda
// kernels. The __constant__ modifier designates this variable will
// be stored in special "constant" memory on the GPU. (we didn't talk
// about this type of memory in class, but constant memory is a fast
// place to put read-only variables).
__constant__ GlobalConstants cuConstRendererParams;
// read-only lookup tables used to quickly compute noise (needed by
// advanceAnimation for the snowflake scene)
__constant__ int cuConstNoiseYPermutationTable[256];
__constant__ int cuConstNoiseXPermutationTable[256];
__constant__ float cuConstNoise1DValueTable[256];
// color ramp table needed for the color ramp lookup shader
#define COLOR_MAP_SIZE 5
__constant__ float cuConstColorRamp[COLOR_MAP_SIZE][3];
// including parts of the CUDA code from external files to keep this
// file simpler and to seperate code that should not be modified
#include "noiseCuda.cu_inl"
#include "lookupColor.cu_inl"
// kernelClearImageSnowflake -- (CUDA device code)
//
// Clear the image, setting the image to the white-gray gradation that
// is used in the snowflake image
__global__ void kernelClearImageSnowflake() {
int imageX = blockIdx.x * blockDim.x + threadIdx.x;
int imageY = blockIdx.y * blockDim.y + threadIdx.y;
int width = cuConstRendererParams.imageWidth;
int height = cuConstRendererParams.imageHeight;
if (imageX >= width || imageY >= height)
return;
int offset = 4 * (imageY * width + imageX);
float shade = .4f + .45f * static_cast<float>(height-imageY) / height;
float4 value = make_float4(shade, shade, shade, 1.f);
// write to global memory: As an optimization, I use a float4
// store, that results in more efficient code than if I coded this
// up as four seperate fp32 stores.
*(float4*)(&cuConstRendererParams.imageData[offset]) = value;
}
// kernelClearImage -- (CUDA device code)
//
// Clear the image, setting all pixels to the specified color rgba
__global__ void kernelClearImage(float r, float g, float b, float a) {
int imageX = blockIdx.x * blockDim.x + threadIdx.x;
int imageY = blockIdx.y * blockDim.y + threadIdx.y;
int width = cuConstRendererParams.imageWidth;
int height = cuConstRendererParams.imageHeight;
if (imageX >= width || imageY >= height)
return;
int offset = 4 * (imageY * width + imageX);
float4 value = make_float4(r, g, b, a);
// write to global memory: As an optimization, I use a float4
// store, that results in more efficient code than if I coded this
// up as four seperate fp32 stores.
*(float4*)(&cuConstRendererParams.imageData[offset]) = value;
}
// kernelAdvanceFireWorks
//
// Update the position of the fireworks (if circle is firework)
__global__ void kernelAdvanceFireWorks() {
const float dt = 1.f / 60.f;
const float pi = 3.14159;
const float maxDist = 0.25f;
float* velocity = cuConstRendererParams.velocity;
float* position = cuConstRendererParams.position;
float* radius = cuConstRendererParams.radius;
int index = blockIdx.x * blockDim.x + threadIdx.x;
if (index >= cuConstRendererParams.numCircles)
return;
if (0 <= index && index < NUM_FIREWORKS) { // firework center; no update
return;
}
// determine the fire-work center/spark indices
int fIdx = (index - NUM_FIREWORKS) / NUM_SPARKS;
int sfIdx = (index - NUM_FIREWORKS) % NUM_SPARKS;
int index3i = 3 * fIdx;
int sIdx = NUM_FIREWORKS + fIdx * NUM_SPARKS + sfIdx;
int index3j = 3 * sIdx;
float cx = position[index3i];
float cy = position[index3i+1];
// update position
position[index3j] += velocity[index3j] * dt;
position[index3j+1] += velocity[index3j+1] * dt;
// fire-work sparks
float sx = position[index3j];
float sy = position[index3j+1];
// compute vector from firework-spark
float cxsx = sx - cx;
float cysy = sy - cy;
// compute distance from fire-work
float dist = sqrt(cxsx * cxsx + cysy * cysy);
if (dist > maxDist) { // restore to starting position
// random starting position on fire-work's rim
float angle = (sfIdx * 2 * pi)/NUM_SPARKS;
float sinA = sin(angle);
float cosA = cos(angle);
float x = cosA * radius[fIdx];
float y = sinA * radius[fIdx];
position[index3j] = position[index3i] + x;
position[index3j+1] = position[index3i+1] + y;
position[index3j+2] = 0.0f;
// travel scaled unit length
velocity[index3j] = cosA/5.0;
velocity[index3j+1] = sinA/5.0;
velocity[index3j+2] = 0.0f;
}
}
// kernelAdvanceHypnosis
//
// Update the radius/color of the circles
__global__ void kernelAdvanceHypnosis() {
int index = blockIdx.x * blockDim.x + threadIdx.x;
if (index >= cuConstRendererParams.numCircles)
return;
float* radius = cuConstRendererParams.radius;
float cutOff = 0.5f;
// place circle back in center after reaching threshold radisus
if (radius[index] > cutOff) {
radius[index] = 0.02f;
} else {
radius[index] += 0.01f;
}
}
// kernelAdvanceBouncingBalls
//
// Update the positino of the balls
__global__ void kernelAdvanceBouncingBalls() {
const float dt = 1.f / 60.f;
const float kGravity = -2.8f; // sorry Newton
const float kDragCoeff = -0.8f;
const float epsilon = 0.001f;
int index = blockIdx.x * blockDim.x + threadIdx.x;
if (index >= cuConstRendererParams.numCircles)
return;
float* velocity = cuConstRendererParams.velocity;
float* position = cuConstRendererParams.position;
int index3 = 3 * index;
// reverse velocity if center position < 0
float oldVelocity = velocity[index3+1];
float oldPosition = position[index3+1];
if (oldVelocity == 0.f && oldPosition == 0.f) { // stop-condition
return;
}
if (position[index3+1] < 0 && oldVelocity < 0.f) { // bounce ball
velocity[index3+1] *= kDragCoeff;
}
// update velocity: v = u + at (only along y-axis)
velocity[index3+1] += kGravity * dt;
// update positions (only along y-axis)
position[index3+1] += velocity[index3+1] * dt;
if (fabsf(velocity[index3+1] - oldVelocity) < epsilon
&& oldPosition < 0.0f
&& fabsf(position[index3+1]-oldPosition) < epsilon) { // stop ball
velocity[index3+1] = 0.f;
position[index3+1] = 0.f;
}
}
// kernelAdvanceSnowflake -- (CUDA device code)
//
// move the snowflake animation forward one time step. Updates circle
// positions and velocities. Note how the position of the snowflake
// is reset if it moves off the left, right, or bottom of the screen.
__global__ void kernelAdvanceSnowflake() {
int index = blockIdx.x * blockDim.x + threadIdx.x;
if (index >= cuConstRendererParams.numCircles)
return;
const float dt = 1.f / 60.f;
const float kGravity = -1.8f; // sorry Newton
const float kDragCoeff = 2.f;
int index3 = 3 * index;
float* positionPtr = &cuConstRendererParams.position[index3];
float* velocityPtr = &cuConstRendererParams.velocity[index3];
// loads from global memory
float3 position = *((float3*)positionPtr);
float3 velocity = *((float3*)velocityPtr);
// hack to make farther circles move more slowly, giving the
// illusion of parallax
float forceScaling = fmin(fmax(1.f - position.z, .1f), 1.f); // clamp
// add some noise to the motion to make the snow flutter
float3 noiseInput;
noiseInput.x = 10.f * position.x;
noiseInput.y = 10.f * position.y;
noiseInput.z = 255.f * position.z;
float2 noiseForce = cudaVec2CellNoise(noiseInput, index);
noiseForce.x *= 7.5f;
noiseForce.y *= 5.f;
// drag
float2 dragForce;
dragForce.x = -1.f * kDragCoeff * velocity.x;
dragForce.y = -1.f * kDragCoeff * velocity.y;
// update positions
position.x += velocity.x * dt;
position.y += velocity.y * dt;
// update velocities
velocity.x += forceScaling * (noiseForce.x + dragForce.y) * dt;
velocity.y += forceScaling * (kGravity + noiseForce.y + dragForce.y) * dt;
float radius = cuConstRendererParams.radius[index];
// if the snowflake has moved off the left, right or bottom of
// the screen, place it back at the top and give it a
// pseudorandom x position and velocity.
if ( (position.y + radius < 0.f) ||
(position.x + radius) < -0.f ||
(position.x - radius) > 1.f)
{
noiseInput.x = 255.f * position.x;
noiseInput.y = 255.f * position.y;
noiseInput.z = 255.f * position.z;
noiseForce = cudaVec2CellNoise(noiseInput, index);
position.x = .5f + .5f * noiseForce.x;
position.y = 1.35f + radius;
// restart from 0 vertical velocity. Choose a
// pseudo-random horizontal velocity.
velocity.x = 2.f * noiseForce.y;
velocity.y = 0.f;
}
// store updated positions and velocities to global memory
*((float3*)positionPtr) = position;
*((float3*)velocityPtr) = velocity;
}
// shadePixel -- (CUDA device code)
//
// given a pixel and a circle, determines the contribution to the
// pixel from the circle. Update of the image is done in this
// function. Called by kernelRenderPixels()
__device__ __inline__ void
shadePixel(int circleIndex, float2 pixelCenter, float3 p, float4* imagePtr) {
// p : position of circle
float diffX = p.x - pixelCenter.x;
float diffY = p.y - pixelCenter.y;
float pixelDist = diffX * diffX + diffY * diffY;
float rad = cuConstRendererParams.radius[circleIndex];;
float maxDist = rad * rad;
// circle does not contribute to the image
if (pixelDist > maxDist)
return;
float3 rgb;
float alpha;
// there is a non-zero contribution. Now compute the shading value
// suggestion: This conditional is in the inner loop. Although it
// will evaluate the same for all threads, there is overhead in
// setting up the lane masks etc to implement the conditional. It
// would be wise to perform this logic outside of the loop next in
// kernelRenderPixels. (If feeling good about yourself, you
// could use some specialized template magic).
if (cuConstRendererParams.sceneName == SNOWFLAKES || cuConstRendererParams.sceneName == SNOWFLAKES_SINGLE_FRAME) {
const float kCircleMaxAlpha = .5f;
const float falloffScale = 4.f;
float normPixelDist = sqrt(pixelDist) / rad;
rgb = lookupColor(normPixelDist);
float maxAlpha = .6f + .4f * (1.f-p.z);
maxAlpha = kCircleMaxAlpha * fmaxf(fminf(maxAlpha, 1.f), 0.f); // kCircleMaxAlpha * clamped value
alpha = maxAlpha * exp(-1.f * falloffScale * normPixelDist * normPixelDist);
} else {
// simple: each circle has an assigned color
int index3 = 3 * circleIndex;
rgb = *(float3*)&(cuConstRendererParams.color[index3]);
alpha = .5f;
}
float oneMinusAlpha = 1.f - alpha;
// BEGIN SHOULD-BE-ATOMIC REGION
// global memory read
float4 existingColor = *imagePtr;
float4 newColor;
newColor.x = alpha * rgb.x + oneMinusAlpha * existingColor.x;
newColor.y = alpha * rgb.y + oneMinusAlpha * existingColor.y;
newColor.z = alpha * rgb.z + oneMinusAlpha * existingColor.z;
newColor.w = alpha + existingColor.w;
// global memory write
*imagePtr = newColor;
// END SHOULD-BE-ATOMIC REGION
}
//888
// kernelRenderPixels -- (CUDA device code)
//
// Each thread renders a circle. Since there is no protection to
// ensure order of update or mutual exclusion on the output image, the
// resulting image will be incorrect.
__global__ void kernelRenderPixels() {
int imageWidth = cuConstRendererParams.imageWidth;
int imageHeight = cuConstRendererParams.imageHeight;
int numCircles = cuConstRendererParams.numCircles;
int minX = blockIdx.x * blockDim.x; // inclusive
int minY = blockIdx.y * blockDim.y; // inclusive
int maxX = min(imageWidth,
(blockIdx.x + 1) * blockDim.x); // exclusive
int maxY = min(imageHeight,
(blockIdx.y + 1) * blockDim.y); // exclusive
// __share__
int pixelX = minX + threadIdx.x;
int pixelY = minY + threadIdx.y;
if (pixelX >= maxX || pixelY >= maxY) {
return;
}
float invWidth = 1.f / imageWidth;
float invHeight = 1.f / imageHeight;
float2 pixelCenterNorm = make_float2(invWidth * (static_cast<float>(pixelX) + 0.5f),
invHeight * (static_cast<float>(pixelY) + 0.5f));
float4* imgPtr = (float4*)(&cuConstRendererParams.imageData[4 * (pixelY * imageWidth + pixelX)]);
float boxL = invWidth * static_cast<float>(minX);
float boxR = invWidth * static_cast<float>(maxX-1);
float boxT = invHeight * static_cast<float>(minY);
float boxB = invHeight * static_cast<float>(maxY-1);
__shared__ int A[100];
__shared__ int K;
K = 0;
// for all pixels in the bonding box
if (threadIdx.x == 0 && threadIdx.y == 0) {
for (int i=0; i < numCircles; i++) {
// p : circle position
float3 p = *(float3*)&(cuConstRendererParams.position[3 * i]);
float radius = cuConstRendererParams.radius[i];
if (circleInBoxConservative(
p.x, p.y, radius, boxL, boxR, boxB, boxT)) {
A[K] = i;
K++;
}
}
}
__syncthreads();
for (int i=0; i < K; i++) {
float3 p = *(float3*)&(cuConstRendererParams.position[3 * A[i]]);
shadePixel(A[i], pixelCenterNorm, p, imgPtr);
}
// for (int i=0; i < numCircles; i++) {
// float3 p = *(float3*)&(cuConstRendererParams.position[3 * i]);
// float radius = cuConstRendererParams.radius[i];
//
//
// // p : circle position
// if (circleInBoxConservative(
// p.x, p.y, radius, boxL, boxR, boxB, boxT)) {
// shadePixel(i, pixelCenterNorm, p, imgPtr);
// }
// }
}
////////////////////////////////////////////////////////////////////////////////////////
CudaRenderer::CudaRenderer() {
image = NULL;
numCircles = 0;
position = NULL;
velocity = NULL;
color = NULL;
radius = NULL;
cudaDevicePosition = NULL;
cudaDeviceVelocity = NULL;
cudaDeviceColor = NULL;
cudaDeviceRadius = NULL;
cudaDeviceImageData = NULL;
}
CudaRenderer::~CudaRenderer() {
if (image) {
delete image;
}
if (position) {
delete [] position;
delete [] velocity;
delete [] color;
delete [] radius;
}
if (cudaDevicePosition) {
hipFree(cudaDevicePosition);
hipFree(cudaDeviceVelocity);
hipFree(cudaDeviceColor);
hipFree(cudaDeviceRadius);
hipFree(cudaDeviceImageData);
}
}
const Image*
CudaRenderer::getImage() {
// need to copy contents of the rendered image from device memory
// before we expose the Image object to the caller
printf("Copying image data from device\n");
hipMemcpy(image->data,
cudaDeviceImageData,
sizeof(float) * 4 * image->width * image->height,
hipMemcpyDeviceToHost);
return image;
}
void
CudaRenderer::loadScene(SceneName scene) {
sceneName = scene;
loadCircleScene(sceneName, numCircles, position, velocity, color, radius);
}
void
CudaRenderer::setup() {
int deviceCount = 0;
bool isFastGPU = false;
std::string name;
hipError_t err = hipGetDeviceCount(&deviceCount);
printf("---------------------------------------------------------\n");
printf("Initializing CUDA for CudaRenderer\n");
printf("Found %d CUDA devices\n", deviceCount);
for (int i=0; i<deviceCount; i++) {
hipDeviceProp_t deviceProps;
hipGetDeviceProperties(&deviceProps, i);
name = deviceProps.name;
if (name.compare("GeForce GTX 480") == 0
|| name.compare("GeForce GTX 670") == 0
|| name.compare("GeForce GTX 780") == 0)
{
isFastGPU = true;
}
printf("Device %d: %s\n", i, deviceProps.name);
printf(" SMs: %d\n", deviceProps.multiProcessorCount);
printf(" Global mem: %.0f MB\n", static_cast<float>(deviceProps.totalGlobalMem) / (1024 * 1024));
printf(" CUDA Cap: %d.%d\n", deviceProps.major, deviceProps.minor);
}
printf("---------------------------------------------------------\n");
if (!isFastGPU)
{
printf("WARNING: "
"You're not running on a fast GPU, please consider using "
"NVIDIA GTX 480, 670 or 780.\n");
printf("---------------------------------------------------------\n");
}
// By this time the scene should be loaded. Now copy all the key
// data structures into device memory so they are accessible to
// CUDA kernels
//
// See the CUDA Programmer's Guide for descriptions of
// hipMalloc and hipMemcpy
hipMalloc(&cudaDevicePosition, sizeof(float) * 3 * numCircles);
hipMalloc(&cudaDeviceVelocity, sizeof(float) * 3 * numCircles);
hipMalloc(&cudaDeviceColor, sizeof(float) * 3 * numCircles);
hipMalloc(&cudaDeviceRadius, sizeof(float) * numCircles);
hipMalloc(&cudaDeviceImageData, sizeof(float) * 4 * image->width * image->height);
hipMemcpy(cudaDevicePosition, position, sizeof(float) * 3 * numCircles, hipMemcpyHostToDevice);
hipMemcpy(cudaDeviceVelocity, velocity, sizeof(float) * 3 * numCircles, hipMemcpyHostToDevice);
hipMemcpy(cudaDeviceColor, color, sizeof(float) * 3 * numCircles, hipMemcpyHostToDevice);
hipMemcpy(cudaDeviceRadius, radius, sizeof(float) * numCircles, hipMemcpyHostToDevice);
// Initialize parameters in constant memory. We didn't talk about
// constant memory in class, but the use of read-only constant
// memory here is an optimization over just sticking these values
// in device global memory. NVIDIA GPUs have a few special tricks
// for optimizing access to constant memory. Using global memory
// here would have worked just as well. See the Programmer's
// Guide for more information about constant memory.
GlobalConstants params;
params.sceneName = sceneName;
params.numCircles = numCircles;
params.imageWidth = image->width;
params.imageHeight = image->height;
params.position = cudaDevicePosition;
params.velocity = cudaDeviceVelocity;
params.color = cudaDeviceColor;
params.radius = cudaDeviceRadius;
params.imageData = cudaDeviceImageData;
hipMemcpyToSymbol(cuConstRendererParams, ¶ms, sizeof(GlobalConstants));
// also need to copy over the noise lookup tables, so we can
// implement noise on the GPU
int* permX;
int* permY;
float* value1D;
getNoiseTables(&permX, &permY, &value1D);
hipMemcpyToSymbol(cuConstNoiseXPermutationTable, permX, sizeof(int) * 256);
hipMemcpyToSymbol(cuConstNoiseYPermutationTable, permY, sizeof(int) * 256);
hipMemcpyToSymbol(cuConstNoise1DValueTable, value1D, sizeof(float) * 256);
// last, copy over the color table that's used by the shading
// function for circles in the snowflake demo
float lookupTable[COLOR_MAP_SIZE][3] = {
{1.f, 1.f, 1.f},
{1.f, 1.f, 1.f},
{.8f, .9f, 1.f},
{.8f, .9f, 1.f},
{.8f, 0.8f, 1.f},
};
hipMemcpyToSymbol(cuConstColorRamp, lookupTable, sizeof(float) * 3 * COLOR_MAP_SIZE);
}
// allocOutputImage --
//
// Allocate buffer the renderer will render into. Check status of
// image first to avoid memory leak.
void
CudaRenderer::allocOutputImage(int width, int height) {
if (image)
delete image;
image = new Image(width, height);
}
// clearImage --
//
// Clear's the renderer's target image. The state of the image after
// the clear depends on the scene being rendered.
void
CudaRenderer::clearImage() {
// 256 threads per block is a healthy number
dim3 blockDim(16, 16, 1);
dim3 gridDim(
(image->width + blockDim.x - 1) / blockDim.x,
(image->height + blockDim.y - 1) / blockDim.y);
if (sceneName == SNOWFLAKES || sceneName == SNOWFLAKES_SINGLE_FRAME) {
hipLaunchKernelGGL(( kernelClearImageSnowflake), dim3(gridDim), dim3(blockDim), 0, 0, );
} else {
hipLaunchKernelGGL(( kernelClearImage), dim3(gridDim), dim3(blockDim), 0, 0, 1.f, 1.f, 1.f, 1.f);
}
hipDeviceSynchronize();
}
// advanceAnimation --
//
// Advance the simulation one time step. Updates all circle positions
// and velocities
void
CudaRenderer::advanceAnimation() {
// 256 threads per block is a healthy number
dim3 blockDim(256, 1);
dim3 gridDim((numCircles + blockDim.x - 1) / blockDim.x);
// only the snowflake scene has animation
if (sceneName == SNOWFLAKES) {
hipLaunchKernelGGL(( kernelAdvanceSnowflake), dim3(gridDim), dim3(blockDim), 0, 0, );
} else if (sceneName == BOUNCING_BALLS) {
hipLaunchKernelGGL(( kernelAdvanceBouncingBalls), dim3(gridDim), dim3(blockDim), 0, 0, );
} else if (sceneName == HYPNOSIS) {
hipLaunchKernelGGL(( kernelAdvanceHypnosis), dim3(gridDim), dim3(blockDim), 0, 0, );
} else if (sceneName == FIREWORKS) {
hipLaunchKernelGGL(( kernelAdvanceFireWorks), dim3(gridDim), dim3(blockDim), 0, 0, );
}
hipDeviceSynchronize();
}
void
CudaRenderer::render() {
int imageWidth = image->width;
int imageHeight = image->height;
int numPixels = imageWidth * imageHeight;
printf("imageWidth = %d\n", imageWidth);
printf("imageHeight = %d\n", imageHeight);
printf("numPixels = %d\n", numPixels);
// pixels per block = 16 * 16
// int threadsPerBlock = blockDim.x * blockDim.y;
// numBlocks : (768 * 768) / (16 * 16) blocks
// int numBlocks = (numPixels + threadsPerBlock - 1) / threadsPerBlock;
// threadsPerBlock : 256 threads per block is a healthy number
dim3 blockDim(16, 16, 1);
dim3 gridDim(
(image->width + blockDim.x - 1) / blockDim.x,
(image->height + blockDim.y - 1) / blockDim.y);
// kernel for each pixel
hipLaunchKernelGGL(( kernelRenderPixels), dim3(gridDim), dim3(blockDim), 0, 0, );
hipDeviceSynchronize();
}
| 6a256e8309ae04684b44fee5a17eea7121806a73.cu | #include <string>
#include <algorithm>
#include <math.h>
#include <stdio.h>
#include <vector>
#include <cuda.h>
#include <cuda_runtime.h>
#include <driver_functions.h>
#include "cudaRenderer.h"
#include "image.h"
#include "noise.h"
#include "sceneLoader.h"
#include "util.h"
#include "circleBoxTest.cu_inl"
////////////////////////////////////////////////////////////////////////////////////////
// Putting all the cuda kernels here
///////////////////////////////////////////////////////////////////////////////////////
struct GlobalConstants {
SceneName sceneName;
int numCircles;
float* position;
float* velocity;
float* color;
float* radius;
int imageWidth;
int imageHeight;
float* imageData;
};
// Global variable that is in scope, but read-only, for all cuda
// kernels. The __constant__ modifier designates this variable will
// be stored in special "constant" memory on the GPU. (we didn't talk
// about this type of memory in class, but constant memory is a fast
// place to put read-only variables).
__constant__ GlobalConstants cuConstRendererParams;
// read-only lookup tables used to quickly compute noise (needed by
// advanceAnimation for the snowflake scene)
__constant__ int cuConstNoiseYPermutationTable[256];
__constant__ int cuConstNoiseXPermutationTable[256];
__constant__ float cuConstNoise1DValueTable[256];
// color ramp table needed for the color ramp lookup shader
#define COLOR_MAP_SIZE 5
__constant__ float cuConstColorRamp[COLOR_MAP_SIZE][3];
// including parts of the CUDA code from external files to keep this
// file simpler and to seperate code that should not be modified
#include "noiseCuda.cu_inl"
#include "lookupColor.cu_inl"
// kernelClearImageSnowflake -- (CUDA device code)
//
// Clear the image, setting the image to the white-gray gradation that
// is used in the snowflake image
__global__ void kernelClearImageSnowflake() {
int imageX = blockIdx.x * blockDim.x + threadIdx.x;
int imageY = blockIdx.y * blockDim.y + threadIdx.y;
int width = cuConstRendererParams.imageWidth;
int height = cuConstRendererParams.imageHeight;
if (imageX >= width || imageY >= height)
return;
int offset = 4 * (imageY * width + imageX);
float shade = .4f + .45f * static_cast<float>(height-imageY) / height;
float4 value = make_float4(shade, shade, shade, 1.f);
// write to global memory: As an optimization, I use a float4
// store, that results in more efficient code than if I coded this
// up as four seperate fp32 stores.
*(float4*)(&cuConstRendererParams.imageData[offset]) = value;
}
// kernelClearImage -- (CUDA device code)
//
// Clear the image, setting all pixels to the specified color rgba
__global__ void kernelClearImage(float r, float g, float b, float a) {
int imageX = blockIdx.x * blockDim.x + threadIdx.x;
int imageY = blockIdx.y * blockDim.y + threadIdx.y;
int width = cuConstRendererParams.imageWidth;
int height = cuConstRendererParams.imageHeight;
if (imageX >= width || imageY >= height)
return;
int offset = 4 * (imageY * width + imageX);
float4 value = make_float4(r, g, b, a);
// write to global memory: As an optimization, I use a float4
// store, that results in more efficient code than if I coded this
// up as four seperate fp32 stores.
*(float4*)(&cuConstRendererParams.imageData[offset]) = value;
}
// kernelAdvanceFireWorks
//
// Update the position of the fireworks (if circle is firework)
__global__ void kernelAdvanceFireWorks() {
const float dt = 1.f / 60.f;
const float pi = 3.14159;
const float maxDist = 0.25f;
float* velocity = cuConstRendererParams.velocity;
float* position = cuConstRendererParams.position;
float* radius = cuConstRendererParams.radius;
int index = blockIdx.x * blockDim.x + threadIdx.x;
if (index >= cuConstRendererParams.numCircles)
return;
if (0 <= index && index < NUM_FIREWORKS) { // firework center; no update
return;
}
// determine the fire-work center/spark indices
int fIdx = (index - NUM_FIREWORKS) / NUM_SPARKS;
int sfIdx = (index - NUM_FIREWORKS) % NUM_SPARKS;
int index3i = 3 * fIdx;
int sIdx = NUM_FIREWORKS + fIdx * NUM_SPARKS + sfIdx;
int index3j = 3 * sIdx;
float cx = position[index3i];
float cy = position[index3i+1];
// update position
position[index3j] += velocity[index3j] * dt;
position[index3j+1] += velocity[index3j+1] * dt;
// fire-work sparks
float sx = position[index3j];
float sy = position[index3j+1];
// compute vector from firework-spark
float cxsx = sx - cx;
float cysy = sy - cy;
// compute distance from fire-work
float dist = sqrt(cxsx * cxsx + cysy * cysy);
if (dist > maxDist) { // restore to starting position
// random starting position on fire-work's rim
float angle = (sfIdx * 2 * pi)/NUM_SPARKS;
float sinA = sin(angle);
float cosA = cos(angle);
float x = cosA * radius[fIdx];
float y = sinA * radius[fIdx];
position[index3j] = position[index3i] + x;
position[index3j+1] = position[index3i+1] + y;
position[index3j+2] = 0.0f;
// travel scaled unit length
velocity[index3j] = cosA/5.0;
velocity[index3j+1] = sinA/5.0;
velocity[index3j+2] = 0.0f;
}
}
// kernelAdvanceHypnosis
//
// Update the radius/color of the circles
__global__ void kernelAdvanceHypnosis() {
int index = blockIdx.x * blockDim.x + threadIdx.x;
if (index >= cuConstRendererParams.numCircles)
return;
float* radius = cuConstRendererParams.radius;
float cutOff = 0.5f;
// place circle back in center after reaching threshold radisus
if (radius[index] > cutOff) {
radius[index] = 0.02f;
} else {
radius[index] += 0.01f;
}
}
// kernelAdvanceBouncingBalls
//
// Update the positino of the balls
__global__ void kernelAdvanceBouncingBalls() {
const float dt = 1.f / 60.f;
const float kGravity = -2.8f; // sorry Newton
const float kDragCoeff = -0.8f;
const float epsilon = 0.001f;
int index = blockIdx.x * blockDim.x + threadIdx.x;
if (index >= cuConstRendererParams.numCircles)
return;
float* velocity = cuConstRendererParams.velocity;
float* position = cuConstRendererParams.position;
int index3 = 3 * index;
// reverse velocity if center position < 0
float oldVelocity = velocity[index3+1];
float oldPosition = position[index3+1];
if (oldVelocity == 0.f && oldPosition == 0.f) { // stop-condition
return;
}
if (position[index3+1] < 0 && oldVelocity < 0.f) { // bounce ball
velocity[index3+1] *= kDragCoeff;
}
// update velocity: v = u + at (only along y-axis)
velocity[index3+1] += kGravity * dt;
// update positions (only along y-axis)
position[index3+1] += velocity[index3+1] * dt;
if (fabsf(velocity[index3+1] - oldVelocity) < epsilon
&& oldPosition < 0.0f
&& fabsf(position[index3+1]-oldPosition) < epsilon) { // stop ball
velocity[index3+1] = 0.f;
position[index3+1] = 0.f;
}
}
// kernelAdvanceSnowflake -- (CUDA device code)
//
// move the snowflake animation forward one time step. Updates circle
// positions and velocities. Note how the position of the snowflake
// is reset if it moves off the left, right, or bottom of the screen.
__global__ void kernelAdvanceSnowflake() {
int index = blockIdx.x * blockDim.x + threadIdx.x;
if (index >= cuConstRendererParams.numCircles)
return;
const float dt = 1.f / 60.f;
const float kGravity = -1.8f; // sorry Newton
const float kDragCoeff = 2.f;
int index3 = 3 * index;
float* positionPtr = &cuConstRendererParams.position[index3];
float* velocityPtr = &cuConstRendererParams.velocity[index3];
// loads from global memory
float3 position = *((float3*)positionPtr);
float3 velocity = *((float3*)velocityPtr);
// hack to make farther circles move more slowly, giving the
// illusion of parallax
float forceScaling = fmin(fmax(1.f - position.z, .1f), 1.f); // clamp
// add some noise to the motion to make the snow flutter
float3 noiseInput;
noiseInput.x = 10.f * position.x;
noiseInput.y = 10.f * position.y;
noiseInput.z = 255.f * position.z;
float2 noiseForce = cudaVec2CellNoise(noiseInput, index);
noiseForce.x *= 7.5f;
noiseForce.y *= 5.f;
// drag
float2 dragForce;
dragForce.x = -1.f * kDragCoeff * velocity.x;
dragForce.y = -1.f * kDragCoeff * velocity.y;
// update positions
position.x += velocity.x * dt;
position.y += velocity.y * dt;
// update velocities
velocity.x += forceScaling * (noiseForce.x + dragForce.y) * dt;
velocity.y += forceScaling * (kGravity + noiseForce.y + dragForce.y) * dt;
float radius = cuConstRendererParams.radius[index];
// if the snowflake has moved off the left, right or bottom of
// the screen, place it back at the top and give it a
// pseudorandom x position and velocity.
if ( (position.y + radius < 0.f) ||
(position.x + radius) < -0.f ||
(position.x - radius) > 1.f)
{
noiseInput.x = 255.f * position.x;
noiseInput.y = 255.f * position.y;
noiseInput.z = 255.f * position.z;
noiseForce = cudaVec2CellNoise(noiseInput, index);
position.x = .5f + .5f * noiseForce.x;
position.y = 1.35f + radius;
// restart from 0 vertical velocity. Choose a
// pseudo-random horizontal velocity.
velocity.x = 2.f * noiseForce.y;
velocity.y = 0.f;
}
// store updated positions and velocities to global memory
*((float3*)positionPtr) = position;
*((float3*)velocityPtr) = velocity;
}
// shadePixel -- (CUDA device code)
//
// given a pixel and a circle, determines the contribution to the
// pixel from the circle. Update of the image is done in this
// function. Called by kernelRenderPixels()
__device__ __inline__ void
shadePixel(int circleIndex, float2 pixelCenter, float3 p, float4* imagePtr) {
// p : position of circle
float diffX = p.x - pixelCenter.x;
float diffY = p.y - pixelCenter.y;
float pixelDist = diffX * diffX + diffY * diffY;
float rad = cuConstRendererParams.radius[circleIndex];;
float maxDist = rad * rad;
// circle does not contribute to the image
if (pixelDist > maxDist)
return;
float3 rgb;
float alpha;
// there is a non-zero contribution. Now compute the shading value
// suggestion: This conditional is in the inner loop. Although it
// will evaluate the same for all threads, there is overhead in
// setting up the lane masks etc to implement the conditional. It
// would be wise to perform this logic outside of the loop next in
// kernelRenderPixels. (If feeling good about yourself, you
// could use some specialized template magic).
if (cuConstRendererParams.sceneName == SNOWFLAKES || cuConstRendererParams.sceneName == SNOWFLAKES_SINGLE_FRAME) {
const float kCircleMaxAlpha = .5f;
const float falloffScale = 4.f;
float normPixelDist = sqrt(pixelDist) / rad;
rgb = lookupColor(normPixelDist);
float maxAlpha = .6f + .4f * (1.f-p.z);
maxAlpha = kCircleMaxAlpha * fmaxf(fminf(maxAlpha, 1.f), 0.f); // kCircleMaxAlpha * clamped value
alpha = maxAlpha * exp(-1.f * falloffScale * normPixelDist * normPixelDist);
} else {
// simple: each circle has an assigned color
int index3 = 3 * circleIndex;
rgb = *(float3*)&(cuConstRendererParams.color[index3]);
alpha = .5f;
}
float oneMinusAlpha = 1.f - alpha;
// BEGIN SHOULD-BE-ATOMIC REGION
// global memory read
float4 existingColor = *imagePtr;
float4 newColor;
newColor.x = alpha * rgb.x + oneMinusAlpha * existingColor.x;
newColor.y = alpha * rgb.y + oneMinusAlpha * existingColor.y;
newColor.z = alpha * rgb.z + oneMinusAlpha * existingColor.z;
newColor.w = alpha + existingColor.w;
// global memory write
*imagePtr = newColor;
// END SHOULD-BE-ATOMIC REGION
}
//888
// kernelRenderPixels -- (CUDA device code)
//
// Each thread renders a circle. Since there is no protection to
// ensure order of update or mutual exclusion on the output image, the
// resulting image will be incorrect.
__global__ void kernelRenderPixels() {
int imageWidth = cuConstRendererParams.imageWidth;
int imageHeight = cuConstRendererParams.imageHeight;
int numCircles = cuConstRendererParams.numCircles;
int minX = blockIdx.x * blockDim.x; // inclusive
int minY = blockIdx.y * blockDim.y; // inclusive
int maxX = min(imageWidth,
(blockIdx.x + 1) * blockDim.x); // exclusive
int maxY = min(imageHeight,
(blockIdx.y + 1) * blockDim.y); // exclusive
// __share__
int pixelX = minX + threadIdx.x;
int pixelY = minY + threadIdx.y;
if (pixelX >= maxX || pixelY >= maxY) {
return;
}
float invWidth = 1.f / imageWidth;
float invHeight = 1.f / imageHeight;
float2 pixelCenterNorm = make_float2(invWidth * (static_cast<float>(pixelX) + 0.5f),
invHeight * (static_cast<float>(pixelY) + 0.5f));
float4* imgPtr = (float4*)(&cuConstRendererParams.imageData[4 * (pixelY * imageWidth + pixelX)]);
float boxL = invWidth * static_cast<float>(minX);
float boxR = invWidth * static_cast<float>(maxX-1);
float boxT = invHeight * static_cast<float>(minY);
float boxB = invHeight * static_cast<float>(maxY-1);
__shared__ int A[100];
__shared__ int K;
K = 0;
// for all pixels in the bonding box
if (threadIdx.x == 0 && threadIdx.y == 0) {
for (int i=0; i < numCircles; i++) {
// p : circle position
float3 p = *(float3*)&(cuConstRendererParams.position[3 * i]);
float radius = cuConstRendererParams.radius[i];
if (circleInBoxConservative(
p.x, p.y, radius, boxL, boxR, boxB, boxT)) {
A[K] = i;
K++;
}
}
}
__syncthreads();
for (int i=0; i < K; i++) {
float3 p = *(float3*)&(cuConstRendererParams.position[3 * A[i]]);
shadePixel(A[i], pixelCenterNorm, p, imgPtr);
}
// for (int i=0; i < numCircles; i++) {
// float3 p = *(float3*)&(cuConstRendererParams.position[3 * i]);
// float radius = cuConstRendererParams.radius[i];
//
//
// // p : circle position
// if (circleInBoxConservative(
// p.x, p.y, radius, boxL, boxR, boxB, boxT)) {
// shadePixel(i, pixelCenterNorm, p, imgPtr);
// }
// }
}
////////////////////////////////////////////////////////////////////////////////////////
CudaRenderer::CudaRenderer() {
image = NULL;
numCircles = 0;
position = NULL;
velocity = NULL;
color = NULL;
radius = NULL;
cudaDevicePosition = NULL;
cudaDeviceVelocity = NULL;
cudaDeviceColor = NULL;
cudaDeviceRadius = NULL;
cudaDeviceImageData = NULL;
}
CudaRenderer::~CudaRenderer() {
if (image) {
delete image;
}
if (position) {
delete [] position;
delete [] velocity;
delete [] color;
delete [] radius;
}
if (cudaDevicePosition) {
cudaFree(cudaDevicePosition);
cudaFree(cudaDeviceVelocity);
cudaFree(cudaDeviceColor);
cudaFree(cudaDeviceRadius);
cudaFree(cudaDeviceImageData);
}
}
const Image*
CudaRenderer::getImage() {
// need to copy contents of the rendered image from device memory
// before we expose the Image object to the caller
printf("Copying image data from device\n");
cudaMemcpy(image->data,
cudaDeviceImageData,
sizeof(float) * 4 * image->width * image->height,
cudaMemcpyDeviceToHost);
return image;
}
void
CudaRenderer::loadScene(SceneName scene) {
sceneName = scene;
loadCircleScene(sceneName, numCircles, position, velocity, color, radius);
}
void
CudaRenderer::setup() {
int deviceCount = 0;
bool isFastGPU = false;
std::string name;
cudaError_t err = cudaGetDeviceCount(&deviceCount);
printf("---------------------------------------------------------\n");
printf("Initializing CUDA for CudaRenderer\n");
printf("Found %d CUDA devices\n", deviceCount);
for (int i=0; i<deviceCount; i++) {
cudaDeviceProp deviceProps;
cudaGetDeviceProperties(&deviceProps, i);
name = deviceProps.name;
if (name.compare("GeForce GTX 480") == 0
|| name.compare("GeForce GTX 670") == 0
|| name.compare("GeForce GTX 780") == 0)
{
isFastGPU = true;
}
printf("Device %d: %s\n", i, deviceProps.name);
printf(" SMs: %d\n", deviceProps.multiProcessorCount);
printf(" Global mem: %.0f MB\n", static_cast<float>(deviceProps.totalGlobalMem) / (1024 * 1024));
printf(" CUDA Cap: %d.%d\n", deviceProps.major, deviceProps.minor);
}
printf("---------------------------------------------------------\n");
if (!isFastGPU)
{
printf("WARNING: "
"You're not running on a fast GPU, please consider using "
"NVIDIA GTX 480, 670 or 780.\n");
printf("---------------------------------------------------------\n");
}
// By this time the scene should be loaded. Now copy all the key
// data structures into device memory so they are accessible to
// CUDA kernels
//
// See the CUDA Programmer's Guide for descriptions of
// cudaMalloc and cudaMemcpy
cudaMalloc(&cudaDevicePosition, sizeof(float) * 3 * numCircles);
cudaMalloc(&cudaDeviceVelocity, sizeof(float) * 3 * numCircles);
cudaMalloc(&cudaDeviceColor, sizeof(float) * 3 * numCircles);
cudaMalloc(&cudaDeviceRadius, sizeof(float) * numCircles);
cudaMalloc(&cudaDeviceImageData, sizeof(float) * 4 * image->width * image->height);
cudaMemcpy(cudaDevicePosition, position, sizeof(float) * 3 * numCircles, cudaMemcpyHostToDevice);
cudaMemcpy(cudaDeviceVelocity, velocity, sizeof(float) * 3 * numCircles, cudaMemcpyHostToDevice);
cudaMemcpy(cudaDeviceColor, color, sizeof(float) * 3 * numCircles, cudaMemcpyHostToDevice);
cudaMemcpy(cudaDeviceRadius, radius, sizeof(float) * numCircles, cudaMemcpyHostToDevice);
// Initialize parameters in constant memory. We didn't talk about
// constant memory in class, but the use of read-only constant
// memory here is an optimization over just sticking these values
// in device global memory. NVIDIA GPUs have a few special tricks
// for optimizing access to constant memory. Using global memory
// here would have worked just as well. See the Programmer's
// Guide for more information about constant memory.
GlobalConstants params;
params.sceneName = sceneName;
params.numCircles = numCircles;
params.imageWidth = image->width;
params.imageHeight = image->height;
params.position = cudaDevicePosition;
params.velocity = cudaDeviceVelocity;
params.color = cudaDeviceColor;
params.radius = cudaDeviceRadius;
params.imageData = cudaDeviceImageData;
cudaMemcpyToSymbol(cuConstRendererParams, ¶ms, sizeof(GlobalConstants));
// also need to copy over the noise lookup tables, so we can
// implement noise on the GPU
int* permX;
int* permY;
float* value1D;
getNoiseTables(&permX, &permY, &value1D);
cudaMemcpyToSymbol(cuConstNoiseXPermutationTable, permX, sizeof(int) * 256);
cudaMemcpyToSymbol(cuConstNoiseYPermutationTable, permY, sizeof(int) * 256);
cudaMemcpyToSymbol(cuConstNoise1DValueTable, value1D, sizeof(float) * 256);
// last, copy over the color table that's used by the shading
// function for circles in the snowflake demo
float lookupTable[COLOR_MAP_SIZE][3] = {
{1.f, 1.f, 1.f},
{1.f, 1.f, 1.f},
{.8f, .9f, 1.f},
{.8f, .9f, 1.f},
{.8f, 0.8f, 1.f},
};
cudaMemcpyToSymbol(cuConstColorRamp, lookupTable, sizeof(float) * 3 * COLOR_MAP_SIZE);
}
// allocOutputImage --
//
// Allocate buffer the renderer will render into. Check status of
// image first to avoid memory leak.
void
CudaRenderer::allocOutputImage(int width, int height) {
if (image)
delete image;
image = new Image(width, height);
}
// clearImage --
//
// Clear's the renderer's target image. The state of the image after
// the clear depends on the scene being rendered.
void
CudaRenderer::clearImage() {
// 256 threads per block is a healthy number
dim3 blockDim(16, 16, 1);
dim3 gridDim(
(image->width + blockDim.x - 1) / blockDim.x,
(image->height + blockDim.y - 1) / blockDim.y);
if (sceneName == SNOWFLAKES || sceneName == SNOWFLAKES_SINGLE_FRAME) {
kernelClearImageSnowflake<<<gridDim, blockDim>>>();
} else {
kernelClearImage<<<gridDim, blockDim>>>(1.f, 1.f, 1.f, 1.f);
}
cudaThreadSynchronize();
}
// advanceAnimation --
//
// Advance the simulation one time step. Updates all circle positions
// and velocities
void
CudaRenderer::advanceAnimation() {
// 256 threads per block is a healthy number
dim3 blockDim(256, 1);
dim3 gridDim((numCircles + blockDim.x - 1) / blockDim.x);
// only the snowflake scene has animation
if (sceneName == SNOWFLAKES) {
kernelAdvanceSnowflake<<<gridDim, blockDim>>>();
} else if (sceneName == BOUNCING_BALLS) {
kernelAdvanceBouncingBalls<<<gridDim, blockDim>>>();
} else if (sceneName == HYPNOSIS) {
kernelAdvanceHypnosis<<<gridDim, blockDim>>>();
} else if (sceneName == FIREWORKS) {
kernelAdvanceFireWorks<<<gridDim, blockDim>>>();
}
cudaThreadSynchronize();
}
void
CudaRenderer::render() {
int imageWidth = image->width;
int imageHeight = image->height;
int numPixels = imageWidth * imageHeight;
printf("imageWidth = %d\n", imageWidth);
printf("imageHeight = %d\n", imageHeight);
printf("numPixels = %d\n", numPixels);
// pixels per block = 16 * 16
// int threadsPerBlock = blockDim.x * blockDim.y;
// numBlocks : (768 * 768) / (16 * 16) blocks
// int numBlocks = (numPixels + threadsPerBlock - 1) / threadsPerBlock;
// threadsPerBlock : 256 threads per block is a healthy number
dim3 blockDim(16, 16, 1);
dim3 gridDim(
(image->width + blockDim.x - 1) / blockDim.x,
(image->height + blockDim.y - 1) / blockDim.y);
// kernel for each pixel
kernelRenderPixels<<<gridDim, blockDim>>>();
cudaThreadSynchronize();
}
|
6fda4d9a062b6d7232ed057784ade7dfa0642104.hip | // !!! This is a file automatically generated by hipify!!!
// Copyright (c) Microsoft Corporation.
// SPDX-License-Identifier: Apache-2.0
// DeepSpeed Team
#include <limits>
#include "conversion_utils.h"
#include "inference_cuda_layers.h"
#ifndef __HIP_PLATFORM_HCC__
#include <hip/hip_runtime_api.h>
#endif
#include <cstdio>
#include <cstdlib>
#include <ctime>
#define MAX_REG_SIZE 8
#define minus_infinity -10000.0
void CheckCudaErrorAux(const char* file, unsigned line)
{
hipError_t err = hipGetLastError();
if (err == hipSuccess) return;
std::cerr << hipGetErrorString(err) << "(" << err << ") at " << file << ":" << line
<< std::endl;
throw std::runtime_error("CUDA ERROR!!!\n");
}
#define CUDA_CHECK_ERROR() CheckCudaErrorAux(__FILE__, __LINE__)
namespace cg = cooperative_groups;
template <typename T, int iterations>
__global__ void attn_softmax_v2(T* vals,
T* mask,
T* alibi,
float layer_scale,
bool triangular,
bool recompute,
bool local_attention,
int window_size,
int total_count,
int heads,
int sequence_length,
int num_seq,
int head_offset,
int mask_stride,
int mp_size,
int reduceWidth)
{
cg::thread_block b = cg::this_thread_block();
cg::thread_block_tile<WARP_SIZE> g = cg::tiled_partition<WARP_SIZE>(b);
float2 low_data[MAX_REG_SIZE];
float2 high_data[MAX_REG_SIZE];
const T zero_h = conversion::to<T>(0.f);
int wid = threadIdx.x >> 5;
int lane = threadIdx.x & 0x1f;
int warp_num = blockDim.x >> 5;
int reduce_blocks = reduceWidth >> 5;
int seq_lane = threadIdx.x % reduceWidth;
__shared__ float partialSum[MAX_WARP_NUM];
int iter_offset = blockIdx.x * (warp_num / reduce_blocks) + (wid / reduce_blocks);
int batch_idx = iter_offset / (num_seq * heads);
int alibi_offset = batch_idx * heads * mp_size + head_offset;
int mask_offset = batch_idx * mask_stride + (iter_offset % mask_stride);
if (iter_offset < total_count) {
vals += (iter_offset * sequence_length);
alibi_offset = (alibi_offset + ((iter_offset / num_seq) % heads)) * sequence_length;
mask_offset = mask_offset * sequence_length;
int seq_id = iter_offset % num_seq;
int real_seq_id = seq_id + (num_seq == sequence_length ? 0 : sequence_length);
int window_stride4 = (local_attention && (real_seq_id >> 2) > (window_size >> 2))
? (real_seq_id >> 2) - (window_size >> 2)
: 0;
int window_stride =
(local_attention && real_seq_id >= window_size) ? real_seq_id - window_size : -1;
float max_val = minus_infinity;
// if (lane == 0) printf("%d, %d: %d \n", wid, blockIdx.x, mask_offset);
for (int i = 0; i < iterations; i++) {
int data_id = i * (reduceWidth << 2) + (seq_lane);
bool check = (data_id >> 2) >= window_stride4;
bool low_x_check = check && (data_id < sequence_length) &&
(!triangular || (data_id <= seq_id)) && (data_id > window_stride);
bool low_y_check = check && ((data_id + reduceWidth) < sequence_length) &&
(!triangular || ((data_id + reduceWidth) <= seq_id)) &&
((data_id + reduceWidth) > window_stride);
bool high_x_check = check && ((data_id + reduceWidth * 2) < sequence_length) &&
(!triangular || ((data_id + reduceWidth * 2) <= seq_id)) &&
((data_id + reduceWidth * 2) > window_stride);
bool high_y_check = check && ((data_id + reduceWidth * 3) < sequence_length) &&
(!triangular || ((data_id + reduceWidth * 3) <= seq_id)) &&
((data_id + reduceWidth * 3) > window_stride);
if (mask && alibi) {
low_data[i].x = low_x_check
? conversion::to<float>(vals[data_id]) * layer_scale +
(conversion::to<float>(alibi[data_id + alibi_offset])) +
(conversion::to<float>(mask[data_id + mask_offset]))
: minus_infinity;
low_data[i].y =
low_y_check
? conversion::to<float>(vals[data_id + reduceWidth]) * layer_scale +
(conversion::to<float>(alibi[data_id + alibi_offset + reduceWidth])) +
(conversion::to<float>(mask[data_id + mask_offset + reduceWidth]))
: minus_infinity;
high_data[i].x =
high_x_check
? conversion::to<float>(vals[data_id + reduceWidth * 2]) * layer_scale +
(conversion::to<float>(
alibi[data_id + alibi_offset + reduceWidth * 2])) +
(conversion::to<float>(mask[data_id + mask_offset + reduceWidth * 2]))
: minus_infinity;
high_data[i].y =
high_y_check
? conversion::to<float>(vals[data_id + reduceWidth * 3]) * layer_scale +
(conversion::to<float>(
alibi[data_id + alibi_offset + reduceWidth * 3])) +
(conversion::to<float>(mask[data_id + mask_offset + reduceWidth * 3]))
: minus_infinity;
} else if (mask) {
low_data[i].x = low_x_check
? conversion::to<float>(vals[data_id]) * layer_scale +
(conversion::to<float>(mask[data_id + mask_offset]))
: minus_infinity;
low_data[i].y =
low_y_check
? conversion::to<float>(vals[data_id + reduceWidth]) * layer_scale +
(conversion::to<float>(mask[data_id + mask_offset + reduceWidth]))
: minus_infinity;
high_data[i].x =
high_x_check
? conversion::to<float>(vals[data_id + reduceWidth * 2]) * layer_scale +
(conversion::to<float>(mask[data_id + mask_offset + reduceWidth * 2]))
: minus_infinity;
high_data[i].y =
high_y_check
? conversion::to<float>(vals[data_id + reduceWidth * 3]) * layer_scale +
(conversion::to<float>(mask[data_id + mask_offset + reduceWidth * 3]))
: minus_infinity;
} else if (alibi) {
low_data[i].x = low_x_check
? conversion::to<float>(vals[data_id]) * layer_scale +
(conversion::to<float>(alibi[data_id + alibi_offset]))
: minus_infinity;
low_data[i].y =
low_y_check
? conversion::to<float>(vals[data_id + reduceWidth]) * layer_scale +
(conversion::to<float>(alibi[data_id + alibi_offset + reduceWidth]))
: minus_infinity;
high_data[i].x =
high_x_check
? conversion::to<float>(vals[data_id + reduceWidth * 2]) * layer_scale +
(conversion::to<float>(
alibi[data_id + alibi_offset + reduceWidth * 2]))
: minus_infinity;
high_data[i].y =
high_y_check
? conversion::to<float>(vals[data_id + reduceWidth * 3]) * layer_scale +
(conversion::to<float>(
alibi[data_id + alibi_offset + reduceWidth * 3]))
: minus_infinity;
} else {
low_data[i].x = low_x_check ? conversion::to<float>(vals[data_id]) * layer_scale
: minus_infinity;
low_data[i].y =
low_y_check ? conversion::to<float>(vals[data_id + reduceWidth]) * layer_scale
: minus_infinity;
high_data[i].x =
high_x_check
? conversion::to<float>(vals[data_id + reduceWidth * 2]) * layer_scale
: minus_infinity;
high_data[i].y =
high_y_check
? conversion::to<float>(vals[data_id + reduceWidth * 3]) * layer_scale
: minus_infinity;
}
// if(lane == 0) printf("%f , %d, %d \n", low_data[i].x, data_id, seq_id);
max_val = (low_data[i].x > max_val ? low_data[i].x : max_val);
max_val = (low_data[i].y > max_val ? low_data[i].y : max_val);
max_val = (high_data[i].x > max_val ? high_data[i].x : max_val);
max_val = (high_data[i].y > max_val ? high_data[i].y : max_val);
}
for (int i = 1; i < WARP_SIZE; i *= 2) {
auto temp = g.shfl_xor(max_val, i);
max_val = (temp > max_val ? temp : max_val);
}
if (reduceWidth > WARP_SIZE) {
if (lane == 0) partialSum[wid] = max_val;
b.sync();
if (lane < warp_num) max_val = partialSum[lane];
b.sync();
for (int i = 1; i < reduce_blocks; i *= 2) {
auto temp = g.shfl_xor(max_val, i);
max_val = (temp > max_val ? temp : max_val);
}
max_val = g.shfl(max_val, threadIdx.x / WARP_SIZE);
}
float sum = 0;
for (int i = 0; i < iterations; i++) {
low_data[i].x = __expf(low_data[i].x - max_val);
low_data[i].y = __expf(low_data[i].y - max_val);
high_data[i].x = __expf(high_data[i].x - max_val);
high_data[i].y = __expf(high_data[i].y - max_val);
sum += (low_data[i].x + low_data[i].y + high_data[i].x + high_data[i].y);
}
for (int i = 1; i < WARP_SIZE; i *= 2) sum += g.shfl_xor(sum, i);
if (reduceWidth > WARP_SIZE) {
if (lane == 0) partialSum[wid] = sum;
b.sync();
if (lane < warp_num) sum = partialSum[lane];
b.sync();
for (int i = 1; i < reduce_blocks; i *= 2) { sum += g.shfl_xor(sum, i); }
sum = g.shfl(sum, threadIdx.x / WARP_SIZE);
}
sum += 1e-6;
for (int i = 0; i < iterations; i++) {
int data_id = i * (reduceWidth << 2) + (seq_lane);
if (data_id < sequence_length) {
vals[data_id] = conversion::to<T>(low_data[i].x / sum);
if ((data_id + reduceWidth) < sequence_length)
vals[data_id + reduceWidth] = conversion::to<T>(low_data[i].y / sum);
if ((data_id + reduceWidth * 2) < sequence_length)
vals[data_id + reduceWidth * 2] = conversion::to<T>(high_data[i].x / sum);
if ((data_id + reduceWidth * 3) < sequence_length)
vals[data_id + reduceWidth * 3] = conversion::to<T>(high_data[i].y / sum);
}
}
}
}
template <int iterations>
__global__ void attn_softmax_v2(float* vals,
float* attn_mask,
float* alibi,
float layer_scale,
bool triangular,
bool recompute,
bool local_attention,
int window_size,
int total_count,
int heads,
int sequence_length,
int num_seq,
int head_offset,
int mask_stride,
int mp_size,
int reduceWidth)
{
cg::thread_block b = cg::this_thread_block();
cg::thread_block_tile<WARP_SIZE> g = cg::tiled_partition<WARP_SIZE>(b);
float4 data[MAX_REG_SIZE];
int wid = threadIdx.x >> 5;
int lane = threadIdx.x & 0x1f;
int warp_num = blockDim.x >> 5;
int reduce_blocks = reduceWidth >> 5;
int seq_lane = threadIdx.x % reduceWidth;
__shared__ float partialSum[MAX_WARP_NUM];
int iter_offset = blockIdx.x * (warp_num / reduce_blocks) + (wid / reduce_blocks);
if (iter_offset < total_count) {
vals += (iter_offset * sequence_length);
int batch_idx = iter_offset / (num_seq * heads);
int mask_offset = batch_idx * mask_stride + (iter_offset % mask_stride);
mask_offset = mask_offset * sequence_length;
int seq_id = iter_offset % num_seq;
int real_seq_id = seq_id + (num_seq == sequence_length ? 0 : sequence_length);
int window_stride4 = (local_attention && (real_seq_id >> 2) > (window_size >> 2))
? (real_seq_id >> 2) - (window_size >> 2)
: 0;
int window_stride =
(local_attention && real_seq_id >= window_size) ? real_seq_id - window_size : -1;
float max_val = minus_infinity;
for (int i = 0; i < iterations; i++) {
int data_id = i * (reduceWidth << 2) + (seq_lane);
bool check = (data_id >> 2) >= window_stride4;
bool x_check = check && (data_id < sequence_length) &&
(!triangular || (data_id <= seq_id)) && (data_id > window_stride);
bool y_check = check && ((data_id + reduceWidth) < sequence_length) &&
(!triangular || ((data_id + reduceWidth) <= seq_id)) &&
((data_id + reduceWidth) > window_stride);
bool z_check = check && ((data_id + reduceWidth * 2) < sequence_length) &&
(!triangular || ((data_id + reduceWidth * 2) <= seq_id)) &&
((data_id + reduceWidth * 2) > window_stride);
bool w_check = check && ((data_id + reduceWidth * 3) < sequence_length) &&
(!triangular || ((data_id + reduceWidth * 3) <= seq_id)) &&
((data_id + reduceWidth * 3) > window_stride);
if (attn_mask) {
data[i].x = x_check ? vals[data_id] + attn_mask[data_id + mask_offset]
: minus_infinity;
data[i].y = y_check ? vals[data_id + reduceWidth] +
attn_mask[data_id + mask_offset + reduceWidth]
: minus_infinity;
data[i].z = z_check ? vals[data_id + reduceWidth * 2] +
attn_mask[data_id + mask_offset + reduceWidth * 2]
: minus_infinity;
data[i].w = w_check ? vals[data_id + reduceWidth * 3] +
attn_mask[data_id + mask_offset + reduceWidth * 3]
: minus_infinity;
} else {
data[i].x = x_check ? vals[data_id] : minus_infinity;
data[i].y = y_check ? vals[data_id + reduceWidth] : minus_infinity;
data[i].z = z_check ? vals[data_id + reduceWidth * 2] : minus_infinity;
data[i].w = w_check ? vals[data_id + reduceWidth * 3] : minus_infinity;
}
max_val = (data[i].x > max_val ? data[i].x : max_val);
max_val = (data[i].y > max_val ? data[i].y : max_val);
max_val = (data[i].z > max_val ? data[i].z : max_val);
max_val = (data[i].w > max_val ? data[i].w : max_val);
}
for (int i = 1; i < WARP_SIZE; i *= 2) {
auto temp = g.shfl_xor(max_val, i);
max_val = (temp > max_val ? temp : max_val);
}
if (reduceWidth > WARP_SIZE) {
if (lane == 0) partialSum[wid] = max_val;
b.sync();
if (lane < warp_num) max_val = partialSum[lane];
b.sync();
for (int i = 1; i < reduce_blocks; i *= 2) {
auto temp = g.shfl_xor(max_val, i);
max_val = (temp > max_val ? temp : max_val);
}
max_val = g.shfl(max_val, threadIdx.x / WARP_SIZE);
}
float sum = 0;
for (int i = 0; i < iterations; i++) {
data[i].x = __expf(data[i].x - max_val);
data[i].y = __expf(data[i].y - max_val);
data[i].z = __expf(data[i].z - max_val);
data[i].w = __expf(data[i].w - max_val);
sum += (data[i].x + data[i].y + data[i].z + data[i].w);
}
for (int i = 1; i < WARP_SIZE; i *= 2) sum += g.shfl_xor(sum, i);
if (reduceWidth > WARP_SIZE) {
if (lane == 0) partialSum[wid] = sum;
b.sync();
if (lane < warp_num) sum = partialSum[lane];
b.sync();
for (int i = 1; i < reduce_blocks; i *= 2) { sum += g.shfl_xor(sum, i); }
sum = g.shfl(sum, threadIdx.x / WARP_SIZE);
}
sum += 1e-6;
for (int i = 0; i < iterations; i++) {
int data_id = i * (reduceWidth << 2) + (seq_lane);
if (data_id < sequence_length) {
vals[data_id] = data[i].x / sum;
if ((data_id + reduceWidth) < sequence_length)
vals[data_id + reduceWidth] = data[i].y / sum;
if ((data_id + reduceWidth * 2) < sequence_length)
vals[data_id + reduceWidth * 2] = data[i].z / sum;
if ((data_id + reduceWidth * 3) < sequence_length)
vals[data_id + reduceWidth * 3] = data[i].w / sum;
}
}
}
}
#define LAUNCH_ATTN_SOFTMAX_V2(iterations) \
hipLaunchKernelGGL(( attn_softmax_v2<T, iterations>), dim3(grid), dim3(block), 0, stream, vals, \
mask, \
alibi, \
layer_scale, \
triangular, \
recompute, \
local_attention, \
window_size, \
total_count, \
heads, \
sequence_length, \
num_seq, \
head_offset, \
mask_stride, \
mp_size, \
reduce_width);
template <typename T>
void launch_attn_softmax_v2(T* vals,
T* mask,
T* alibi,
float layer_scale,
bool triangular,
bool recompute,
bool local_attention,
int window_size,
int batch_size,
int heads,
int num_seq,
int sequence_length,
int head_offset,
int mask_stride,
int mp_size,
hipStream_t stream)
{
const int total_count = batch_size * heads * num_seq;
// Scheduling Overview
// 4 element unroll with power of 2 `reduce_width` threads to a ceiling of `attn_threads`
// Each block should be partitioned into as many `reduce_width` blocks
// as can be fit.
constexpr int attn_threads = 256;
constexpr int min_reduce_width = hw_warp_size;
constexpr int internal_unroll = 4;
// Handle internal unroll then round to next power of 2. Bump up to minimum granularity.
const int thread_steps_rounded =
next_pow2((sequence_length + internal_unroll - 1) / internal_unroll);
const int thread_steps_schedule =
(thread_steps_rounded < min_reduce_width) ? min_reduce_width : thread_steps_rounded;
// Bound reduce width to the number of threads
const int reduce_width = (thread_steps_schedule < attn_threads) ? thread_steps_schedule
: attn_threads;
// Scale for the excess
const int iterations = thread_steps_schedule / reduce_width;
// Should be safe since reduce_width is capped to attn_threads
const int partitions = attn_threads / reduce_width;
// Launch params
dim3 grid((total_count + partitions - 1) / partitions);
dim3 block(attn_threads);
if (sequence_length <= 32768) {
if (iterations == 1) {
LAUNCH_ATTN_SOFTMAX_V2(1);
} else if (iterations == 2) {
LAUNCH_ATTN_SOFTMAX_V2(2);
} else if (iterations == 4) {
LAUNCH_ATTN_SOFTMAX_V2(4);
} else if (iterations == 8) {
LAUNCH_ATTN_SOFTMAX_V2(8);
} else if (iterations == 16) {
LAUNCH_ATTN_SOFTMAX_V2(16);
} else if (iterations == 32) {
LAUNCH_ATTN_SOFTMAX_V2(32);
} else if (iterations == 64) {
LAUNCH_ATTN_SOFTMAX_V2(64);
}
} else
throw std::runtime_error("Unsupport Seq_Length!");
}
#define INSTANTIATE_LAUNCH_ATTN_SOFTMAX_V2(T) \
template void launch_attn_softmax_v2(T* vals, \
T* mask, \
T* alibi, \
float layer_scale, \
bool triangular, \
bool recompute, \
bool local_attention, \
int window_size, \
int batch_size, \
int heads, \
int num_seq, \
int sequence_length, \
int head_offset, \
int mask_stride, \
int mp_size, \
hipStream_t stream);
INSTANTIATE_LAUNCH_ATTN_SOFTMAX_V2(float);
#ifdef BF16_AVAILABLE
INSTANTIATE_LAUNCH_ATTN_SOFTMAX_V2(__nv_bfloat16);
#endif
INSTANTIATE_LAUNCH_ATTN_SOFTMAX_V2(__half);
#define DEF_ATTN_SOFTMAX_V2_HALF(_iter) \
template __global__ void attn_softmax_v2<__half, _iter>(__half * vals, \
__half * mask, \
__half * alibi, \
float layer_scale, \
bool triangular, \
bool recompute, \
bool local_attention, \
int window_size, \
int total_count, \
int heads, \
int sequence_length, \
int num_seq, \
int head_offset, \
int mask_stride, \
int mp_size, \
int reduceWidth)
#define DEF_ATTN_SOFTMAX_V2_BF16(_iter) \
template __global__ void attn_softmax_v2<__nv_bfloat16, _iter>(__nv_bfloat16 * vals, \
__nv_bfloat16 * mask, \
__nv_bfloat16 * alibi, \
float layer_scale, \
bool triangular, \
bool recompute, \
bool local_attention, \
int window_size, \
int total_count, \
int heads, \
int sequence_length, \
int num_seq, \
int head_offset, \
int mask_stride, \
int mp_size, \
int reduceWidth)
#define FOREACH_ITERATIONS(cb) \
cb(1); \
cb(2); \
cb(4); \
cb(8); \
cb(16); \
cb(32); \
cb(64)
FOREACH_ITERATIONS(DEF_ATTN_SOFTMAX_V2_HALF);
#ifdef BF16_AVAILABLE
FOREACH_ITERATIONS(DEF_ATTN_SOFTMAX_V2_BF16);
#endif
| 6fda4d9a062b6d7232ed057784ade7dfa0642104.cu | // Copyright (c) Microsoft Corporation.
// SPDX-License-Identifier: Apache-2.0
// DeepSpeed Team
#include <limits>
#include "conversion_utils.h"
#include "inference_cuda_layers.h"
#ifndef __HIP_PLATFORM_HCC__
#include <cuda_profiler_api.h>
#endif
#include <cstdio>
#include <cstdlib>
#include <ctime>
#define MAX_REG_SIZE 8
#define minus_infinity -10000.0
void CheckCudaErrorAux(const char* file, unsigned line)
{
cudaError_t err = cudaGetLastError();
if (err == cudaSuccess) return;
std::cerr << cudaGetErrorString(err) << "(" << err << ") at " << file << ":" << line
<< std::endl;
throw std::runtime_error("CUDA ERROR!!!\n");
}
#define CUDA_CHECK_ERROR() CheckCudaErrorAux(__FILE__, __LINE__)
namespace cg = cooperative_groups;
template <typename T, int iterations>
__global__ void attn_softmax_v2(T* vals,
T* mask,
T* alibi,
float layer_scale,
bool triangular,
bool recompute,
bool local_attention,
int window_size,
int total_count,
int heads,
int sequence_length,
int num_seq,
int head_offset,
int mask_stride,
int mp_size,
int reduceWidth)
{
cg::thread_block b = cg::this_thread_block();
cg::thread_block_tile<WARP_SIZE> g = cg::tiled_partition<WARP_SIZE>(b);
float2 low_data[MAX_REG_SIZE];
float2 high_data[MAX_REG_SIZE];
const T zero_h = conversion::to<T>(0.f);
int wid = threadIdx.x >> 5;
int lane = threadIdx.x & 0x1f;
int warp_num = blockDim.x >> 5;
int reduce_blocks = reduceWidth >> 5;
int seq_lane = threadIdx.x % reduceWidth;
__shared__ float partialSum[MAX_WARP_NUM];
int iter_offset = blockIdx.x * (warp_num / reduce_blocks) + (wid / reduce_blocks);
int batch_idx = iter_offset / (num_seq * heads);
int alibi_offset = batch_idx * heads * mp_size + head_offset;
int mask_offset = batch_idx * mask_stride + (iter_offset % mask_stride);
if (iter_offset < total_count) {
vals += (iter_offset * sequence_length);
alibi_offset = (alibi_offset + ((iter_offset / num_seq) % heads)) * sequence_length;
mask_offset = mask_offset * sequence_length;
int seq_id = iter_offset % num_seq;
int real_seq_id = seq_id + (num_seq == sequence_length ? 0 : sequence_length);
int window_stride4 = (local_attention && (real_seq_id >> 2) > (window_size >> 2))
? (real_seq_id >> 2) - (window_size >> 2)
: 0;
int window_stride =
(local_attention && real_seq_id >= window_size) ? real_seq_id - window_size : -1;
float max_val = minus_infinity;
// if (lane == 0) printf("%d, %d: %d \n", wid, blockIdx.x, mask_offset);
for (int i = 0; i < iterations; i++) {
int data_id = i * (reduceWidth << 2) + (seq_lane);
bool check = (data_id >> 2) >= window_stride4;
bool low_x_check = check && (data_id < sequence_length) &&
(!triangular || (data_id <= seq_id)) && (data_id > window_stride);
bool low_y_check = check && ((data_id + reduceWidth) < sequence_length) &&
(!triangular || ((data_id + reduceWidth) <= seq_id)) &&
((data_id + reduceWidth) > window_stride);
bool high_x_check = check && ((data_id + reduceWidth * 2) < sequence_length) &&
(!triangular || ((data_id + reduceWidth * 2) <= seq_id)) &&
((data_id + reduceWidth * 2) > window_stride);
bool high_y_check = check && ((data_id + reduceWidth * 3) < sequence_length) &&
(!triangular || ((data_id + reduceWidth * 3) <= seq_id)) &&
((data_id + reduceWidth * 3) > window_stride);
if (mask && alibi) {
low_data[i].x = low_x_check
? conversion::to<float>(vals[data_id]) * layer_scale +
(conversion::to<float>(alibi[data_id + alibi_offset])) +
(conversion::to<float>(mask[data_id + mask_offset]))
: minus_infinity;
low_data[i].y =
low_y_check
? conversion::to<float>(vals[data_id + reduceWidth]) * layer_scale +
(conversion::to<float>(alibi[data_id + alibi_offset + reduceWidth])) +
(conversion::to<float>(mask[data_id + mask_offset + reduceWidth]))
: minus_infinity;
high_data[i].x =
high_x_check
? conversion::to<float>(vals[data_id + reduceWidth * 2]) * layer_scale +
(conversion::to<float>(
alibi[data_id + alibi_offset + reduceWidth * 2])) +
(conversion::to<float>(mask[data_id + mask_offset + reduceWidth * 2]))
: minus_infinity;
high_data[i].y =
high_y_check
? conversion::to<float>(vals[data_id + reduceWidth * 3]) * layer_scale +
(conversion::to<float>(
alibi[data_id + alibi_offset + reduceWidth * 3])) +
(conversion::to<float>(mask[data_id + mask_offset + reduceWidth * 3]))
: minus_infinity;
} else if (mask) {
low_data[i].x = low_x_check
? conversion::to<float>(vals[data_id]) * layer_scale +
(conversion::to<float>(mask[data_id + mask_offset]))
: minus_infinity;
low_data[i].y =
low_y_check
? conversion::to<float>(vals[data_id + reduceWidth]) * layer_scale +
(conversion::to<float>(mask[data_id + mask_offset + reduceWidth]))
: minus_infinity;
high_data[i].x =
high_x_check
? conversion::to<float>(vals[data_id + reduceWidth * 2]) * layer_scale +
(conversion::to<float>(mask[data_id + mask_offset + reduceWidth * 2]))
: minus_infinity;
high_data[i].y =
high_y_check
? conversion::to<float>(vals[data_id + reduceWidth * 3]) * layer_scale +
(conversion::to<float>(mask[data_id + mask_offset + reduceWidth * 3]))
: minus_infinity;
} else if (alibi) {
low_data[i].x = low_x_check
? conversion::to<float>(vals[data_id]) * layer_scale +
(conversion::to<float>(alibi[data_id + alibi_offset]))
: minus_infinity;
low_data[i].y =
low_y_check
? conversion::to<float>(vals[data_id + reduceWidth]) * layer_scale +
(conversion::to<float>(alibi[data_id + alibi_offset + reduceWidth]))
: minus_infinity;
high_data[i].x =
high_x_check
? conversion::to<float>(vals[data_id + reduceWidth * 2]) * layer_scale +
(conversion::to<float>(
alibi[data_id + alibi_offset + reduceWidth * 2]))
: minus_infinity;
high_data[i].y =
high_y_check
? conversion::to<float>(vals[data_id + reduceWidth * 3]) * layer_scale +
(conversion::to<float>(
alibi[data_id + alibi_offset + reduceWidth * 3]))
: minus_infinity;
} else {
low_data[i].x = low_x_check ? conversion::to<float>(vals[data_id]) * layer_scale
: minus_infinity;
low_data[i].y =
low_y_check ? conversion::to<float>(vals[data_id + reduceWidth]) * layer_scale
: minus_infinity;
high_data[i].x =
high_x_check
? conversion::to<float>(vals[data_id + reduceWidth * 2]) * layer_scale
: minus_infinity;
high_data[i].y =
high_y_check
? conversion::to<float>(vals[data_id + reduceWidth * 3]) * layer_scale
: minus_infinity;
}
// if(lane == 0) printf("%f , %d, %d \n", low_data[i].x, data_id, seq_id);
max_val = (low_data[i].x > max_val ? low_data[i].x : max_val);
max_val = (low_data[i].y > max_val ? low_data[i].y : max_val);
max_val = (high_data[i].x > max_val ? high_data[i].x : max_val);
max_val = (high_data[i].y > max_val ? high_data[i].y : max_val);
}
for (int i = 1; i < WARP_SIZE; i *= 2) {
auto temp = g.shfl_xor(max_val, i);
max_val = (temp > max_val ? temp : max_val);
}
if (reduceWidth > WARP_SIZE) {
if (lane == 0) partialSum[wid] = max_val;
b.sync();
if (lane < warp_num) max_val = partialSum[lane];
b.sync();
for (int i = 1; i < reduce_blocks; i *= 2) {
auto temp = g.shfl_xor(max_val, i);
max_val = (temp > max_val ? temp : max_val);
}
max_val = g.shfl(max_val, threadIdx.x / WARP_SIZE);
}
float sum = 0;
for (int i = 0; i < iterations; i++) {
low_data[i].x = __expf(low_data[i].x - max_val);
low_data[i].y = __expf(low_data[i].y - max_val);
high_data[i].x = __expf(high_data[i].x - max_val);
high_data[i].y = __expf(high_data[i].y - max_val);
sum += (low_data[i].x + low_data[i].y + high_data[i].x + high_data[i].y);
}
for (int i = 1; i < WARP_SIZE; i *= 2) sum += g.shfl_xor(sum, i);
if (reduceWidth > WARP_SIZE) {
if (lane == 0) partialSum[wid] = sum;
b.sync();
if (lane < warp_num) sum = partialSum[lane];
b.sync();
for (int i = 1; i < reduce_blocks; i *= 2) { sum += g.shfl_xor(sum, i); }
sum = g.shfl(sum, threadIdx.x / WARP_SIZE);
}
sum += 1e-6;
for (int i = 0; i < iterations; i++) {
int data_id = i * (reduceWidth << 2) + (seq_lane);
if (data_id < sequence_length) {
vals[data_id] = conversion::to<T>(low_data[i].x / sum);
if ((data_id + reduceWidth) < sequence_length)
vals[data_id + reduceWidth] = conversion::to<T>(low_data[i].y / sum);
if ((data_id + reduceWidth * 2) < sequence_length)
vals[data_id + reduceWidth * 2] = conversion::to<T>(high_data[i].x / sum);
if ((data_id + reduceWidth * 3) < sequence_length)
vals[data_id + reduceWidth * 3] = conversion::to<T>(high_data[i].y / sum);
}
}
}
}
template <int iterations>
__global__ void attn_softmax_v2(float* vals,
float* attn_mask,
float* alibi,
float layer_scale,
bool triangular,
bool recompute,
bool local_attention,
int window_size,
int total_count,
int heads,
int sequence_length,
int num_seq,
int head_offset,
int mask_stride,
int mp_size,
int reduceWidth)
{
cg::thread_block b = cg::this_thread_block();
cg::thread_block_tile<WARP_SIZE> g = cg::tiled_partition<WARP_SIZE>(b);
float4 data[MAX_REG_SIZE];
int wid = threadIdx.x >> 5;
int lane = threadIdx.x & 0x1f;
int warp_num = blockDim.x >> 5;
int reduce_blocks = reduceWidth >> 5;
int seq_lane = threadIdx.x % reduceWidth;
__shared__ float partialSum[MAX_WARP_NUM];
int iter_offset = blockIdx.x * (warp_num / reduce_blocks) + (wid / reduce_blocks);
if (iter_offset < total_count) {
vals += (iter_offset * sequence_length);
int batch_idx = iter_offset / (num_seq * heads);
int mask_offset = batch_idx * mask_stride + (iter_offset % mask_stride);
mask_offset = mask_offset * sequence_length;
int seq_id = iter_offset % num_seq;
int real_seq_id = seq_id + (num_seq == sequence_length ? 0 : sequence_length);
int window_stride4 = (local_attention && (real_seq_id >> 2) > (window_size >> 2))
? (real_seq_id >> 2) - (window_size >> 2)
: 0;
int window_stride =
(local_attention && real_seq_id >= window_size) ? real_seq_id - window_size : -1;
float max_val = minus_infinity;
for (int i = 0; i < iterations; i++) {
int data_id = i * (reduceWidth << 2) + (seq_lane);
bool check = (data_id >> 2) >= window_stride4;
bool x_check = check && (data_id < sequence_length) &&
(!triangular || (data_id <= seq_id)) && (data_id > window_stride);
bool y_check = check && ((data_id + reduceWidth) < sequence_length) &&
(!triangular || ((data_id + reduceWidth) <= seq_id)) &&
((data_id + reduceWidth) > window_stride);
bool z_check = check && ((data_id + reduceWidth * 2) < sequence_length) &&
(!triangular || ((data_id + reduceWidth * 2) <= seq_id)) &&
((data_id + reduceWidth * 2) > window_stride);
bool w_check = check && ((data_id + reduceWidth * 3) < sequence_length) &&
(!triangular || ((data_id + reduceWidth * 3) <= seq_id)) &&
((data_id + reduceWidth * 3) > window_stride);
if (attn_mask) {
data[i].x = x_check ? vals[data_id] + attn_mask[data_id + mask_offset]
: minus_infinity;
data[i].y = y_check ? vals[data_id + reduceWidth] +
attn_mask[data_id + mask_offset + reduceWidth]
: minus_infinity;
data[i].z = z_check ? vals[data_id + reduceWidth * 2] +
attn_mask[data_id + mask_offset + reduceWidth * 2]
: minus_infinity;
data[i].w = w_check ? vals[data_id + reduceWidth * 3] +
attn_mask[data_id + mask_offset + reduceWidth * 3]
: minus_infinity;
} else {
data[i].x = x_check ? vals[data_id] : minus_infinity;
data[i].y = y_check ? vals[data_id + reduceWidth] : minus_infinity;
data[i].z = z_check ? vals[data_id + reduceWidth * 2] : minus_infinity;
data[i].w = w_check ? vals[data_id + reduceWidth * 3] : minus_infinity;
}
max_val = (data[i].x > max_val ? data[i].x : max_val);
max_val = (data[i].y > max_val ? data[i].y : max_val);
max_val = (data[i].z > max_val ? data[i].z : max_val);
max_val = (data[i].w > max_val ? data[i].w : max_val);
}
for (int i = 1; i < WARP_SIZE; i *= 2) {
auto temp = g.shfl_xor(max_val, i);
max_val = (temp > max_val ? temp : max_val);
}
if (reduceWidth > WARP_SIZE) {
if (lane == 0) partialSum[wid] = max_val;
b.sync();
if (lane < warp_num) max_val = partialSum[lane];
b.sync();
for (int i = 1; i < reduce_blocks; i *= 2) {
auto temp = g.shfl_xor(max_val, i);
max_val = (temp > max_val ? temp : max_val);
}
max_val = g.shfl(max_val, threadIdx.x / WARP_SIZE);
}
float sum = 0;
for (int i = 0; i < iterations; i++) {
data[i].x = __expf(data[i].x - max_val);
data[i].y = __expf(data[i].y - max_val);
data[i].z = __expf(data[i].z - max_val);
data[i].w = __expf(data[i].w - max_val);
sum += (data[i].x + data[i].y + data[i].z + data[i].w);
}
for (int i = 1; i < WARP_SIZE; i *= 2) sum += g.shfl_xor(sum, i);
if (reduceWidth > WARP_SIZE) {
if (lane == 0) partialSum[wid] = sum;
b.sync();
if (lane < warp_num) sum = partialSum[lane];
b.sync();
for (int i = 1; i < reduce_blocks; i *= 2) { sum += g.shfl_xor(sum, i); }
sum = g.shfl(sum, threadIdx.x / WARP_SIZE);
}
sum += 1e-6;
for (int i = 0; i < iterations; i++) {
int data_id = i * (reduceWidth << 2) + (seq_lane);
if (data_id < sequence_length) {
vals[data_id] = data[i].x / sum;
if ((data_id + reduceWidth) < sequence_length)
vals[data_id + reduceWidth] = data[i].y / sum;
if ((data_id + reduceWidth * 2) < sequence_length)
vals[data_id + reduceWidth * 2] = data[i].z / sum;
if ((data_id + reduceWidth * 3) < sequence_length)
vals[data_id + reduceWidth * 3] = data[i].w / sum;
}
}
}
}
#define LAUNCH_ATTN_SOFTMAX_V2(iterations) \
attn_softmax_v2<T, iterations><<<grid, block, 0, stream>>>(vals, \
mask, \
alibi, \
layer_scale, \
triangular, \
recompute, \
local_attention, \
window_size, \
total_count, \
heads, \
sequence_length, \
num_seq, \
head_offset, \
mask_stride, \
mp_size, \
reduce_width);
// Host-side dispatcher for the attn_softmax_v2 kernel.
// Derives a power-of-two per-row reduction width from sequence_length,
// packs as many independent softmax rows as fit into one 256-thread block,
// and dispatches to the matching compile-time `iterations` instantiation.
// Throws for sequence lengths above 32768.
template <typename T>
void launch_attn_softmax_v2(T* vals,
T* mask,
T* alibi,
float layer_scale,
bool triangular,
bool recompute,
bool local_attention,
int window_size,
int batch_size,
int heads,
int num_seq,
int sequence_length,
int head_offset,
int mask_stride,
int mp_size,
cudaStream_t stream)
{
// One softmax row per (batch, head, query-position) triple.
const int total_count = batch_size * heads * num_seq;
// Scheduling Overview
// 4 element unroll with power of 2 `reduce_width` threads to a ceiling of `attn_threads`
// Each block should be partitioned into as many `reduce_width` blocks
// as can be fit.
constexpr int attn_threads = 256;
constexpr int min_reduce_width = hw_warp_size;
constexpr int internal_unroll = 4;
// Handle internal unroll then round to next power of 2. Bump up to minimum granularity.
const int thread_steps_rounded =
next_pow2((sequence_length + internal_unroll - 1) / internal_unroll);
const int thread_steps_schedule =
(thread_steps_rounded < min_reduce_width) ? min_reduce_width : thread_steps_rounded;
// Bound reduce width to the number of threads
const int reduce_width = (thread_steps_schedule < attn_threads) ? thread_steps_schedule
: attn_threads;
// Scale for the excess
// Both operands are powers of two, so `iterations` is itself a power of
// two; the dispatch chain below is therefore exhaustive for seq <= 32768.
const int iterations = thread_steps_schedule / reduce_width;
// Should be safe since reduce_width is capped to attn_threads
const int partitions = attn_threads / reduce_width;
// Launch params
dim3 grid((total_count + partitions - 1) / partitions);
dim3 block(attn_threads);
if (sequence_length <= 32768) {
if (iterations == 1) {
LAUNCH_ATTN_SOFTMAX_V2(1);
} else if (iterations == 2) {
LAUNCH_ATTN_SOFTMAX_V2(2);
} else if (iterations == 4) {
LAUNCH_ATTN_SOFTMAX_V2(4);
} else if (iterations == 8) {
LAUNCH_ATTN_SOFTMAX_V2(8);
} else if (iterations == 16) {
LAUNCH_ATTN_SOFTMAX_V2(16);
} else if (iterations == 32) {
LAUNCH_ATTN_SOFTMAX_V2(32);
} else if (iterations == 64) {
LAUNCH_ATTN_SOFTMAX_V2(64);
}
} else
throw std::runtime_error("Unsupport Seq_Length!");
}
// Emits an explicit instantiation of launch_attn_softmax_v2 for element
// type T so the template definition can stay in this translation unit.
#define INSTANTIATE_LAUNCH_ATTN_SOFTMAX_V2(T) \
template void launch_attn_softmax_v2(T* vals, \
T* mask, \
T* alibi, \
float layer_scale, \
bool triangular, \
bool recompute, \
bool local_attention, \
int window_size, \
int batch_size, \
int heads, \
int num_seq, \
int sequence_length, \
int head_offset, \
int mask_stride, \
int mp_size, \
cudaStream_t stream);
// Instantiate for all supported element types (bf16 only when available).
INSTANTIATE_LAUNCH_ATTN_SOFTMAX_V2(float);
#ifdef BF16_AVAILABLE
INSTANTIATE_LAUNCH_ATTN_SOFTMAX_V2(__nv_bfloat16);
#endif
INSTANTIATE_LAUNCH_ATTN_SOFTMAX_V2(__half);
// Explicit kernel instantiation for __half at unroll factor `_iter`.
#define DEF_ATTN_SOFTMAX_V2_HALF(_iter) \
template __global__ void attn_softmax_v2<__half, _iter>(__half * vals, \
__half * mask, \
__half * alibi, \
float layer_scale, \
bool triangular, \
bool recompute, \
bool local_attention, \
int window_size, \
int total_count, \
int heads, \
int sequence_length, \
int num_seq, \
int head_offset, \
int mask_stride, \
int mp_size, \
int reduceWidth)
// Explicit kernel instantiation for __nv_bfloat16 at unroll factor `_iter`.
#define DEF_ATTN_SOFTMAX_V2_BF16(_iter) \
template __global__ void attn_softmax_v2<__nv_bfloat16, _iter>(__nv_bfloat16 * vals, \
__nv_bfloat16 * mask, \
__nv_bfloat16 * alibi, \
float layer_scale, \
bool triangular, \
bool recompute, \
bool local_attention, \
int window_size, \
int total_count, \
int heads, \
int sequence_length, \
int num_seq, \
int head_offset, \
int mask_stride, \
int mp_size, \
int reduceWidth)
// Applies `cb` once per unroll factor reachable from the dispatcher above.
#define FOREACH_ITERATIONS(cb) \
cb(1); \
cb(2); \
cb(4); \
cb(8); \
cb(16); \
cb(32); \
cb(64)
FOREACH_ITERATIONS(DEF_ATTN_SOFTMAX_V2_HALF);
#ifdef BF16_AVAILABLE
FOREACH_ITERATIONS(DEF_ATTN_SOFTMAX_V2_BF16);
#endif
|
7e34d3622d7e5f73f99304ba79307eeca74b99cf.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
#include <stdio.h>
#define N (1024*1024)
#define M (1000000)
// Iterates the map x <- x*x - 0.25 M times, starting from i/N, one thread
// per element of buf. Expects a launch covering at least N threads; extra
// threads are harmless thanks to the bounds guard.
__global__ void cudakernel(float *buf)
{
    // Flat global index: each thread owns exactly one element of buf.
    int i = threadIdx.x + blockIdx.x * blockDim.x;
    // Bounds guard: the original relied on the grid size equalling N
    // exactly; guard so over-provisioned grids cannot write out of bounds.
    if (i >= N) return;
    buf[i] = 1.0f * i / N;
    for (int j = 0; j < M; j++)
        buf[i] = buf[i] * buf[i] - 0.25f;
}
// Fills an N-element buffer on the GPU, copies it back, and prints one
// element chosen interactively by the user.
int main()
{
    // Static storage: N floats (4 MB) declared as a local array would
    // risk overflowing a typical 8 MB thread stack.
    static float data[N];
    float *d_data; // device pointer
    // allocate memory on GPU, checking for failure
    if (hipMalloc((void**) &d_data, N*sizeof(float)) != hipSuccess) {
        printf("device allocation failed\n");
        return 1;
    }
    // invoke kernel with 4096 blocks of 256 threads (4096*256 == N)
    hipLaunchKernelGGL(( cudakernel), dim3(4096), dim3(256), 0, 0, d_data);
    // copy results back to host (blocking copy also synchronizes the kernel)
    hipMemcpy(data, d_data, N*sizeof(float), hipMemcpyDeviceToHost);
    hipFree(d_data);
    int input;
    printf("Enter an index: ");
    // Validate before indexing: the original read data[input] unchecked,
    // an out-of-bounds read for any value outside [0, N).
    if (scanf("%d", &input) != 1 || input < 0 || input >= N) {
        printf("invalid index\n");
        return 1;
    }
    printf("data[%d] = %f\n", input, data[input]);
    return 0;
}
| 7e34d3622d7e5f73f99304ba79307eeca74b99cf.cu | #include <stdio.h>
#define N (1024*1024)
#define M (1000000)
// Iterates the map x <- x*x - 0.25 M times, starting from i/N, one thread
// per element of buf. Expects a launch covering at least N threads; extra
// threads are harmless thanks to the bounds guard.
__global__ void cudakernel(float *buf)
{
    // Flat global index: each thread owns exactly one element of buf.
    int i = threadIdx.x + blockIdx.x * blockDim.x;
    // Bounds guard: the original relied on the grid size equalling N
    // exactly; guard so over-provisioned grids cannot write out of bounds.
    if (i >= N) return;
    buf[i] = 1.0f * i / N;
    for (int j = 0; j < M; j++)
        buf[i] = buf[i] * buf[i] - 0.25f;
}
// Fills an N-element buffer on the GPU, copies it back, and prints one
// element chosen interactively by the user.
int main()
{
    // Static storage: N floats (4 MB) declared as a local array would
    // risk overflowing a typical 8 MB thread stack.
    static float data[N];
    float *d_data; // device pointer
    // allocate memory on GPU, checking for failure
    if (cudaMalloc((void**) &d_data, N*sizeof(float)) != cudaSuccess) {
        printf("device allocation failed\n");
        return 1;
    }
    // invoke kernel with 4096 blocks of 256 threads (4096*256 == N)
    cudakernel<<<4096, 256>>>(d_data);
    // copy results back to host (blocking copy also synchronizes the kernel)
    cudaMemcpy(data, d_data, N*sizeof(float), cudaMemcpyDeviceToHost);
    cudaFree(d_data);
    int input;
    printf("Enter an index: ");
    // Validate before indexing: the original read data[input] unchecked,
    // an out-of-bounds read for any value outside [0, N).
    if (scanf("%d", &input) != 1 || input < 0 || input >= N) {
        printf("invalid index\n");
        return 1;
    }
    printf("data[%d] = %f\n", input, data[input]);
    return 0;
}
|
5563a69df82937b4ca59852d9a42c3a0db88cb46.hip | // !!! This is a file automatically generated by hipify!!!
#include <stdbool.h>
#include <stdio.h>
#include <string.h>
#include <getopt.h>
#include <hiprand/hiprand_kernel.h>
#include <stdlib.h>
#include <hip/hip_runtime.h>
#include <sys/time.h>
#include "refreshClusters.cu"
#include<chrono>
#include<iostream>
using namespace std;
using namespace std::chrono;
int blocks_[20][2] = {{8,8},{16,16},{24,24},{32,32},{1,64},{1,128},{1,192},{1,256},{1,320},{1,384},{1,448},{1,512},{1,576},{1,640},{1,704},{1,768},{1,832},{1,896},{1,960},{1,1024}};
int matrices_[7][2] = {{240,240},{496,496},{784,784},{1016,1016},{1232,1232},{1680,1680},{2024,2024}};
// Benchmark driver: for each matrix size (up to argv[1] of them) and each
// block shape in blocks_, times 1000 launches of refreshClusters and prints
// [usecs,(BLOCKX,BLOCKY),(XSIZE,YSIZE)] per configuration.
int main(int argc, char **argv) {
    hipSetDevice(0);
    // Guard argv[1]: the original dereferenced it without checking argc.
    if (argc < 2) {
        fprintf(stderr, "usage: %s <num_matrix_sizes (1-7)>\n", argv[0]);
        return 1;
    }
    char* p;
    int matrix_len = strtol(argv[1], &p, 10);
    for (int matrix_looper = 0; matrix_looper < matrix_len; matrix_looper++) {
        for (int block_looper = 0; block_looper < 20; block_looper++) {
            int XSIZE = matrices_[matrix_looper][0], YSIZE = matrices_[matrix_looper][1];
            int BLOCKX = blocks_[block_looper][0], BLOCKY = blocks_[block_looper][1];
            // FIX: the original allocated XSIZE*YSIZE *bytes*; the buffers
            // hold XSIZE*YSIZE *elements*, so scale by the element size.
            dim3 *sum = NULL;
            hipMalloc(&sum, XSIZE * YSIZE * sizeof(dim3));
            dim3 *cluster = NULL;
            hipMalloc(&cluster, XSIZE * YSIZE * sizeof(dim3));
            int *counter = NULL;
            hipMalloc(&counter, XSIZE * YSIZE * sizeof(int));
            // Round the problem size up to a multiple of the block shape.
            int iXSIZE = XSIZE;
            int iYSIZE = YSIZE;
            while (iXSIZE % BLOCKX != 0) {
                iXSIZE++;
            }
            while (iYSIZE % BLOCKY != 0) {
                iYSIZE++;
            }
            dim3 gridBlock(iXSIZE / BLOCKX, iYSIZE / BLOCKY);
            dim3 threadBlock(BLOCKX, BLOCKY);
            hipFree(0); // force context creation before timing
            // One launch plus sync, then 10 untimed warm-up launches.
            hipLaunchKernelGGL((refreshClusters), dim3(gridBlock), dim3(threadBlock), 0, 0, sum, cluster, counter);
            hipDeviceSynchronize();
            for (int loop_counter = 0; loop_counter < 10; ++loop_counter) {
                hipLaunchKernelGGL((refreshClusters), dim3(gridBlock), dim3(threadBlock), 0, 0, sum, cluster, counter);
            }
            // Timed section: measures launch cost of 1000 back-to-back launches
            // (asynchronous, as in the original — no sync inside the window).
            auto start = steady_clock::now();
            for (int loop_counter = 0; loop_counter < 1000; loop_counter++) {
                hipLaunchKernelGGL((refreshClusters), dim3(gridBlock), dim3(threadBlock), 0, 0, sum, cluster, counter);
            }
            auto end = steady_clock::now();
            auto usecs = duration_cast<duration<float, microseconds::period> >(end - start);
            cout <<'['<<usecs.count()<<','<<'('<<BLOCKX<<','<<BLOCKY<<')' << ','<<'('<<XSIZE<<','<<YSIZE<<')'<<']' << endl;
            // FIX: release per-configuration buffers (the original leaked them).
            hipFree(sum);
            hipFree(cluster);
            hipFree(counter);
        }
    }
    return 0;
}
#include <stdio.h>
#include <string.h>
#include <getopt.h>
#include <curand_kernel.h>
#include <stdlib.h>
#include <cuda.h>
#include <sys/time.h>
#include "refreshClusters.cu"
#include<chrono>
#include<iostream>
using namespace std;
using namespace std::chrono;
int blocks_[20][2] = {{8,8},{16,16},{24,24},{32,32},{1,64},{1,128},{1,192},{1,256},{1,320},{1,384},{1,448},{1,512},{1,576},{1,640},{1,704},{1,768},{1,832},{1,896},{1,960},{1,1024}};
int matrices_[7][2] = {{240,240},{496,496},{784,784},{1016,1016},{1232,1232},{1680,1680},{2024,2024}};
// Benchmark driver: for each matrix size (up to argv[1] of them) and each
// block shape in blocks_, times 1000 launches of refreshClusters and prints
// [usecs,(BLOCKX,BLOCKY),(XSIZE,YSIZE)] per configuration.
int main(int argc, char **argv) {
    cudaSetDevice(0);
    // Guard argv[1]: the original dereferenced it without checking argc.
    if (argc < 2) {
        fprintf(stderr, "usage: %s <num_matrix_sizes (1-7)>\n", argv[0]);
        return 1;
    }
    char* p;
    int matrix_len = strtol(argv[1], &p, 10);
    for (int matrix_looper = 0; matrix_looper < matrix_len; matrix_looper++) {
        for (int block_looper = 0; block_looper < 20; block_looper++) {
            int XSIZE = matrices_[matrix_looper][0], YSIZE = matrices_[matrix_looper][1];
            int BLOCKX = blocks_[block_looper][0], BLOCKY = blocks_[block_looper][1];
            // FIX: the original allocated XSIZE*YSIZE *bytes*; the buffers
            // hold XSIZE*YSIZE *elements*, so scale by the element size.
            dim3 *sum = NULL;
            cudaMalloc(&sum, XSIZE * YSIZE * sizeof(dim3));
            dim3 *cluster = NULL;
            cudaMalloc(&cluster, XSIZE * YSIZE * sizeof(dim3));
            int *counter = NULL;
            cudaMalloc(&counter, XSIZE * YSIZE * sizeof(int));
            // Round the problem size up to a multiple of the block shape.
            int iXSIZE = XSIZE;
            int iYSIZE = YSIZE;
            while (iXSIZE % BLOCKX != 0) {
                iXSIZE++;
            }
            while (iYSIZE % BLOCKY != 0) {
                iYSIZE++;
            }
            dim3 gridBlock(iXSIZE / BLOCKX, iYSIZE / BLOCKY);
            dim3 threadBlock(BLOCKX, BLOCKY);
            cudaFree(0); // force context creation before timing
            // One launch plus sync, then 10 untimed warm-up launches.
            refreshClusters<<<gridBlock, threadBlock>>>(sum, cluster, counter);
            cudaDeviceSynchronize();
            for (int loop_counter = 0; loop_counter < 10; ++loop_counter) {
                refreshClusters<<<gridBlock, threadBlock>>>(sum, cluster, counter);
            }
            // Timed section: measures launch cost of 1000 back-to-back launches
            // (asynchronous, as in the original — no sync inside the window).
            auto start = steady_clock::now();
            for (int loop_counter = 0; loop_counter < 1000; loop_counter++) {
                refreshClusters<<<gridBlock, threadBlock>>>(sum, cluster, counter);
            }
            auto end = steady_clock::now();
            auto usecs = duration_cast<duration<float, microseconds::period> >(end - start);
            cout <<'['<<usecs.count()<<','<<'('<<BLOCKX<<','<<BLOCKY<<')' << ','<<'('<<XSIZE<<','<<YSIZE<<')'<<']' << endl;
            // FIX: release per-configuration buffers (the original leaked them).
            cudaFree(sum);
            cudaFree(cluster);
            cudaFree(counter);
        }
    }
    return 0;
}
348676b895262dcd0f955d541cd2fb63d42037f9.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
#include "compute/reducesum/reducesum_internal.h"
namespace magmadnn {
namespace internal {
// Device kernel for reducing along one axis of a tensor.
// Currently an unimplemented stub.
template <typename T>
__global__ void kernel_tensor_reducesum_full_device(T *arr, T *axes, unsigned int n_axes, int axis, T *out) {
/* TODO */
}
// Host launcher for the per-axis reduce-sum kernel.
// Currently an unimplemented stub; callers get no-op behavior.
template <typename T>
void tensor_reducesum_full_device(Tensor<T> *x, unsigned int axis, Tensor<T> *out) {
/* TODO */
}
template void tensor_reducesum_full_device(Tensor<int> *x, unsigned int axis, Tensor<int> *out);
template void tensor_reducesum_full_device(Tensor<float> *x, unsigned int axis, Tensor<float> *out);
template void tensor_reducesum_full_device(Tensor<double> *x, unsigned int axis, Tensor<double> *out);
// Device kernel for a full (all-elements) reduce-sum. Unimplemented stub.
template <typename T>
__global__ void kernel_reducesum_full_device() {
/* TODOT */
}
// Host launcher for the full reduce-sum. Unimplemented stub.
template <typename T>
void reducesum_full_device(Tensor<T> *x, Tensor<T> *out) {
/* TODO */
}
template void reducesum_full_device(Tensor<int> *x, Tensor<int> *out);
template void reducesum_full_device(Tensor<float> *x, Tensor<float> *out);
template void reducesum_full_device(Tensor<double> *x, Tensor<double> *out);
} // namespace internal
} // namespace magmadnn | 348676b895262dcd0f955d541cd2fb63d42037f9.cu |
#include "compute/reducesum/reducesum_internal.h"
namespace magmadnn {
namespace internal {
// Device kernel for reducing along one axis of a tensor.
// Currently an unimplemented stub.
template <typename T>
__global__ void kernel_tensor_reducesum_full_device(T *arr, T *axes, unsigned int n_axes, int axis, T *out) {
/* TODO */
}
// Host launcher for the per-axis reduce-sum kernel.
// Currently an unimplemented stub; callers get no-op behavior.
template <typename T>
void tensor_reducesum_full_device(Tensor<T> *x, unsigned int axis, Tensor<T> *out) {
/* TODO */
}
template void tensor_reducesum_full_device(Tensor<int> *x, unsigned int axis, Tensor<int> *out);
template void tensor_reducesum_full_device(Tensor<float> *x, unsigned int axis, Tensor<float> *out);
template void tensor_reducesum_full_device(Tensor<double> *x, unsigned int axis, Tensor<double> *out);
// Device kernel for a full (all-elements) reduce-sum. Unimplemented stub.
template <typename T>
__global__ void kernel_reducesum_full_device() {
/* TODOT */
}
// Host launcher for the full reduce-sum. Unimplemented stub.
template <typename T>
void reducesum_full_device(Tensor<T> *x, Tensor<T> *out) {
/* TODO */
}
template void reducesum_full_device(Tensor<int> *x, Tensor<int> *out);
template void reducesum_full_device(Tensor<float> *x, Tensor<float> *out);
template void reducesum_full_device(Tensor<double> *x, Tensor<double> *out);
} // namespace internal
} // namespace magmadnn
29722ae9ed6e0b413ada515e55e843242a4549f0.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
#include "device_launch_parameters.h"
#include <stdio.h>
// Prints the number of visible GPUs, the driver/runtime versions, and the
// name of each device. Returns nonzero if the runtime is unusable.
int main() {
    // Get number of GPUs; bail out if the runtime cannot enumerate devices
    // (the original ignored every return code).
    int deviceCount = 0;
    hipError_t err = hipGetDeviceCount(&deviceCount);
    if (err != hipSuccess) {
        fprintf(stderr, "hipGetDeviceCount failed: %s\n", hipGetErrorString(err));
        return 1;
    }
    printf("Number of GPU devices: %i\n", deviceCount);
    // Get CUDA driver and runtime version
    int driverVersion = 0;
    int runtimeVersion = 0;
    hipDriverGetVersion(&driverVersion);
    hipRuntimeGetVersion(&runtimeVersion);
    printf("CUDA Driver Version / Runtime Version: %d.%d / %d.%d\n", driverVersion/1000, (driverVersion%100)/10, runtimeVersion/1000, (runtimeVersion%100)/10);
    // Get device properties; skip devices whose query fails
    hipDeviceProp_t deviceProperties;
    for (int i = 0; i < deviceCount; i++) {
        if (hipGetDeviceProperties(&deviceProperties, i) == hipSuccess) {
            printf("Name: %s\n", deviceProperties.name);
        }
    }
    return 0;
}
| 29722ae9ed6e0b413ada515e55e843242a4549f0.cu | #include "cuda_runtime.h"
#include "device_launch_parameters.h"
#include <stdio.h>
// Prints the number of visible GPUs, the driver/runtime versions, and the
// name of each device. Returns nonzero if the runtime is unusable.
int main() {
    // Get number of GPUs; bail out if the runtime cannot enumerate devices
    // (the original ignored every return code).
    int deviceCount = 0;
    cudaError_t err = cudaGetDeviceCount(&deviceCount);
    if (err != cudaSuccess) {
        fprintf(stderr, "cudaGetDeviceCount failed: %s\n", cudaGetErrorString(err));
        return 1;
    }
    printf("Number of GPU devices: %i\n", deviceCount);
    // Get CUDA driver and runtime version
    int driverVersion = 0;
    int runtimeVersion = 0;
    cudaDriverGetVersion(&driverVersion);
    cudaRuntimeGetVersion(&runtimeVersion);
    printf("CUDA Driver Version / Runtime Version: %d.%d / %d.%d\n", driverVersion/1000, (driverVersion%100)/10, runtimeVersion/1000, (runtimeVersion%100)/10);
    // Get device properties; skip devices whose query fails
    cudaDeviceProp deviceProperties;
    for (int i = 0; i < deviceCount; i++) {
        if (cudaGetDeviceProperties(&deviceProperties, i) == cudaSuccess) {
            printf("Name: %s\n", deviceProperties.name);
        }
    }
    return 0;
}
|
2c00cc17a5824fcec63d27409cf726d7482d49bb.hip | // !!! This is a file automatically generated by hipify!!!
/*-----------------------------------------------------------
** gaussian.cu -- The program is to solve a linear system Ax = b
** by using Gaussian Elimination. The algorithm on page 101
** ("Foundations of Parallel Programming") is used.
** The sequential version is gaussian.c. This parallel
** implementation converts three independent for() loops
** into three Fans. Use the data file ge_3.dat to verify
** the correction of the output.
**
** Written by Andreas Kura, 02/15/95
** Modified by Chong-wei Xu, 04/20/95
** Modified by Chris Gregg for CUDA, 07/20/2009
**-----------------------------------------------------------
*/
#include <stdio.h>
#include <stdlib.h>
#include <sys/time.h>
#include "hip/hip_runtime.h"
#include <string.h>
#include <math.h>
#include <hip/hip_fp16.h>
#include "half_operator_overload.cuh"
#include "newhalf.hpp"
#ifdef RD_WG_SIZE_0_0
#define MAXBLOCKSIZE RD_WG_SIZE_0_0
#elif defined(RD_WG_SIZE_0)
#define MAXBLOCKSIZE RD_WG_SIZE_0
#elif defined(RD_WG_SIZE)
#define MAXBLOCKSIZE RD_WG_SIZE
#else
#define MAXBLOCKSIZE 512
#endif
//2D defines. Go from specific to general
#ifdef RD_WG_SIZE_1_0
#define BLOCK_SIZE_XY RD_WG_SIZE_1_0
#elif defined(RD_WG_SIZE_1)
#define BLOCK_SIZE_XY RD_WG_SIZE_1
#elif defined(RD_WG_SIZE)
#define BLOCK_SIZE_XY RD_WG_SIZE
#else
#define BLOCK_SIZE_XY 4
#endif
int Size;
float *a, *b, *finalVec;
float *m;
FILE *fp;
void InitProblemOnce(char *filename);
void InitPerRun();
void ForwardSub();
void BackSub();
__global__ void Fan1(float *m, float *a, int Size, int t);
__global__ void Fan2(float *m, float *a, float *b,int Size, int j1, int t);
void InitMat(float *ary, int nrow, int ncol);
void InitAry(float *ary, int ary_size);
void PrintMat(float *ary, int nrow, int ncolumn);
void PrintAry(float *ary, int ary_size);
void PrintDeviceProperties();
void checkCUDAError(const char *msg);
unsigned int totalKernelTime = 0;
// create both matrix and right hand side, Ke Wang 2013/08/12 11:51:06
// Build a symmetric test matrix with exponentially decaying off-diagonal
// bands: m[i][j] = 10 * exp(lamda * |i - j|), row-major into m (size x size).
// The caller owns m and must supply at least size*size floats.
void
create_matrix(float *m, int size){
	int i, j;
	const float lamda = -0.01f;  // was a double literal silently truncated to float
	// Band coefficients indexed by (size-1 + (j-i)). Heap-allocated instead
	// of the original variable-length array, which is not standard C++/CUDA
	// and can overflow the stack for large sizes.
	float *coe = (float *) malloc((2*size-1) * sizeof(float));
	if (coe == NULL) {
		fprintf(stderr, "create_matrix: allocation failed\n");
		exit(EXIT_FAILURE);
	}
	float coe_i = 0.0f;
	for (i=0; i < size; i++)
	{
		coe_i = 10.0f * expf(lamda * i);
		j = size-1+i;          // upper band
		coe[j] = coe_i;
		j = size-1-i;          // lower band (symmetric)
		coe[j] = coe_i;
	}
	for (i=0; i < size; i++) {
		for (j=0; j < size; j++) {
			m[i*size+j] = coe[size-1-i+j];
		}
	}
	free(coe);
}
// Program entry point: parses -s/-f/-q, builds or loads the linear system,
// runs GPU forward elimination (ForwardSub) and CPU back substitution
// (BackSub), and reports timings plus the solution vector.
int main(int argc, char *argv[])
{
printf("WG size of kernel 1 = %d, WG size of kernel 2= %d X %d\n", MAXBLOCKSIZE, BLOCK_SIZE_XY, BLOCK_SIZE_XY);
int verbose = 1;
int i, j;
char flag;
if (argc < 2) {
printf("Usage: gaussian -f filename / -s size [-q]\n\n");
printf("-q (quiet) suppresses printing the matrix and result values.\n");
printf("-f (filename) path of input file\n");
printf("-s (size) size of matrix. Create matrix and rhs in this program \n");
printf("The first line of the file contains the dimension of the matrix, n.");
printf("The second line of the file is a newline.\n");
printf("The next n lines contain n tab separated values for the matrix.");
printf("The next line of the file is a newline.\n");
printf("The next line of the file is a 1xn vector with tab separated values.\n");
printf("The next line of the file is a newline. (optional)\n");
printf("The final line of the file is the pre-computed solution. (optional)\n");
printf("Example: matrix4.txt:\n");
printf("4\n");
printf("\n");
printf("-0.6 -0.5 0.7 0.3\n");
printf("-0.3 -0.9 0.3 0.7\n");
printf("-0.4 -0.5 -0.3 -0.8\n");
printf("0.0 -0.1 0.2 0.9\n");
printf("\n");
printf("-0.85 -0.68 0.24 -0.53\n");
printf("\n");
printf("0.7 0.0 -0.4 -0.5\n");
exit(0);
}
//PrintDeviceProperties();
//char filename[100];
//sprintf(filename,"matrices/matrix%d.txt",size);
// Parse flags: -s generates a test system (unit RHS), -f reads one from file.
for(i=1;i<argc;i++) {
if (argv[i][0]=='-') {// flag
flag = argv[i][1];
switch (flag) {
case 's': // platform
i++;
Size = atoi(argv[i]);
printf("Create matrix internally in parse, size = %d \n", Size);
a = (float *) malloc(Size * Size * sizeof(float));
create_matrix(a, Size);
b = (float *) malloc(Size * sizeof(float));
for (j =0; j< Size; j++)
b[j]=1.0;
m = (float *) malloc(Size * Size * sizeof(float));
break;
case 'f': // platform
i++;
printf("Read file from %s \n", argv[i]);
InitProblemOnce(argv[i]);
break;
case 'q': // quiet
verbose = 0;
break;
}
}
}
//InitProblemOnce(filename);
// Zero the multiplier matrix before elimination.
InitPerRun();
//begin timing
struct timeval time_start;
gettimeofday(&time_start, NULL);
// run kernels
ForwardSub();
//end timing
struct timeval time_end;
gettimeofday(&time_end, NULL);
unsigned int time_total = (time_end.tv_sec * 1000000 + time_end.tv_usec) - (time_start.tv_sec * 1000000 + time_start.tv_usec);
if (verbose) {
printf("Matrix m is: \n");
PrintMat(m, Size, Size);
printf("Matrix a is: \n");
PrintMat(a, Size, Size);
printf("Array b is: \n");
PrintAry(b, Size);
}
// Solve the resulting upper-triangular system on the CPU.
BackSub();
if (verbose) {
printf("The final solution is: \n");
PrintAry(finalVec,Size);
}
printf("\nTime total (including memory transfers)\t%f sec\n", time_total * 1e-6);
printf("Time for CUDA kernels:\t%f sec\n",totalKernelTime * 1e-6);
/*printf("%d,%d\n",size,time_total);
fprintf(stderr,"%d,%d\n",size,time_total);*/
printf("100 elems of finalvec: \n");
PrintAry(finalVec,100);
// NOTE(review): finalVec is never freed; harmless here since the process
// exits immediately, but worth tidying.
free(m);
free(a);
free(b);
}
/*------------------------------------------------------
** PrintDeviceProperties
**-----------------------------------------------------
*/
// Enumerate all visible devices and dump their key properties to stdout.
// Diagnostic helper; the call site in main() is currently commented out.
void PrintDeviceProperties(){
hipDeviceProp_t deviceProp;
int nDevCount = 0;
hipGetDeviceCount( &nDevCount );
printf( "Total Device found: %d", nDevCount );
for (int nDeviceIdx = 0; nDeviceIdx < nDevCount; ++nDeviceIdx )
{
// Zero the struct so stale fields never leak into the report.
memset( &deviceProp, 0, sizeof(deviceProp));
if( hipSuccess == hipGetDeviceProperties(&deviceProp, nDeviceIdx))
{
printf( "\nDevice Name \t\t - %s ", deviceProp.name );
printf( "\n**************************************");
printf( "\nTotal Global Memory\t\t\t - %lu KB", deviceProp.totalGlobalMem/1024 );
printf( "\nShared memory available per block \t - %lu KB", deviceProp.sharedMemPerBlock/1024 );
printf( "\nNumber of registers per thread block \t - %d", deviceProp.regsPerBlock );
printf( "\nWarp size in threads \t\t\t - %d", deviceProp.warpSize );
printf( "\nMemory Pitch \t\t\t\t - %zu bytes", deviceProp.memPitch );
printf( "\nMaximum threads per block \t\t - %d", deviceProp.maxThreadsPerBlock );
printf( "\nMaximum Thread Dimension (block) \t - %d %d %d", deviceProp.maxThreadsDim[0], deviceProp.maxThreadsDim[1], deviceProp.maxThreadsDim[2] );
printf( "\nMaximum Thread Dimension (grid) \t - %d %d %d", deviceProp.maxGridSize[0], deviceProp.maxGridSize[1], deviceProp.maxGridSize[2] );
printf( "\nTotal constant memory \t\t\t - %zu bytes", deviceProp.totalConstMem );
printf( "\nCUDA ver \t\t\t\t - %d.%d", deviceProp.major, deviceProp.minor );
printf( "\nClock rate \t\t\t\t - %d KHz", deviceProp.clockRate );
printf( "\nTexture Alignment \t\t\t - %zu bytes", deviceProp.textureAlignment );
printf( "\nDevice Overlap \t\t\t\t - %s", deviceProp. deviceOverlap?"Allowed":"Not Allowed" );
printf( "\nNumber of Multi processors \t\t - %d\n\n", deviceProp.multiProcessorCount );
}
else
// Property query failed: report the runtime's error string instead.
printf( "\n%s", hipGetErrorString(hipGetLastError()));
}
}
/*------------------------------------------------------
** InitProblemOnce -- Initialize all of matrices and
** vectors by opening a data file specified by the user.
**
** We used dynamic array *a, *b, and *m to allocate
** the memory storages.
**------------------------------------------------------
*/
/*------------------------------------------------------
** InitProblemOnce -- Initialize all of matrices and
** vectors by opening a data file specified by the user.
** Reads the dimension into the global Size, the matrix
** into a and the RHS into b, and allocates the
** multiplier matrix m. Exits on unreadable input.
**------------------------------------------------------
*/
void InitProblemOnce(char *filename)
{
	fp = fopen(filename, "r");
	// FIX: the original dereferenced fp without checking fopen's result.
	if (fp == NULL) {
		fprintf(stderr, "Cannot open data file %s\n", filename);
		exit(EXIT_FAILURE);
	}
	// FIX: validate the dimension read; garbage Size would drive every
	// malloc and loop below.
	if (fscanf(fp, "%d", &Size) != 1 || Size <= 0) {
		fprintf(stderr, "Invalid matrix dimension in %s\n", filename);
		exit(EXIT_FAILURE);
	}
	a = (float *) malloc(Size * Size * sizeof(float));
	InitMat(a, Size, Size);
	b = (float *) malloc(Size * sizeof(float));
	InitAry(b, Size);
	m = (float *) malloc(Size * Size * sizeof(float));
}
/*------------------------------------------------------
** InitPerRun() -- Initialize the contents of the
** multipier matrix **m
**------------------------------------------------------
*/
/*------------------------------------------------------
** InitPerRun() -- Zero every entry of the global
** multiplier matrix m (Size x Size) ahead of a run.
**------------------------------------------------------
*/
void InitPerRun()
{
	const int total = Size * Size;
	for (int idx = 0; idx < total; ++idx) {
		m[idx] = 0.0;
	}
}
/*-------------------------------------------------------
** Fan1() -- Calculate multiplier matrix
** Pay attention to the index. Index i give the range
** which starts from 0 to range-1. The real values of
** the index should be adjust and related with the value
** of t which is defined on the ForwardSub().
**-------------------------------------------------------
*/
// Fan1: compute column t of the multiplier matrix in half precision:
// m[row][t] = a[row][t] / a[t][t] for rows row = t+1 .. Size-1.
// One thread per remaining row; 1D launch sized by ForwardSub().
// NOTE(review): the forward declaration near the top of this file declares
// Fan1 over float*; this half* definition is a distinct overload — confirm
// the float* declaration is intentionally unused.
__global__ void Fan1(half *m_cuda, half *a_cuda, int Size, int t)
{
//if(threadIdx.x + blockIdx.x * blockDim.x >= Size-1-t) printf(".");
//printf("blockIDx.x:%d,threadIdx.x:%d,Size:%d,t:%d,Size-1-t:%d\n",blockIdx.x,threadIdx.x,Size,t,Size-1-t);
// Guard: only the Size-1-t rows below the pivot participate.
if(threadIdx.x + blockIdx.x * blockDim.x >= Size-1-t) return;
int xidx = blockIdx.x * blockDim.x + threadIdx.x;
//~ *(m_cuda+Size*(blockDim.x*blockIdx.x+threadIdx.x+t+1)+t) = *(a_cuda+Size*(blockDim.x*blockIdx.x+threadIdx.x+t+1)+t) / *(a_cuda+Size*t+t);
m_cuda[Size*(xidx+t+1)+t] = a_cuda[Size*(xidx+t+1)+t] / a_cuda[Size*t+t];
}
/*-------------------------------------------------------
** Fan2() -- Modify the matrix A into LUD
**-------------------------------------------------------
*/
// Fan2: eliminate column t from the trailing submatrix of a and from the
// right-hand side b, using the multipliers produced by Fan1 (half precision).
// 2D launch: x indexes rows t+1.., y indexes columns t..; each row's y==0
// thread additionally updates b. Parameter j1 is unused.
__global__ void Fan2(half *m_cuda, half *a_cuda, half *b_cuda,int Size, int j1, int t)
{
// Guards: stay inside the active (Size-1-t) x (Size-t) trailing region.
if(threadIdx.x + blockIdx.x * blockDim.x >= Size-1-t) return;
if(threadIdx.y + blockIdx.y * blockDim.y >= Size-t) return;
int xidx = blockIdx.x * blockDim.x + threadIdx.x;
int yidx = blockIdx.y * blockDim.y + threadIdx.y;
//printf("blockIdx.x:%d,threadIdx.x:%d,blockIdx.y:%d,threadIdx.y:%d,blockDim.x:%d,blockDim.y:%d\n",blockIdx.x,threadIdx.x,blockIdx.y,threadIdx.y,blockDim.x,blockDim.y);
a_cuda[Size*(xidx+1+t)+(yidx+t)] -= m_cuda[Size*(xidx+1+t)+t] * a_cuda[Size*t+(yidx+t)];
//a_cuda[xidx+1+t][yidx+t] -= m_cuda[xidx+1+t][t] * a_cuda[t][yidx+t];
if(yidx == 0){
//printf("blockIdx.x:%d,threadIdx.x:%d,blockIdx.y:%d,threadIdx.y:%d,blockDim.x:%d,blockDim.y:%d\n",blockIdx.x,threadIdx.x,blockIdx.y,threadIdx.y,blockDim.x,blockDim.y);
//printf("xidx:%d,yidx:%d\n",xidx,yidx);
// With yidx == 0, (yidx+t) == t, so this reads the multiplier m[row][t].
b_cuda[xidx+1+t] -= m_cuda[Size*(xidx+1+t)+(yidx+t)] * b_cuda[t];
}
}
/*------------------------------------------------------
** ForwardSub() -- Forward substitution of Gaussian
** elimination.
**------------------------------------------------------
*/
/*------------------------------------------------------
** ForwardSub() -- Forward substitution of Gaussian
** elimination, run on the GPU in half precision.
** Converts the global float system (a, b, m) to half,
** runs the Fan1/Fan2 kernels for each pivot, then
** converts the eliminated system back into a, m AND b.
**------------------------------------------------------
*/
void ForwardSub()
{
	int t;
	half *m_cuda, *a_cuda, *b_cuda;
	half_float::half *m_half, *a_half, *b_half;
	// Host-side half-precision staging buffers.
	m_half = (half_float::half*)malloc( Size * Size * sizeof(half_float::half));
	a_half = (half_float::half*)malloc( Size * Size * sizeof(half_float::half));
	b_half = (half_float::half*)malloc( Size * sizeof(half_float::half));
	for(int i =0;i<Size * Size;i ++){
		m_half[i] = half_float::half(m[i]);
		a_half[i] = half_float::half(a[i]);
	}
	for (int i=0; i<Size; i++){
		b_half[i] = half_float::half(b[i]);
	}
	// allocate memory on GPU
	hipMalloc((void **) &m_cuda, Size * Size * sizeof(half));
	hipMalloc((void **) &a_cuda, Size * Size * sizeof(half));
	hipMalloc((void **) &b_cuda, Size * sizeof(half));
	// copy memory to GPU
	hipMemcpy(m_cuda, m_half, Size * Size * sizeof(half),hipMemcpyHostToDevice );
	hipMemcpy(a_cuda, a_half, Size * Size * sizeof(half),hipMemcpyHostToDevice );
	hipMemcpy(b_cuda, b_half, Size * sizeof(half),hipMemcpyHostToDevice );
	// 1D geometry for Fan1: one thread per remaining row.
	int block_size,grid_size;
	block_size = MAXBLOCKSIZE;
	grid_size = (Size/block_size) + (!(Size%block_size)? 0:1);
	dim3 dimBlock(block_size);
	dim3 dimGrid(grid_size);
	// 2D geometry for Fan2: covers the trailing submatrix.
	int blockSize2d, gridSize2d;
	blockSize2d = BLOCK_SIZE_XY;
	gridSize2d = (Size/blockSize2d) + (!(Size%blockSize2d?0:1));
	dim3 dimBlockXY(blockSize2d,blockSize2d);
	dim3 dimGridXY(gridSize2d,gridSize2d);
	// begin timing kernels
	struct timeval time_start;
	gettimeofday(&time_start, NULL);
	for (t=0; t<(Size-1); t++)
	{
		// Multipliers for pivot t, then eliminate column t from a and b.
		hipLaunchKernelGGL(( Fan1), dim3(dimGrid),dim3(dimBlock), 0, 0, m_cuda,a_cuda,Size,t);
		hipDeviceSynchronize();
		hipLaunchKernelGGL(( Fan2), dim3(dimGridXY),dim3(dimBlockXY), 0, 0, m_cuda,a_cuda,b_cuda,Size,Size-t,t);
		hipDeviceSynchronize();
		checkCUDAError("Fan2");
	}
	// end timing kernels
	struct timeval time_end;
	gettimeofday(&time_end, NULL);
	totalKernelTime = (time_end.tv_sec * 1000000 + time_end.tv_usec) - (time_start.tv_sec * 1000000 + time_start.tv_usec);
	// copy memory back to CPU
	hipMemcpy(m_half, m_cuda, Size * Size * sizeof(half),hipMemcpyDeviceToHost );
	hipMemcpy(a_half, a_cuda, Size * Size * sizeof(half),hipMemcpyDeviceToHost );
	hipMemcpy(b_half, b_cuda, Size * sizeof(half),hipMemcpyDeviceToHost );
	for(int i =0;i<Size * Size;i ++){
		m[i] = float(m_half[i]);
		a[i] = float(a_half[i]);
	}
	// BUG FIX: the eliminated right-hand side must be written back into b.
	// The original left this loop commented out, so BackSub() solved the
	// triangular system against the original, un-eliminated b.
	for (int i=0; i<Size; i++){
		b[i] = float(b_half[i]);
	}
	free(m_half);
	free(a_half);
	free(b_half);
	hipFree(m_cuda);
	hipFree(a_cuda);
	hipFree(b_cuda);
}
/*------------------------------------------------------
** BackSub() -- Backward substitution
**------------------------------------------------------
*/
// Solve the upper-triangular system a·x = b bottom-up into the global
// finalVec (allocated here). Uses the globals a, b, Size left by
// ForwardSub().
void BackSub()
{
// create a new vector to hold the final answer
finalVec = (float *) malloc(Size * sizeof(float));
// solve "bottom up"
int i,j;
for(i=0;i<Size;i++){
finalVec[Size-i-1]=b[Size-i-1];
// subtract the contributions of the already-solved unknowns in this row
for(j=0;j<i;j++)
{
finalVec[Size-i-1]-=*(a+Size*(Size-i-1)+(Size-j-1)) * finalVec[Size-j-1];
}
// divide by the diagonal element of the row
finalVec[Size-i-1]=finalVec[Size-i-1]/ *(a+Size*(Size-i-1)+(Size-i-1));
}
}
// Read an nrow x ncol matrix from the already-open data file fp into ary.
// NOTE(review): fscanf returns are unchecked — a short file leaves trailing
// elements uninitialized. The row stride is the global Size, not ncol, so
// callers must pass ncol == Size (all current callers do).
void InitMat(float *ary, int nrow, int ncol)
{
int i, j;
for (i=0; i<nrow; i++) {
for (j=0; j<ncol; j++) {
fscanf(fp, "%f", ary+Size*i+j);
}
}
}
/*------------------------------------------------------
** PrintMat() -- Print the contents of the matrix
**------------------------------------------------------
*/
/*------------------------------------------------------
** PrintMat() -- Print the contents of the matrix
** (nrow x ncol, two decimals per entry).
** NOTE(review): the row stride is the global Size, not
** the ncol parameter — correct only when ncol == Size,
** which holds for all current call sites.
**------------------------------------------------------
*/
void PrintMat(float *ary, int nrow, int ncol)
{
int i, j;
for (i=0; i<nrow; i++) {
for (j=0; j<ncol; j++) {
printf("%8.2f ", *(ary+Size*i+j));
}
printf("\n");
}
printf("\n");
}
/*------------------------------------------------------
** InitAry() -- Initialize the array (vector) by reading
** data from the data file
**------------------------------------------------------
*/
// Read ary_size floats from the already-open data file fp into ary.
// NOTE(review): fscanf returns are unchecked — a short file leaves
// trailing elements uninitialized.
void InitAry(float *ary, int ary_size)
{
int i;
for (i=0; i<ary_size; i++) {
fscanf(fp, "%f", &ary[i]);
}
}
/*------------------------------------------------------
** PrintAry() -- Print the contents of the array (vector)
**------------------------------------------------------
*/
/*------------------------------------------------------
** PrintAry() -- Write ary_size elements of ary to
** stdout, two decimals each, then a blank line.
**------------------------------------------------------
*/
void PrintAry(float *ary, int ary_size)
{
	for (int idx = 0; idx < ary_size; ++idx)
		printf("%.2f ", ary[idx]);
	printf("\n\n");
}
// Abort with `msg` and the runtime's error string if the most recent
// runtime call or kernel launch recorded an error.
// Note: hipGetLastError also clears the sticky error state.
void checkCUDAError(const char *msg)
{
hipError_t err = hipGetLastError();
if( hipSuccess != err)
{
fprintf(stderr, "Cuda error: %s: %s.\n", msg,
hipGetErrorString( err) );
exit(EXIT_FAILURE);
}
}
| 2c00cc17a5824fcec63d27409cf726d7482d49bb.cu | /*-----------------------------------------------------------
** gaussian.cu -- The program is to solve a linear system Ax = b
** by using Gaussian Elimination. The algorithm on page 101
** ("Foundations of Parallel Programming") is used.
** The sequential version is gaussian.c. This parallel
** implementation converts three independent for() loops
** into three Fans. Use the data file ge_3.dat to verify
** the correction of the output.
**
** Written by Andreas Kura, 02/15/95
** Modified by Chong-wei Xu, 04/20/95
** Modified by Chris Gregg for CUDA, 07/20/2009
**-----------------------------------------------------------
*/
#include <stdio.h>
#include <stdlib.h>
#include <sys/time.h>
#include "cuda.h"
#include <string.h>
#include <math.h>
#include <cuda_fp16.h>
#include "half_operator_overload.cuh"
#include "newhalf.hpp"
#ifdef RD_WG_SIZE_0_0
#define MAXBLOCKSIZE RD_WG_SIZE_0_0
#elif defined(RD_WG_SIZE_0)
#define MAXBLOCKSIZE RD_WG_SIZE_0
#elif defined(RD_WG_SIZE)
#define MAXBLOCKSIZE RD_WG_SIZE
#else
#define MAXBLOCKSIZE 512
#endif
//2D defines. Go from specific to general
#ifdef RD_WG_SIZE_1_0
#define BLOCK_SIZE_XY RD_WG_SIZE_1_0
#elif defined(RD_WG_SIZE_1)
#define BLOCK_SIZE_XY RD_WG_SIZE_1
#elif defined(RD_WG_SIZE)
#define BLOCK_SIZE_XY RD_WG_SIZE
#else
#define BLOCK_SIZE_XY 4
#endif
int Size;
float *a, *b, *finalVec;
float *m;
FILE *fp;
void InitProblemOnce(char *filename);
void InitPerRun();
void ForwardSub();
void BackSub();
__global__ void Fan1(float *m, float *a, int Size, int t);
__global__ void Fan2(float *m, float *a, float *b,int Size, int j1, int t);
void InitMat(float *ary, int nrow, int ncol);
void InitAry(float *ary, int ary_size);
void PrintMat(float *ary, int nrow, int ncolumn);
void PrintAry(float *ary, int ary_size);
void PrintDeviceProperties();
void checkCUDAError(const char *msg);
unsigned int totalKernelTime = 0;
// create both matrix and right hand side, Ke Wang 2013/08/12 11:51:06
// Build a symmetric test matrix with exponentially decaying off-diagonal
// bands: m[i][j] = 10 * exp(lamda * |i - j|), row-major into m (size x size).
// The caller owns m and must supply at least size*size floats.
void
create_matrix(float *m, int size){
	int i, j;
	const float lamda = -0.01f;  // was a double literal silently truncated to float
	// Band coefficients indexed by (size-1 + (j-i)). Heap-allocated instead
	// of the original variable-length array, which is not standard C++/CUDA
	// and can overflow the stack for large sizes.
	float *coe = (float *) malloc((2*size-1) * sizeof(float));
	if (coe == NULL) {
		fprintf(stderr, "create_matrix: allocation failed\n");
		exit(EXIT_FAILURE);
	}
	float coe_i = 0.0f;
	for (i=0; i < size; i++)
	{
		coe_i = 10.0f * expf(lamda * i);
		j = size-1+i;          // upper band
		coe[j] = coe_i;
		j = size-1-i;          // lower band (symmetric)
		coe[j] = coe_i;
	}
	for (i=0; i < size; i++) {
		for (j=0; j < size; j++) {
			m[i*size+j] = coe[size-1-i+j];
		}
	}
	free(coe);
}
// Program entry point: parses -s/-f/-q, builds or loads the linear system,
// runs GPU forward elimination (ForwardSub) and CPU back substitution
// (BackSub), and reports timings plus the solution vector.
int main(int argc, char *argv[])
{
printf("WG size of kernel 1 = %d, WG size of kernel 2= %d X %d\n", MAXBLOCKSIZE, BLOCK_SIZE_XY, BLOCK_SIZE_XY);
int verbose = 1;
int i, j;
char flag;
if (argc < 2) {
printf("Usage: gaussian -f filename / -s size [-q]\n\n");
printf("-q (quiet) suppresses printing the matrix and result values.\n");
printf("-f (filename) path of input file\n");
printf("-s (size) size of matrix. Create matrix and rhs in this program \n");
printf("The first line of the file contains the dimension of the matrix, n.");
printf("The second line of the file is a newline.\n");
printf("The next n lines contain n tab separated values for the matrix.");
printf("The next line of the file is a newline.\n");
printf("The next line of the file is a 1xn vector with tab separated values.\n");
printf("The next line of the file is a newline. (optional)\n");
printf("The final line of the file is the pre-computed solution. (optional)\n");
printf("Example: matrix4.txt:\n");
printf("4\n");
printf("\n");
printf("-0.6 -0.5 0.7 0.3\n");
printf("-0.3 -0.9 0.3 0.7\n");
printf("-0.4 -0.5 -0.3 -0.8\n");
printf("0.0 -0.1 0.2 0.9\n");
printf("\n");
printf("-0.85 -0.68 0.24 -0.53\n");
printf("\n");
printf("0.7 0.0 -0.4 -0.5\n");
exit(0);
}
//PrintDeviceProperties();
//char filename[100];
//sprintf(filename,"matrices/matrix%d.txt",size);
// Parse flags: -s generates a test system (unit RHS), -f reads one from file.
for(i=1;i<argc;i++) {
if (argv[i][0]=='-') {// flag
flag = argv[i][1];
switch (flag) {
case 's': // platform
i++;
Size = atoi(argv[i]);
printf("Create matrix internally in parse, size = %d \n", Size);
a = (float *) malloc(Size * Size * sizeof(float));
create_matrix(a, Size);
b = (float *) malloc(Size * sizeof(float));
for (j =0; j< Size; j++)
b[j]=1.0;
m = (float *) malloc(Size * Size * sizeof(float));
break;
case 'f': // platform
i++;
printf("Read file from %s \n", argv[i]);
InitProblemOnce(argv[i]);
break;
case 'q': // quiet
verbose = 0;
break;
}
}
}
//InitProblemOnce(filename);
// Zero the multiplier matrix before elimination.
InitPerRun();
//begin timing
struct timeval time_start;
gettimeofday(&time_start, NULL);
// run kernels
ForwardSub();
//end timing
struct timeval time_end;
gettimeofday(&time_end, NULL);
unsigned int time_total = (time_end.tv_sec * 1000000 + time_end.tv_usec) - (time_start.tv_sec * 1000000 + time_start.tv_usec);
if (verbose) {
printf("Matrix m is: \n");
PrintMat(m, Size, Size);
printf("Matrix a is: \n");
PrintMat(a, Size, Size);
printf("Array b is: \n");
PrintAry(b, Size);
}
// Solve the resulting upper-triangular system on the CPU.
BackSub();
if (verbose) {
printf("The final solution is: \n");
PrintAry(finalVec,Size);
}
printf("\nTime total (including memory transfers)\t%f sec\n", time_total * 1e-6);
printf("Time for CUDA kernels:\t%f sec\n",totalKernelTime * 1e-6);
/*printf("%d,%d\n",size,time_total);
fprintf(stderr,"%d,%d\n",size,time_total);*/
printf("100 elems of finalvec: \n");
PrintAry(finalVec,100);
// NOTE(review): finalVec is never freed; harmless here since the process
// exits immediately, but worth tidying.
free(m);
free(a);
free(b);
}
/*------------------------------------------------------
** PrintDeviceProperties
**-----------------------------------------------------
*/
// Enumerate all CUDA devices and print a summary of each one's capabilities.
// Purely informational; on query failure the CUDA error string is printed
// instead of the device summary.
void PrintDeviceProperties(){
cudaDeviceProp deviceProp;
int nDevCount = 0;
cudaGetDeviceCount( &nDevCount );
printf( "Total Device found: %d", nDevCount );
for (int nDeviceIdx = 0; nDeviceIdx < nDevCount; ++nDeviceIdx )
{
// Zero the struct first so stale fields never leak through a partial query.
memset( &deviceProp, 0, sizeof(deviceProp));
if( cudaSuccess == cudaGetDeviceProperties(&deviceProp, nDeviceIdx))
{
printf( "\nDevice Name \t\t - %s ", deviceProp.name );
printf( "\n**************************************");
printf( "\nTotal Global Memory\t\t\t - %lu KB", deviceProp.totalGlobalMem/1024 );
printf( "\nShared memory available per block \t - %lu KB", deviceProp.sharedMemPerBlock/1024 );
printf( "\nNumber of registers per thread block \t - %d", deviceProp.regsPerBlock );
printf( "\nWarp size in threads \t\t\t - %d", deviceProp.warpSize );
printf( "\nMemory Pitch \t\t\t\t - %zu bytes", deviceProp.memPitch );
printf( "\nMaximum threads per block \t\t - %d", deviceProp.maxThreadsPerBlock );
printf( "\nMaximum Thread Dimension (block) \t - %d %d %d", deviceProp.maxThreadsDim[0], deviceProp.maxThreadsDim[1], deviceProp.maxThreadsDim[2] );
printf( "\nMaximum Thread Dimension (grid) \t - %d %d %d", deviceProp.maxGridSize[0], deviceProp.maxGridSize[1], deviceProp.maxGridSize[2] );
printf( "\nTotal constant memory \t\t\t - %zu bytes", deviceProp.totalConstMem );
printf( "\nCUDA ver \t\t\t\t - %d.%d", deviceProp.major, deviceProp.minor );
printf( "\nClock rate \t\t\t\t - %d KHz", deviceProp.clockRate );
printf( "\nTexture Alignment \t\t\t - %zu bytes", deviceProp.textureAlignment );
printf( "\nDevice Overlap \t\t\t\t - %s", deviceProp. deviceOverlap?"Allowed":"Not Allowed" );
printf( "\nNumber of Multi processors \t\t - %d\n\n", deviceProp.multiProcessorCount );
}
else
printf( "\n%s", cudaGetErrorString(cudaGetLastError()));
}
}
/*------------------------------------------------------
** InitProblemOnce -- Initialize all of matrices and
** vectors by opening a data file specified by the user.
**
** We used dynamic array *a, *b, and *m to allocate
** the memory storages.
**------------------------------------------------------
*/
void InitProblemOnce(char *filename)
{
    // Open the data file and read Size, the Size x Size matrix a, and the RHS
    // vector b; also allocate the multiplier matrix m.
    // Fixes: the file handle was never closed (resource leak), and a missing
    // file or malformed header previously caused a crash / garbage reads.
    fp = fopen(filename, "r");
    if (fp == NULL) {
        fprintf(stderr, "Cannot open input file: %s\n", filename);
        exit(EXIT_FAILURE);
    }
    if (fscanf(fp, "%d", &Size) != 1) {
        fprintf(stderr, "Failed to read matrix size from %s\n", filename);
        fclose(fp);
        exit(EXIT_FAILURE);
    }
    a = (float *) malloc(Size * Size * sizeof(float));
    InitMat(a, Size, Size);          // reads Size*Size floats from fp
    b = (float *) malloc(Size * sizeof(float));
    InitAry(b, Size);                // reads Size floats from fp
    m = (float *) malloc(Size * Size * sizeof(float));
    fclose(fp);  // all file input has been consumed by InitMat/InitAry above
}
/*------------------------------------------------------
** InitPerRun() -- Initialize the contents of the
** multipier matrix **m
**------------------------------------------------------
*/
void InitPerRun()
{
    // Reset the multiplier matrix m (Size x Size, file-scope globals) to all
    // zeros before each elimination run.
    for (int k = 0; k < Size * Size; k++)
        m[k] = 0.0f;
}
/*-------------------------------------------------------
** Fan1() -- Calculate multiplier matrix
** Pay attention to the index. Index i give the range
** which starts from 0 to range-1. The real values of
** the index should be adjust and related with the value
** of t which is defined on the ForwardSub().
**-------------------------------------------------------
*/
__global__ void Fan1(half *m_cuda, half *a_cuda, int Size, int t)
{
    // One thread per row below pivot row t: compute the elimination
    // multiplier m[row][t] = a[row][t] / a[t][t] for row = t+1 .. Size-1.
    // Matrices are Size x Size, row-major, fp16.
    const int tid = blockIdx.x * blockDim.x + threadIdx.x;
    if (tid >= Size - 1 - t) return;   // guard the grid tail
    const int row = tid + t + 1;
    m_cuda[Size * row + t] = a_cuda[Size * row + t] / a_cuda[Size * t + t];
}
/*-------------------------------------------------------
** Fan2() -- Modify the matrix A into LUD
**-------------------------------------------------------
*/
// Eliminate column t below the pivot: for each row > t and column >= t,
//   a[row][col] -= m[row][t] * a[t][col];   and for column t only,
//   b[row]      -= m[row][t] * b[t].
// xidx indexes rows below the pivot (row = xidx+1+t); yidx indexes columns
// from the pivot onward (col = yidx+t).
// NOTE(review): parameter j1 is unused here (the caller passes Size-t) —
// confirm it can be dropped.
__global__ void Fan2(half *m_cuda, half *a_cuda, half *b_cuda,int Size, int j1, int t)
{
if(threadIdx.x + blockIdx.x * blockDim.x >= Size-1-t) return;
if(threadIdx.y + blockIdx.y * blockDim.y >= Size-t) return;
int xidx = blockIdx.x * blockDim.x + threadIdx.x;
int yidx = blockIdx.y * blockDim.y + threadIdx.y;
//printf("blockIdx.x:%d,threadIdx.x:%d,blockIdx.y:%d,threadIdx.y:%d,blockDim.x:%d,blockDim.y:%d\n",blockIdx.x,threadIdx.x,blockIdx.y,threadIdx.y,blockDim.x,blockDim.y);
a_cuda[Size*(xidx+1+t)+(yidx+t)] -= m_cuda[Size*(xidx+1+t)+t] * a_cuda[Size*t+(yidx+t)];
//a_cuda[xidx+1+t][yidx+t] -= m_cuda[xidx+1+t][t] * a_cuda[t][yidx+t];
// Only the yidx==0 thread of each row updates the RHS (yidx+t == t here,
// so the multiplier read is the same m[row][t] as above).
if(yidx == 0){
//printf("blockIdx.x:%d,threadIdx.x:%d,blockIdx.y:%d,threadIdx.y:%d,blockDim.x:%d,blockDim.y:%d\n",blockIdx.x,threadIdx.x,blockIdx.y,threadIdx.y,blockDim.x,blockDim.y);
//printf("xidx:%d,yidx:%d\n",xidx,yidx);
b_cuda[xidx+1+t] -= m_cuda[Size*(xidx+1+t)+(yidx+t)] * b_cuda[t];
}
}
/*------------------------------------------------------
** ForwardSub() -- Forward substitution of Gaussian
** elimination.
**------------------------------------------------------
*/
void ForwardSub()
{
    // Forward elimination on the GPU in fp16: convert the float system to
    // half, upload, run Fan1/Fan2 per pivot, and copy the results back.
    // Fix: cudaThreadSynchronize() is deprecated — use cudaDeviceSynchronize();
    // also check for launch errors after Fan1, not only after Fan2.
    int t;
    half *m_cuda, *a_cuda, *b_cuda;                 // device buffers (fp16)
    half_float::half *m_half, *a_half, *b_half;     // host fp16 staging copies

    // Convert the float inputs to half precision on the host.
    m_half = (half_float::half*) malloc(Size * Size * sizeof(half_float::half));
    a_half = (half_float::half*) malloc(Size * Size * sizeof(half_float::half));
    b_half = (half_float::half*) malloc(Size * sizeof(half_float::half));
    for (int i = 0; i < Size * Size; i++) {
        m_half[i] = half_float::half(m[i]);
        a_half[i] = half_float::half(a[i]);
    }
    for (int i = 0; i < Size; i++) {
        b_half[i] = half_float::half(b[i]);
    }

    // allocate memory on GPU
    cudaMalloc((void **) &m_cuda, Size * Size * sizeof(half));
    cudaMalloc((void **) &a_cuda, Size * Size * sizeof(half));
    cudaMalloc((void **) &b_cuda, Size * sizeof(half));

    // copy memory to GPU
    cudaMemcpy(m_cuda, m_half, Size * Size * sizeof(half), cudaMemcpyHostToDevice);
    cudaMemcpy(a_cuda, a_half, Size * Size * sizeof(half), cudaMemcpyHostToDevice);
    cudaMemcpy(b_cuda, b_half, Size * sizeof(half), cudaMemcpyHostToDevice);

    // 1-D launch config for Fan1 (one thread per row below the pivot).
    int block_size, grid_size;
    block_size = MAXBLOCKSIZE;
    grid_size = (Size / block_size) + (!(Size % block_size) ? 0 : 1);
    dim3 dimBlock(block_size);
    dim3 dimGrid(grid_size);

    // 2-D launch config for Fan2 (rows x columns of the trailing submatrix).
    int blockSize2d, gridSize2d;
    blockSize2d = BLOCK_SIZE_XY;
    gridSize2d = (Size / blockSize2d) + (Size % blockSize2d ? 1 : 0);  // ceil-div
    dim3 dimBlockXY(blockSize2d, blockSize2d);
    dim3 dimGridXY(gridSize2d, gridSize2d);

    // begin timing kernels
    struct timeval time_start;
    gettimeofday(&time_start, NULL);

    // For each pivot t: compute multipliers (Fan1), then update the trailing
    // submatrix and RHS (Fan2).  Fan2 of step t depends on Fan1 of step t.
    for (t = 0; t < (Size - 1); t++) {
        Fan1<<<dimGrid, dimBlock>>>(m_cuda, a_cuda, Size, t);
        cudaDeviceSynchronize();
        checkCUDAError("Fan1");
        Fan2<<<dimGridXY, dimBlockXY>>>(m_cuda, a_cuda, b_cuda, Size, Size - t, t);
        cudaDeviceSynchronize();
        checkCUDAError("Fan2");
    }

    // end timing kernels
    struct timeval time_end;
    gettimeofday(&time_end, NULL);
    totalKernelTime = (time_end.tv_sec * 1000000 + time_end.tv_usec) - (time_start.tv_sec * 1000000 + time_start.tv_usec);

    // copy memory back to CPU
    cudaMemcpy(m_half, m_cuda, Size * Size * sizeof(half), cudaMemcpyDeviceToHost);
    cudaMemcpy(a_half, a_cuda, Size * Size * sizeof(half), cudaMemcpyDeviceToHost);
    cudaMemcpy(b_half, b_cuda, Size * sizeof(half), cudaMemcpyDeviceToHost);
    for (int i = 0; i < Size * Size; i++) {
        m[i] = float(m_half[i]);
        a[i] = float(a_half[i]);
    }
    // NOTE(review): b is NOT converted back from b_half (the original code had
    // this commented out), so BackSub() runs against the unmodified host-side
    // b — confirm this is intended.

    free(m_half);
    free(a_half);
    free(b_half);
    cudaFree(m_cuda);
    cudaFree(a_cuda);
    cudaFree(b_cuda);
}
/*------------------------------------------------------
** BackSub() -- Backward substitution
**------------------------------------------------------
*/
void BackSub()
{
    // Solve the upper-triangular system a * x = b from the bottom row up,
    // storing the solution in the global finalVec (allocated here).
    finalVec = (float *) malloc(Size * sizeof(float));
    int row, col;
    for (row = Size - 1; row >= 0; row--) {
        float acc = b[row];
        // Subtract known terms in descending column order (same accumulation
        // order as the original, so the float rounding is identical).
        for (col = Size - 1; col > row; col--)
            acc -= a[Size * row + col] * finalVec[col];
        finalVec[row] = acc / a[Size * row + row];
    }
}
void InitMat(float *ary, int nrow, int ncol)
{
    // Read nrow*ncol whitespace-separated floats from the already-open file
    // fp into ary (row-major).
    // Fix: index rows by ncol (the function's own parameter) instead of the
    // global Size — identical for current callers (ncol == Size), but correct
    // for non-square use; also detect truncated/malformed input instead of
    // silently leaving garbage in the matrix.
    int i, j;
    for (i = 0; i < nrow; i++) {
        for (j = 0; j < ncol; j++) {
            if (fscanf(fp, "%f", ary + ncol * i + j) != 1) {
                fprintf(stderr, "InitMat: bad or missing value at (%d,%d)\n", i, j);
                exit(EXIT_FAILURE);
            }
        }
    }
}
/*------------------------------------------------------
** PrintMat() -- Print the contents of the matrix
**------------------------------------------------------
*/
void PrintMat(float *ary, int nrow, int ncol)
{
    // Print an nrow x ncol row-major matrix, one row per line, %8.2f cells.
    // Fix: index rows by ncol (the parameter) instead of the global Size —
    // identical for current callers (ncol == Size), but correct for
    // non-square matrices.
    int i, j;
    for (i = 0; i < nrow; i++) {
        for (j = 0; j < ncol; j++) {
            printf("%8.2f ", *(ary + ncol * i + j));
        }
        printf("\n");
    }
    printf("\n");
}
/*------------------------------------------------------
** InitAry() -- Initialize the array (vector) by reading
** data from the data file
**------------------------------------------------------
*/
void InitAry(float *ary, int ary_size)
{
    // Read ary_size whitespace-separated floats from the already-open
    // file fp into ary.
    for (int k = 0; k < ary_size; k++)
        fscanf(fp, "%f", ary + k);
}
/*------------------------------------------------------
** PrintAry() -- Print the contents of the array (vector)
**------------------------------------------------------
*/
void PrintAry(float *ary, int ary_size)
{
    // Print the vector on one line (two decimals per entry) followed by a
    // blank line.
    for (int k = 0; k < ary_size; k++)
        printf("%.2f ", ary[k]);
    printf("\n\n");
}
void checkCUDAError(const char *msg)
{
    // Abort with a diagnostic if any preceding CUDA call or kernel launch
    // failed; msg identifies the call site.
    cudaError_t err = cudaGetLastError();
    if (err == cudaSuccess)
        return;
    fprintf(stderr, "Cuda error: %s: %s.\n", msg,
            cudaGetErrorString(err));
    exit(EXIT_FAILURE);
}
|
4c1b90686535a12bae1a65269b273d7c324e9f1f.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
/*
* Copyright 1993-2012 NVIDIA Corporation. All rights reserved.
*
* Please refer to the NVIDIA end user license agreement (EULA) associated
* with this source code for terms and conditions that govern your use of
* this software. Any use, reproduction, disclosure, or distribution of
* this software and related documentation outside the terms of the EULA
* is strictly prohibited.
*
*/
#include <assert.h>
#include <helper_cuda.h>
#include "scan_common.h"
//All three kernels run 512 threads per workgroup
//Must be a power of two
#define THREADBLOCK_SIZE 512
#define LOOP_PERTHREAD 16
#define LOOP_PERTHREAD2 16
////////////////////////////////////////////////////////////////////////////////
// Basic ccan codelets
////////////////////////////////////////////////////////////////////////////////
//Naive inclusive scan: O(N * log2(N)) operations
//Allocate 2 * 'size' local memory, initialize the first half
//with 'size' zeros avoiding if(pos >= offset) condition evaluation
//and saving instructions
// Inclusive scan of one value per thread through shared memory.
// `size` must be a power of two; the lower half of each thread group's
// s_Data window is zero-filled so reads at pos-offset never need a bounds
// check.  Must be called by all threads of the block (uses __syncthreads).
inline __device__ uint scan1Inclusive(uint idata, volatile uint *s_Data, uint size)
{
uint pos = 2 * threadIdx.x - (threadIdx.x & (size - 1));
s_Data[pos] = 0;
pos += size;
s_Data[pos] = idata;
// Doubling-offset (Hillis-Steele style) scan: O(size * log2(size)) adds.
for (uint offset = 1; offset < size; offset <<= 1)
{
__syncthreads();
uint t = s_Data[pos] + s_Data[pos - offset];
__syncthreads();
s_Data[pos] = t;
}
return s_Data[pos];
}
inline __device__ uint scan1Exclusive(uint idata, volatile uint *s_Data, uint size)
{
    // Exclusive scan = inclusive scan with the element's own value removed.
    uint inclusive = scan1Inclusive(idata, s_Data, size);
    return inclusive - idata;
}
// Inclusive scan over 4 elements per thread: serial prefix inside the uint4,
// then a shared-memory exclusive scan of the per-thread totals (idata4.w)
// added back onto every lane.
inline __device__ uint4 scan4Inclusive(uint4 idata4, volatile uint *s_Data, uint size)
{
//Level-0 inclusive scan
idata4.y += idata4.x;
idata4.z += idata4.y;
idata4.w += idata4.z;
//Level-1 exclusive scan
// size/4 because each participating thread now carries one partial sum.
uint oval = scan1Exclusive(idata4.w, s_Data, size / 4);
idata4.x += oval;
idata4.y += oval;
idata4.z += oval;
idata4.w += oval;
return idata4;
}
//Exclusive vector scan: the array to be scanned is stored
//in local thread memory scope as uint4
inline __device__ uint4 scan4Exclusive(uint4 idata4, volatile uint *s_Data, uint size)
{
    // Exclusive vector scan: run the inclusive scan, then remove each
    // element's own contribution component-wise.
    uint4 inc = scan4Inclusive(idata4, s_Data, size);
    return make_uint4(inc.x - idata4.x,
                      inc.y - idata4.y,
                      inc.z - idata4.z,
                      inc.w - idata4.w);
}
////////////////////////////////////////////////////////////////////////////////
// Scan kernels
////////////////////////////////////////////////////////////////////////////////
// Bottom-level kernel: each thread loads one uint4 and the block produces an
// exclusive scan of `size` elements entirely in shared memory.
__global__ void scanExclusiveShared(
uint4 *d_Dst,
uint4 *d_Src,
uint size
)
{
__shared__ uint s_Data[2 * THREADBLOCK_SIZE];
uint pos = blockIdx.x * blockDim.x + threadIdx.x;
//Load data
uint4 idata4 = d_Src[pos];
//Calculate exclusive scan
uint4 odata4 = scan4Exclusive(idata4, s_Data, size);
//Write back
d_Dst[pos] = odata4;
}
//Exclusive scan of top elements of bottom-level scans (4 * THREADBLOCK_SIZE)
// Second-level kernel: gather the total of every bottom-level tile
// (last scanned element + last source element makes the scan inclusive
// again) and exclusively scan those totals into d_Buf.
__global__ void scanExclusiveShared2(
uint *d_Buf,
uint *d_Dst,
uint *d_Src,
uint N,
uint arrayLength
)
{
__shared__ uint s_Data[2 * THREADBLOCK_SIZE];
//Skip loads and stores for inactive threads of last threadblock (pos >= N)
uint pos = blockIdx.x * blockDim.x + threadIdx.x;
//Load top elements
//Convert results of bottom-level scan back to inclusive
uint idata = 0;
if (pos < N)
idata =
d_Dst[(4 * THREADBLOCK_SIZE) - 1 + (4 * THREADBLOCK_SIZE) * pos] + d_Src[(4 * THREADBLOCK_SIZE) - 1 + (4 * THREADBLOCK_SIZE) * pos];
//Compute
// All threads run the scan (it uses __syncthreads); only valid ones store.
uint odata = scan1Exclusive(idata, s_Data, arrayLength);
//Avoid out-of-bound access
if (pos < N)
{
d_Buf[pos] = odata;
}
}
// Third-level kernel: totals of every second-level tile (d_Buf's last entry
// plus the corresponding bottom-level tail elements) are exclusively scanned
// into e_Buf, extending the scan beyond 4*THREADBLOCK_SIZE^2 elements.
__global__ void scanExclusiveShared3(
uint *e_Buf,
uint *d_Buf,
uint *d_Dst,
uint *d_Src,
uint N,
uint arrayLength
)
{
__shared__ uint s_Data[2 * THREADBLOCK_SIZE];
//Skip loads and stores for inactive threads of last threadblock (pos >= N)
uint pos = blockIdx.x * blockDim.x + threadIdx.x;
//Load top elements
//Convert results of bottom-level scan back to inclusive
uint idata = 0;
if (pos < N)
idata =
d_Buf[THREADBLOCK_SIZE -1 + pos * THREADBLOCK_SIZE] + d_Dst[(4 * THREADBLOCK_SIZE * THREADBLOCK_SIZE) - 1 + (4 * THREADBLOCK_SIZE * THREADBLOCK_SIZE) * pos] + d_Src[(4 * THREADBLOCK_SIZE * THREADBLOCK_SIZE) - 1 + (4 * THREADBLOCK_SIZE * THREADBLOCK_SIZE) * pos];
//Compute
uint odata = scan1Exclusive(idata, s_Data, arrayLength);
//Avoid out-of-bound access
if (pos < N)
{
e_Buf[pos] = odata;
}
}
//Final step of large-array scan: combine basic inclusive scan with exclusive scan of top elements of input arrays
// Add each block's scanned tile-offset (d_Buffer[blockIdx.x]) to every
// element the block owns, turning per-tile scans into a global scan.
__global__ void uniformUpdate(
uint4 *d_Data,
uint *d_Buffer
)
{
__shared__ uint buf;
uint pos = blockIdx.x * blockDim.x + threadIdx.x;
// One thread loads the offset; the barrier publishes it to the block.
if (threadIdx.x == 0)
{
buf = d_Buffer[blockIdx.x];
}
__syncthreads();
uint4 data4 = d_Data[pos];
data4.x += buf;
data4.y += buf;
data4.z += buf;
data4.w += buf;
d_Data[pos] = data4;
}
// Like uniformUpdate, but one buffer entry covers a *group* of
// THREADBLOCK_SIZE consecutive blocks (third scan level): the offset index
// is blockIdx.x / THREADBLOCK_SIZE.
__global__ void uniformUpdate2(
uint4 *d_Data,
uint *d_Buffer
)
{
__shared__ uint buf;
uint pos = blockIdx.x * blockDim.x + threadIdx.x;
uint temp = blockIdx.x/THREADBLOCK_SIZE;
if (threadIdx.x == 0)
{
buf = d_Buffer[temp];
}
__syncthreads();
uint4 data4 = d_Data[pos];
data4.x += buf;
data4.y += buf;
data4.z += buf;
data4.w += buf;
d_Data[pos] = data4;
}
// Adjacent differences of the strided sequence d_Src[i*pnum]:
//   d_Data[i] = d_Src[(i+1)*pnum] - d_Src[i*pnum]   for i < length-1,
//   d_Data[length-1] = size - d_Src[(length-1)*pnum]  (tail entry).
// Each thread handles LOOP_PERTHREAD consecutive outputs.
__global__ void diff_kernel(
uint *d_Data,
uint *d_Src,
uint pnum,
uint length,
uint size
)
{
uint pos = blockIdx.x * blockDim.x + threadIdx.x;
uint p_n = pnum;
uint len = length;
uint POS = pos * LOOP_PERTHREAD;
uint i;
for(i = POS ; (i < POS + LOOP_PERTHREAD)&&(i < len-1); i++){
d_Data[i] = d_Src[(i+1)*p_n] - d_Src[i * p_n];
}
// The thread whose range stops exactly at len-1 writes the tail entry.
if(i == (len-1)){
d_Data[len-1] = size - d_Src[(len-1)*p_n];
}
}
// Gather every pnum-th element of d_Src into d_Data[0..length-1]; the thread
// whose range ends exactly at `length` also appends `size` at d_Data[length]
// (so the destination must hold length+1 entries).
// Each thread handles LOOP_PERTHREAD2 consecutive outputs.
__global__ void transport_kernel(
uint *d_Data,
uint *d_Src,
uint pnum,
uint length,
uint size
)
{
uint pos = blockIdx.x * blockDim.x + threadIdx.x;
uint p_n = pnum;
uint len = length;
uint POS = pos * LOOP_PERTHREAD2;
uint i;
for(i = POS ; (i < POS + LOOP_PERTHREAD2)&&(i < len); i++){
d_Data[i] = d_Src[i * p_n];
}
if(i == len){
d_Data[len] = size;
}
}
////////////////////////////////////////////////////////////////////////////////
// Interface function
////////////////////////////////////////////////////////////////////////////////
//Derived as 32768 (max power-of-two gridDim.x) * 4 * THREADBLOCK_SIZE
//Due to scanExclusiveShared<<<>>>() 1D block addressing
extern "C" const uint MAX_BATCH_ELEMENTS = 4 * THREADBLOCK_SIZE * THREADBLOCK_SIZE * THREADBLOCK_SIZE;
extern "C" const uint MIN_SHORT_ARRAY_SIZE = 4;
extern "C" const uint MAX_SHORT_ARRAY_SIZE = 4 * THREADBLOCK_SIZE;
extern "C" const uint MIN_LARGE_ARRAY_SIZE = 8 * THREADBLOCK_SIZE;
extern "C" const uint MAX_LARGE_ARRAY_SIZE = 4 * THREADBLOCK_SIZE * THREADBLOCK_SIZE;
extern "C" const uint MIN_LL_SIZE = 8 * THREADBLOCK_SIZE * THREADBLOCK_SIZE;
extern "C" const uint MAX_LL_SIZE = MAX_BATCH_ELEMENTS;//4 * THREADBLOCK_SIZE * THREADBLOCK_SIZE * THREADBLOCK_SIZE;
//Internal exclusive scan buffer
static uint *d_Buf;
static uint *e_Buf;
// Allocate the intermediate level-2 (d_Buf) and level-3 (e_Buf) scan buffers
// once up front; sized for the largest supported batch.  Pair with closeScan().
extern "C" void initScan(void)
{
checkCudaErrors(hipMalloc((void **)&d_Buf, (MAX_BATCH_ELEMENTS / (4 * THREADBLOCK_SIZE)) * sizeof(uint)));
checkCudaErrors(hipMalloc((void **)&e_Buf, (MAX_BATCH_ELEMENTS / (4 * THREADBLOCK_SIZE * THREADBLOCK_SIZE)) * sizeof(uint)));
}
// Release the buffers allocated by initScan().
extern "C" void closeScan(void)
{
checkCudaErrors(hipFree(d_Buf));
checkCudaErrors(hipFree(e_Buf));
}
static uint factorRadix2(uint &log2L, uint L)
{
    // Strip all factors of two from L, counting them in log2L.
    // Returns the remaining odd factor (1 iff L was a power of two),
    // or 0 when L == 0.
    log2L = 0;
    if (L == 0)
        return 0;
    while ((L & 1) == 0)
    {
        L >>= 1;
        ++log2L;
    }
    return L;
}
static uint iDivUp(uint dividend, uint divisor)
{
    // Integer ceiling division; avoids the overflow-prone
    // (dividend + divisor - 1) / divisor form.
    uint quotient = dividend / divisor;
    if (dividend % divisor != 0)
        ++quotient;
    return quotient;
}
// Exclusive scan of batchSize independent arrays of arrayLength elements
// each, where arrayLength is a power of two in
// [MIN_SHORT_ARRAY_SIZE, 4*THREADBLOCK_SIZE]: one kernel pass suffices.
// Returns THREADBLOCK_SIZE.
extern "C" size_t scanExclusiveShort(
uint *d_Dst,
uint *d_Src,
uint batchSize,
uint arrayLength
)
{
//Check power-of-two factorization
uint log2L;
uint factorizationRemainder = factorRadix2(log2L, arrayLength);
assert(factorizationRemainder == 1);
//Check supported size range
assert((arrayLength >= MIN_SHORT_ARRAY_SIZE) && (arrayLength <= MAX_SHORT_ARRAY_SIZE));
//Check total batch size limit
assert((batchSize * arrayLength) <= MAX_BATCH_ELEMENTS);
//Check all threadblocks to be fully packed with data
assert((batchSize * arrayLength) % (4 * THREADBLOCK_SIZE) == 0);
// Each thread processes one uint4, hence the /4 in the grid size.
hipLaunchKernelGGL(( scanExclusiveShared), dim3((batchSize * arrayLength) / (4 * THREADBLOCK_SIZE)), dim3(THREADBLOCK_SIZE), 0, 0,
(uint4 *)d_Dst,
(uint4 *)d_Src,
arrayLength
);
getLastCudaError("scanExclusiveShared() execution FAILED\n");
return THREADBLOCK_SIZE;
}
// Two-level exclusive scan for arrayLength in
// [MIN_LARGE_ARRAY_SIZE, 4*THREADBLOCK_SIZE^2]: bottom-level tile scans,
// a scan of the tile totals into d_Buf, then a uniform add of those
// offsets.  arrayLength must be a multiple of MAX_SHORT_ARRAY_SIZE.
// Returns THREADBLOCK_SIZE.
extern "C" size_t scanExclusiveLarge(
uint *d_Dst,
uint *d_Src,
uint batchSize,
uint arrayLength
)
{
//Check power-of-two factorization
/*
uint log2L;
uint factorizationRemainder = factorRadix2(log2L, arrayLength);
assert(factorizationRemainder == 1);
*/
assert(arrayLength%MAX_SHORT_ARRAY_SIZE == 0);
//Check supported size range
assert((arrayLength >= MIN_LARGE_ARRAY_SIZE) && (arrayLength <= MAX_LARGE_ARRAY_SIZE));
//Check total batch size limit
assert((batchSize * arrayLength) <= MAX_BATCH_ELEMENTS);
hipLaunchKernelGGL(( scanExclusiveShared), dim3((batchSize * arrayLength) / (4 * THREADBLOCK_SIZE)), dim3(THREADBLOCK_SIZE), 0, 0,
(uint4 *)d_Dst,
(uint4 *)d_Src,
4 * THREADBLOCK_SIZE
);
getLastCudaError("scanExclusiveShared() execution FAILED\n");
//Not all threadblocks need to be packed with input data:
//inactive threads of highest threadblock just don't do global reads and writes
// scan1Exclusive needs a power-of-two size: round the tile count up.
uint array_temp = THREADBLOCK_SIZE;
for(uint i = 2; i<=THREADBLOCK_SIZE ; i <<= 1){
if(i >= arrayLength/(4 * THREADBLOCK_SIZE)){
array_temp = i;
break;
}
}
const uint blockCount2 = 1;//iDivUp((batchSize * arrayLength) / (4 * THREADBLOCK_SIZE), THREADBLOCK_SIZE);
hipLaunchKernelGGL(( scanExclusiveShared2), dim3(blockCount2), dim3(THREADBLOCK_SIZE), 0, 0,
(uint *)d_Buf,
(uint *)d_Dst,
(uint *)d_Src,
arrayLength / (4 * THREADBLOCK_SIZE),
array_temp
);
getLastCudaError("scanExclusiveShared2() execution FAILED\n");
hipLaunchKernelGGL(( uniformUpdate), dim3((arrayLength) / (4 * THREADBLOCK_SIZE)), dim3(THREADBLOCK_SIZE), 0, 0,
(uint4 *)d_Dst,
(uint *)d_Buf
);
getLastCudaError("uniformUpdate() execution FAILED\n");
return THREADBLOCK_SIZE;
}
// Three-level exclusive scan for very large arrays
// (arrayLength in [MIN_LL_SIZE, MAX_LL_SIZE], a multiple of
// MAX_LARGE_ARRAY_SIZE): bottom-level tile scans, a scan of tile totals
// (d_Buf), a scan of those groups' totals (e_Buf), then two uniform-add
// passes.  Returns THREADBLOCK_SIZE.
extern "C" size_t scanExclusiveLL(
uint *d_Dst,
uint *d_Src,
uint batchSize,
uint arrayLength
)
{
//Check power-of-two factorization
/*
uint log2L;
uint factorizationRemainder = factorRadix2(log2L, arrayLength);
assert(factorizationRemainder == 1);
*/
assert((arrayLength%MAX_LARGE_ARRAY_SIZE) == 0);
//Check supported size range
assert((arrayLength >= MIN_LL_SIZE) && (arrayLength <= MAX_LL_SIZE));
//Check total batch size limit
assert((batchSize * arrayLength) <= MAX_BATCH_ELEMENTS);
hipLaunchKernelGGL(( scanExclusiveShared), dim3((batchSize * arrayLength) / (4 * THREADBLOCK_SIZE)), dim3(THREADBLOCK_SIZE), 0, 0,
(uint4 *)d_Dst,
(uint4 *)d_Src,
4 * THREADBLOCK_SIZE
);
getLastCudaError("scanExclusiveShared() execution FAILED\n");
checkCudaErrors(hipDeviceSynchronize());
//Now ,prefix sum per THREADBLOCK_SIZE done
//Not all threadblocks need to be packed with input data:
//inactive threads of highest threadblock just don't do global reads and writes
const uint blockCount2 = iDivUp((batchSize * arrayLength) / (4 * THREADBLOCK_SIZE), THREADBLOCK_SIZE);
hipLaunchKernelGGL(( scanExclusiveShared2), dim3(blockCount2), dim3(THREADBLOCK_SIZE), 0, 0,
(uint *)d_Buf,
(uint *)d_Dst,
(uint *)d_Src,
(batchSize *arrayLength) / (4 * THREADBLOCK_SIZE),
THREADBLOCK_SIZE
);
getLastCudaError("scanExclusiveShared2() execution FAILED\n");
checkCudaErrors(hipDeviceSynchronize());
//prefix sum of last elements per THREADBLOCK_SIZE done
//this prefix sum can caluculate under only THREADBLOCK_SIZE size.
//so We need one more prefix sum for last elements.
// scan1Exclusive needs a power-of-two size: round the group count up.
uint array_temp = THREADBLOCK_SIZE;
for(uint i = 2; i<=THREADBLOCK_SIZE ; i <<= 1){
if(i >= arrayLength/(4 * THREADBLOCK_SIZE * THREADBLOCK_SIZE)){
array_temp = i;
break;
}
}
const uint blockCount3 = 1;//(batchSize * arrayLength) / (4 * THREADBLOCK_SIZE * THREADBLOCK_SIZE);
hipLaunchKernelGGL(( scanExclusiveShared3), dim3(blockCount3), dim3(THREADBLOCK_SIZE), 0, 0,
(uint *)e_Buf,
(uint *)d_Buf,
(uint *)d_Dst,
(uint *)d_Src,
arrayLength / (4 * THREADBLOCK_SIZE * THREADBLOCK_SIZE),
array_temp
//arrayLength / (4 * THREADBLOCK_SIZE * THREADBLOCK_SIZE)
);
getLastCudaError("scanExclusiveShared3() execution FAILED\n");
checkCudaErrors(hipDeviceSynchronize());
//add d_Buf to each array of d_Dst
hipLaunchKernelGGL(( uniformUpdate), dim3((batchSize *arrayLength) / (4 * THREADBLOCK_SIZE )), dim3(THREADBLOCK_SIZE), 0, 0,
(uint4 *)d_Dst,
(uint *)d_Buf
);
//add e_Buf to each array of d_Dst
checkCudaErrors(hipDeviceSynchronize());
hipLaunchKernelGGL(( uniformUpdate2), dim3((batchSize *arrayLength) / (4 * THREADBLOCK_SIZE )), dim3(THREADBLOCK_SIZE), 0, 0,
(uint4 *)d_Dst,
(uint *)e_Buf
);
getLastCudaError("uniformUpdate() execution FAILED\n");
checkCudaErrors(hipDeviceSynchronize());
return THREADBLOCK_SIZE;
}
// Host wrapper for diff_kernel: adjacent differences of the stride-`diff`
// subsequence of d_Src, written to d_Dst (see diff_kernel for the layout).
// Synchronizes before returning.  Returns THREADBLOCK_SIZE.
extern "C" size_t diff_Part(
uint *d_Dst,
uint *d_Src,
uint diff,
uint arrayLength,
uint size
)
{
//Check total batch size limit
//assert((arrayLength) <= MAX_BATCH_ELEMENTS);
const uint blockCount = iDivUp(arrayLength , LOOP_PERTHREAD*THREADBLOCK_SIZE);
hipLaunchKernelGGL(( diff_kernel), dim3(blockCount), dim3(THREADBLOCK_SIZE), 0, 0,
d_Dst,
d_Src,
diff,
arrayLength,
size
);
getLastCudaError("diff_Part() execution FAILED\n");
checkCudaErrors(hipDeviceSynchronize());
return THREADBLOCK_SIZE;
}
// Host wrapper for transport_kernel: gathers every `diff`-th element of
// d_Src into d_Dst and appends `size` as the final entry (d_Dst must hold
// arrayLength+1 entries).  Synchronizes before returning.
extern "C" void transport_gpu(
uint *d_Dst,
uint *d_Src,
uint diff,
uint arrayLength,
uint size
)
{
//Check total batch size limit
//assert((arrayLength) <= MAX_BATCH_ELEMENTS);
const uint blockCount = iDivUp(arrayLength , LOOP_PERTHREAD2*THREADBLOCK_SIZE);
hipLaunchKernelGGL(( transport_kernel), dim3(blockCount), dim3(THREADBLOCK_SIZE), 0, 0,
d_Dst,
d_Src,
diff,
arrayLength,
size
);
getLastCudaError("transport_gpu() execution FAILED\n");
checkCudaErrors(hipDeviceSynchronize());
}
| 4c1b90686535a12bae1a65269b273d7c324e9f1f.cu | /*
* Copyright 1993-2012 NVIDIA Corporation. All rights reserved.
*
* Please refer to the NVIDIA end user license agreement (EULA) associated
* with this source code for terms and conditions that govern your use of
* this software. Any use, reproduction, disclosure, or distribution of
* this software and related documentation outside the terms of the EULA
* is strictly prohibited.
*
*/
#include <assert.h>
#include <helper_cuda.h>
#include "scan_common.h"
//All three kernels run 512 threads per workgroup
//Must be a power of two
#define THREADBLOCK_SIZE 512
#define LOOP_PERTHREAD 16
#define LOOP_PERTHREAD2 16
////////////////////////////////////////////////////////////////////////////////
// Basic ccan codelets
////////////////////////////////////////////////////////////////////////////////
//Naive inclusive scan: O(N * log2(N)) operations
//Allocate 2 * 'size' local memory, initialize the first half
//with 'size' zeros avoiding if(pos >= offset) condition evaluation
//and saving instructions
// Inclusive scan of one value per thread through shared memory.
// `size` must be a power of two; the lower half of each thread group's
// s_Data window is zero-filled so reads at pos-offset never need a bounds
// check.  Must be called by all threads of the block (uses __syncthreads).
inline __device__ uint scan1Inclusive(uint idata, volatile uint *s_Data, uint size)
{
uint pos = 2 * threadIdx.x - (threadIdx.x & (size - 1));
s_Data[pos] = 0;
pos += size;
s_Data[pos] = idata;
// Doubling-offset (Hillis-Steele style) scan: O(size * log2(size)) adds.
for (uint offset = 1; offset < size; offset <<= 1)
{
__syncthreads();
uint t = s_Data[pos] + s_Data[pos - offset];
__syncthreads();
s_Data[pos] = t;
}
return s_Data[pos];
}
inline __device__ uint scan1Exclusive(uint idata, volatile uint *s_Data, uint size)
{
    // Exclusive scan = inclusive scan with the element's own value removed.
    uint inclusive = scan1Inclusive(idata, s_Data, size);
    return inclusive - idata;
}
// Inclusive scan over 4 elements per thread: serial prefix inside the uint4,
// then a shared-memory exclusive scan of the per-thread totals (idata4.w)
// added back onto every lane.
inline __device__ uint4 scan4Inclusive(uint4 idata4, volatile uint *s_Data, uint size)
{
//Level-0 inclusive scan
idata4.y += idata4.x;
idata4.z += idata4.y;
idata4.w += idata4.z;
//Level-1 exclusive scan
// size/4 because each participating thread now carries one partial sum.
uint oval = scan1Exclusive(idata4.w, s_Data, size / 4);
idata4.x += oval;
idata4.y += oval;
idata4.z += oval;
idata4.w += oval;
return idata4;
}
//Exclusive vector scan: the array to be scanned is stored
//in local thread memory scope as uint4
inline __device__ uint4 scan4Exclusive(uint4 idata4, volatile uint *s_Data, uint size)
{
    // Exclusive vector scan: run the inclusive scan, then remove each
    // element's own contribution component-wise.
    uint4 inc = scan4Inclusive(idata4, s_Data, size);
    return make_uint4(inc.x - idata4.x,
                      inc.y - idata4.y,
                      inc.z - idata4.z,
                      inc.w - idata4.w);
}
////////////////////////////////////////////////////////////////////////////////
// Scan kernels
////////////////////////////////////////////////////////////////////////////////
// Bottom-level kernel: each thread loads one uint4 and the block produces an
// exclusive scan of `size` elements entirely in shared memory.
__global__ void scanExclusiveShared(
uint4 *d_Dst,
uint4 *d_Src,
uint size
)
{
__shared__ uint s_Data[2 * THREADBLOCK_SIZE];
uint pos = blockIdx.x * blockDim.x + threadIdx.x;
//Load data
uint4 idata4 = d_Src[pos];
//Calculate exclusive scan
uint4 odata4 = scan4Exclusive(idata4, s_Data, size);
//Write back
d_Dst[pos] = odata4;
}
//Exclusive scan of top elements of bottom-level scans (4 * THREADBLOCK_SIZE)
// Second-level kernel: gather the total of every bottom-level tile
// (last scanned element + last source element makes the scan inclusive
// again) and exclusively scan those totals into d_Buf.
__global__ void scanExclusiveShared2(
uint *d_Buf,
uint *d_Dst,
uint *d_Src,
uint N,
uint arrayLength
)
{
__shared__ uint s_Data[2 * THREADBLOCK_SIZE];
//Skip loads and stores for inactive threads of last threadblock (pos >= N)
uint pos = blockIdx.x * blockDim.x + threadIdx.x;
//Load top elements
//Convert results of bottom-level scan back to inclusive
uint idata = 0;
if (pos < N)
idata =
d_Dst[(4 * THREADBLOCK_SIZE) - 1 + (4 * THREADBLOCK_SIZE) * pos] + d_Src[(4 * THREADBLOCK_SIZE) - 1 + (4 * THREADBLOCK_SIZE) * pos];
//Compute
// All threads run the scan (it uses __syncthreads); only valid ones store.
uint odata = scan1Exclusive(idata, s_Data, arrayLength);
//Avoid out-of-bound access
if (pos < N)
{
d_Buf[pos] = odata;
}
}
// Third-level kernel: totals of every second-level tile (d_Buf's last entry
// plus the corresponding bottom-level tail elements) are exclusively scanned
// into e_Buf, extending the scan beyond 4*THREADBLOCK_SIZE^2 elements.
__global__ void scanExclusiveShared3(
uint *e_Buf,
uint *d_Buf,
uint *d_Dst,
uint *d_Src,
uint N,
uint arrayLength
)
{
__shared__ uint s_Data[2 * THREADBLOCK_SIZE];
//Skip loads and stores for inactive threads of last threadblock (pos >= N)
uint pos = blockIdx.x * blockDim.x + threadIdx.x;
//Load top elements
//Convert results of bottom-level scan back to inclusive
uint idata = 0;
if (pos < N)
idata =
d_Buf[THREADBLOCK_SIZE -1 + pos * THREADBLOCK_SIZE] + d_Dst[(4 * THREADBLOCK_SIZE * THREADBLOCK_SIZE) - 1 + (4 * THREADBLOCK_SIZE * THREADBLOCK_SIZE) * pos] + d_Src[(4 * THREADBLOCK_SIZE * THREADBLOCK_SIZE) - 1 + (4 * THREADBLOCK_SIZE * THREADBLOCK_SIZE) * pos];
//Compute
uint odata = scan1Exclusive(idata, s_Data, arrayLength);
//Avoid out-of-bound access
if (pos < N)
{
e_Buf[pos] = odata;
}
}
//Final step of large-array scan: combine basic inclusive scan with exclusive scan of top elements of input arrays
// Add each block's scanned tile-offset (d_Buffer[blockIdx.x]) to every
// element the block owns, turning per-tile scans into a global scan.
__global__ void uniformUpdate(
uint4 *d_Data,
uint *d_Buffer
)
{
__shared__ uint buf;
uint pos = blockIdx.x * blockDim.x + threadIdx.x;
// One thread loads the offset; the barrier publishes it to the block.
if (threadIdx.x == 0)
{
buf = d_Buffer[blockIdx.x];
}
__syncthreads();
uint4 data4 = d_Data[pos];
data4.x += buf;
data4.y += buf;
data4.z += buf;
data4.w += buf;
d_Data[pos] = data4;
}
// Like uniformUpdate, but one buffer entry covers a *group* of
// THREADBLOCK_SIZE consecutive blocks (third scan level): the offset index
// is blockIdx.x / THREADBLOCK_SIZE.
__global__ void uniformUpdate2(
uint4 *d_Data,
uint *d_Buffer
)
{
__shared__ uint buf;
uint pos = blockIdx.x * blockDim.x + threadIdx.x;
uint temp = blockIdx.x/THREADBLOCK_SIZE;
if (threadIdx.x == 0)
{
buf = d_Buffer[temp];
}
__syncthreads();
uint4 data4 = d_Data[pos];
data4.x += buf;
data4.y += buf;
data4.z += buf;
data4.w += buf;
d_Data[pos] = data4;
}
// Adjacent differences of the strided sequence d_Src[i*pnum]:
//   d_Data[i] = d_Src[(i+1)*pnum] - d_Src[i*pnum]   for i < length-1,
//   d_Data[length-1] = size - d_Src[(length-1)*pnum]  (tail entry).
// Each thread handles LOOP_PERTHREAD consecutive outputs.
__global__ void diff_kernel(
uint *d_Data,
uint *d_Src,
uint pnum,
uint length,
uint size
)
{
uint pos = blockIdx.x * blockDim.x + threadIdx.x;
uint p_n = pnum;
uint len = length;
uint POS = pos * LOOP_PERTHREAD;
uint i;
for(i = POS ; (i < POS + LOOP_PERTHREAD)&&(i < len-1); i++){
d_Data[i] = d_Src[(i+1)*p_n] - d_Src[i * p_n];
}
// The thread whose range stops exactly at len-1 writes the tail entry.
if(i == (len-1)){
d_Data[len-1] = size - d_Src[(len-1)*p_n];
}
}
// Gather every pnum-th element of d_Src into d_Data[0..length-1]; the thread
// whose range ends exactly at `length` also appends `size` at d_Data[length]
// (so the destination must hold length+1 entries).
// Each thread handles LOOP_PERTHREAD2 consecutive outputs.
__global__ void transport_kernel(
uint *d_Data,
uint *d_Src,
uint pnum,
uint length,
uint size
)
{
uint pos = blockIdx.x * blockDim.x + threadIdx.x;
uint p_n = pnum;
uint len = length;
uint POS = pos * LOOP_PERTHREAD2;
uint i;
for(i = POS ; (i < POS + LOOP_PERTHREAD2)&&(i < len); i++){
d_Data[i] = d_Src[i * p_n];
}
if(i == len){
d_Data[len] = size;
}
}
////////////////////////////////////////////////////////////////////////////////
// Interface function
////////////////////////////////////////////////////////////////////////////////
//Derived as 32768 (max power-of-two gridDim.x) * 4 * THREADBLOCK_SIZE
//Due to scanExclusiveShared<<<>>>() 1D block addressing
extern "C" const uint MAX_BATCH_ELEMENTS = 4 * THREADBLOCK_SIZE * THREADBLOCK_SIZE * THREADBLOCK_SIZE;
extern "C" const uint MIN_SHORT_ARRAY_SIZE = 4;
extern "C" const uint MAX_SHORT_ARRAY_SIZE = 4 * THREADBLOCK_SIZE;
extern "C" const uint MIN_LARGE_ARRAY_SIZE = 8 * THREADBLOCK_SIZE;
extern "C" const uint MAX_LARGE_ARRAY_SIZE = 4 * THREADBLOCK_SIZE * THREADBLOCK_SIZE;
extern "C" const uint MIN_LL_SIZE = 8 * THREADBLOCK_SIZE * THREADBLOCK_SIZE;
extern "C" const uint MAX_LL_SIZE = MAX_BATCH_ELEMENTS;//4 * THREADBLOCK_SIZE * THREADBLOCK_SIZE * THREADBLOCK_SIZE;
//Internal exclusive scan buffer
static uint *d_Buf;
static uint *e_Buf;
// Allocate the intermediate level-2 (d_Buf) and level-3 (e_Buf) scan buffers
// once up front; sized for the largest supported batch.  Pair with closeScan().
extern "C" void initScan(void)
{
checkCudaErrors(cudaMalloc((void **)&d_Buf, (MAX_BATCH_ELEMENTS / (4 * THREADBLOCK_SIZE)) * sizeof(uint)));
checkCudaErrors(cudaMalloc((void **)&e_Buf, (MAX_BATCH_ELEMENTS / (4 * THREADBLOCK_SIZE * THREADBLOCK_SIZE)) * sizeof(uint)));
}
// Release the buffers allocated by initScan().
extern "C" void closeScan(void)
{
checkCudaErrors(cudaFree(d_Buf));
checkCudaErrors(cudaFree(e_Buf));
}
// Strip trailing zero bits: decomposes L as odd * 2^log2L and returns the
// odd factor (so a return value of 1 means L was an exact power of two).
// L == 0 yields 0 with log2L == 0.
static uint factorRadix2(uint &log2L, uint L)
{
    log2L = 0;
    if (L == 0)
        return 0;
    while ((L & 1) == 0)
    {
        L >>= 1;
        ++log2L;
    }
    return L;
}
// Integer ceiling division. Uses quotient + remainder rather than the
// (dividend + divisor - 1) / divisor form so it cannot overflow for
// dividends near UINT_MAX.
static uint iDivUp(uint dividend, uint divisor)
{
    uint quotient = dividend / divisor;
    if (dividend % divisor != 0)
        ++quotient;
    return quotient;
}
// Exclusive scan of `batchSize` short arrays (each a power of two between
// MIN_SHORT_ARRAY_SIZE and 4*THREADBLOCK_SIZE elements). A single thread
// block scans 4*THREADBLOCK_SIZE elements, so every block must be fully
// packed with input data.
extern "C" size_t scanExclusiveShort(
uint *d_Dst,
uint *d_Src,
uint batchSize,
uint arrayLength
)
{
    // arrayLength must factor as 1 * 2^k, i.e. be a power of two.
    uint log2L;
    const uint oddFactor = factorRadix2(log2L, arrayLength);
    assert(oddFactor == 1);
    // Supported size range and total batch limit.
    assert((arrayLength >= MIN_SHORT_ARRAY_SIZE) && (arrayLength <= MAX_SHORT_ARRAY_SIZE));
    const uint totalElems = batchSize * arrayLength;
    assert(totalElems <= MAX_BATCH_ELEMENTS);
    // All thread blocks must be fully packed with data.
    assert(totalElems % (4 * THREADBLOCK_SIZE) == 0);

    scanExclusiveShared<<<totalElems / (4 * THREADBLOCK_SIZE), THREADBLOCK_SIZE>>>(
        (uint4 *)d_Dst, (uint4 *)d_Src, arrayLength);
    getLastCudaError("scanExclusiveShared() execution FAILED\n");

    return THREADBLOCK_SIZE;
}
// Two-level exclusive scan for arrays larger than one thread block can
// handle: (1) scan 4*THREADBLOCK_SIZE-element chunks independently,
// (2) scan the per-chunk totals into d_Buf, (3) add those offsets back
// into every chunk with uniformUpdate. arrayLength must be a multiple of
// MAX_SHORT_ARRAY_SIZE and lie in [MIN_LARGE_ARRAY_SIZE, MAX_LARGE_ARRAY_SIZE].
extern "C" size_t scanExclusiveLarge(
uint *d_Dst,
uint *d_Src,
uint batchSize,
uint arrayLength
)
{
//Check power-of-two factorization
/*
uint log2L;
uint factorizationRemainder = factorRadix2(log2L, arrayLength);
assert(factorizationRemainder == 1);
*/
assert(arrayLength%MAX_SHORT_ARRAY_SIZE == 0);
//Check supported size range
assert((arrayLength >= MIN_LARGE_ARRAY_SIZE) && (arrayLength <= MAX_LARGE_ARRAY_SIZE));
//Check total batch size limit
assert((batchSize * arrayLength) <= MAX_BATCH_ELEMENTS);
// Phase 1: independent exclusive scan of each 4*THREADBLOCK_SIZE chunk.
scanExclusiveShared<<<(batchSize * arrayLength) / (4 * THREADBLOCK_SIZE), THREADBLOCK_SIZE>>>(
(uint4 *)d_Dst,
(uint4 *)d_Src,
4 * THREADBLOCK_SIZE
);
getLastCudaError("scanExclusiveShared() execution FAILED\n");
//Not all threadblocks need to be packed with input data:
//inactive threads of highest threadblock just don't do global reads and writes
// Round the number of chunks (arrayLength / (4*THREADBLOCK_SIZE)) up to the
// next power of two, capped at THREADBLOCK_SIZE, for the second-level scan.
uint array_temp = THREADBLOCK_SIZE;
for(uint i = 2; i<=THREADBLOCK_SIZE ; i <<= 1){
if(i >= arrayLength/(4 * THREADBLOCK_SIZE)){
array_temp = i;
break;
}
}
// Phase 2: scan the per-chunk totals into d_Buf (a single block suffices here).
const uint blockCount2 = 1;//iDivUp((batchSize * arrayLength) / (4 * THREADBLOCK_SIZE), THREADBLOCK_SIZE);
scanExclusiveShared2<<< blockCount2, THREADBLOCK_SIZE>>>(
(uint *)d_Buf,
(uint *)d_Dst,
(uint *)d_Src,
arrayLength / (4 * THREADBLOCK_SIZE),
array_temp
);
getLastCudaError("scanExclusiveShared2() execution FAILED\n");
// Phase 3: add each chunk's base offset (from d_Buf) to every element of
// the chunk.
uniformUpdate<<<(arrayLength) / (4 * THREADBLOCK_SIZE), THREADBLOCK_SIZE>>>(
(uint4 *)d_Dst,
(uint *)d_Buf
);
getLastCudaError("uniformUpdate() execution FAILED\n");
return THREADBLOCK_SIZE;
}
// Three-level exclusive scan for very large arrays: chunk scan, scan of
// chunk totals (d_Buf), scan of the totals-of-totals (e_Buf), then two
// uniform-update passes fold both offset levels back into d_Dst.
// arrayLength must be a multiple of MAX_LARGE_ARRAY_SIZE and lie in
// [MIN_LL_SIZE, MAX_LL_SIZE].
extern "C" size_t scanExclusiveLL(
uint *d_Dst,
uint *d_Src,
uint batchSize,
uint arrayLength
)
{
//Check power-of-two factorization
/*
uint log2L;
uint factorizationRemainder = factorRadix2(log2L, arrayLength);
assert(factorizationRemainder == 1);
*/
assert((arrayLength%MAX_LARGE_ARRAY_SIZE) == 0);
//Check supported size range
assert((arrayLength >= MIN_LL_SIZE) && (arrayLength <= MAX_LL_SIZE));
//Check total batch size limit
assert((batchSize * arrayLength) <= MAX_BATCH_ELEMENTS);
// Level 1: independent exclusive scan of each 4*THREADBLOCK_SIZE chunk.
scanExclusiveShared<<<(batchSize * arrayLength) / (4 * THREADBLOCK_SIZE), THREADBLOCK_SIZE>>>(
(uint4 *)d_Dst,
(uint4 *)d_Src,
4 * THREADBLOCK_SIZE
);
getLastCudaError("scanExclusiveShared() execution FAILED\n");
checkCudaErrors(cudaDeviceSynchronize());
// Now the prefix sum within each chunk is done.
//Not all threadblocks need to be packed with input data:
//inactive threads of highest threadblock just don't do global reads and writes
const uint blockCount2 = iDivUp((batchSize * arrayLength) / (4 * THREADBLOCK_SIZE), THREADBLOCK_SIZE);
scanExclusiveShared2<<< blockCount2, THREADBLOCK_SIZE>>>(
(uint *)d_Buf,
(uint *)d_Dst,
(uint *)d_Src,
(batchSize *arrayLength) / (4 * THREADBLOCK_SIZE),
THREADBLOCK_SIZE
);
getLastCudaError("scanExclusiveShared2() execution FAILED\n")
;
checkCudaErrors(cudaDeviceSynchronize());
// Level 2 done: prefix sums of the per-chunk totals. That scan only covers
// THREADBLOCK_SIZE entries at a time, so one more scan level is needed
// over its block totals.
// Round the level-3 element count up to the next power of two (capped at
// THREADBLOCK_SIZE).
uint array_temp = THREADBLOCK_SIZE;
for(uint i = 2; i<=THREADBLOCK_SIZE ; i <<= 1){
if(i >= arrayLength/(4 * THREADBLOCK_SIZE * THREADBLOCK_SIZE)){
array_temp = i;
break;
}
}
const uint blockCount3 = 1;//(batchSize * arrayLength) / (4 * THREADBLOCK_SIZE * THREADBLOCK_SIZE);
scanExclusiveShared3<<< blockCount3, THREADBLOCK_SIZE>>>(
(uint *)e_Buf,
(uint *)d_Buf,
(uint *)d_Dst,
(uint *)d_Src,
arrayLength / (4 * THREADBLOCK_SIZE * THREADBLOCK_SIZE),
array_temp
//arrayLength / (4 * THREADBLOCK_SIZE * THREADBLOCK_SIZE)
);
getLastCudaError("scanExclusiveShared3() execution FAILED\n");
checkCudaErrors(cudaDeviceSynchronize());
//add d_Buf to each array of d_Dst
uniformUpdate<<<(batchSize *arrayLength) / (4 * THREADBLOCK_SIZE ), THREADBLOCK_SIZE>>>(
(uint4 *)d_Dst,
(uint *)d_Buf
);
//add e_Buf to each array of d_Dst
checkCudaErrors(cudaDeviceSynchronize());
uniformUpdate2<<<(batchSize *arrayLength) / (4 * THREADBLOCK_SIZE ), THREADBLOCK_SIZE>>>(
(uint4 *)d_Dst,
(uint *)e_Buf
);
getLastCudaError("uniformUpdate() execution FAILED\n");
checkCudaErrors(cudaDeviceSynchronize());
return THREADBLOCK_SIZE;
}
// Host wrapper for diff_kernel: each thread processes LOOP_PERTHREAD
// elements, so the grid is sized as ceil(arrayLength / (LOOP_PERTHREAD *
// THREADBLOCK_SIZE)). Synchronizes before returning.
extern "C" size_t diff_Part(
uint *d_Dst,
uint *d_Src,
uint diff,
uint arrayLength,
uint size
)
{
    //Check total batch size limit
    //assert((arrayLength) <= MAX_BATCH_ELEMENTS);
    const uint elemsPerBlock = LOOP_PERTHREAD * THREADBLOCK_SIZE;
    const uint blockCount = iDivUp(arrayLength, elemsPerBlock);
    diff_kernel<<<blockCount, THREADBLOCK_SIZE>>>(d_Dst, d_Src, diff, arrayLength, size);
    getLastCudaError("diff_Part() execution FAILED\n");
    checkCudaErrors(cudaDeviceSynchronize());
    return THREADBLOCK_SIZE;
}
// Host wrapper for transport_kernel: gathers every diff-th element of
// d_Src into the contiguous d_Dst (plus the `size` sentinel at the end).
// Each thread moves LOOP_PERTHREAD2 elements; synchronizes before returning.
extern "C" void transport_gpu(
uint *d_Dst,
uint *d_Src,
uint diff,
uint arrayLength,
uint size
)
{
    //Check total batch size limit
    //assert((arrayLength) <= MAX_BATCH_ELEMENTS);
    const uint elemsPerBlock = LOOP_PERTHREAD2 * THREADBLOCK_SIZE;
    const uint blockCount = iDivUp(arrayLength, elemsPerBlock);
    transport_kernel<<<blockCount, THREADBLOCK_SIZE>>>(d_Dst, d_Src, diff, arrayLength, size);
    getLastCudaError("transport_gpu() execution FAILED\n");
    checkCudaErrors(cudaDeviceSynchronize());
}
|
91cf024adcc84ff60ca3329fefafd4ba31fe842f.hip | // !!! This is a file automatically generated by hipify!!!
#include <cmath>
#include <cstdlib>
#include <cstring>
#include <functional>
#include "caffe/backend/cuda/cuda_device.hpp"
#include "caffe/util/math_functions.hpp"
#include "caffe/common.hpp"
#include "caffe/backend/backend.hpp"
#include "caffe/backend/vptr.hpp"
#include "caffe/backend/dev_ptr.hpp"
#include "caffe/backend/cuda/caffe_cuda.hpp"
#include "caffe/backend/cuda/cuda_dev_ptr.hpp"
#ifdef USE_ROCM
#include <thrust/device_vector.h>
#include <thrust/functional.h> // thrust::plus
#include <thrust/reduce.h>
#endif // USE_ROCM
namespace caffe {
#ifdef USE_ROCM
// Copy n bytes between two device-managed buffers. hipMemcpyDefault lets
// the runtime infer the transfer direction from the pointers; the copy is
// skipped entirely when source and destination already alias.
void CudaDevice::memcpy(const uint_tp n, vptr<const void> x, vptr<void> y) {
if (x.get_cuda_ptr() != y.get_cuda_ptr()) {
CHECK(x.get_cuda_ptr());
CHECK(y.get_cuda_ptr());
CUDA_CHECK(hipMemcpy(y.get_cuda_ptr(), x.get_cuda_ptr(),
n, hipMemcpyDefault)); // NOLINT(caffe/alt_fn)
}
}
// Copy n bytes from a raw host pointer into a device-managed buffer.
void CudaDevice::memcpy(const uint_tp n, const void* x, vptr<void> y) {
if (x != y.get_cuda_ptr()) {
CHECK(x);
CHECK(y.get_cuda_ptr());
CUDA_CHECK(hipMemcpy(y.get_cuda_ptr(), x,
n, hipMemcpyDefault)); // NOLINT(caffe/alt_fn)
}
}
// Copy n bytes from a device-managed buffer into a raw host pointer.
void CudaDevice::memcpy(const uint_tp n, vptr<const void> x, void* y) {
if (x.get_cuda_ptr() != y) {
CHECK(x.get_cuda_ptr());
CHECK(y);
CUDA_CHECK(hipMemcpy(y, x.get_cuda_ptr(),
n, hipMemcpyDefault)); // NOLINT(caffe/alt_fn)
}
}
#endif // USE_ROCM
} // namespace caffe
| 91cf024adcc84ff60ca3329fefafd4ba31fe842f.cu | #include <cmath>
#include <cstdlib>
#include <cstring>
#include <functional>
#include "caffe/backend/cuda/cuda_device.hpp"
#include "caffe/util/math_functions.hpp"
#include "caffe/common.hpp"
#include "caffe/backend/backend.hpp"
#include "caffe/backend/vptr.hpp"
#include "caffe/backend/dev_ptr.hpp"
#include "caffe/backend/cuda/caffe_cuda.hpp"
#include "caffe/backend/cuda/cuda_dev_ptr.hpp"
#ifdef USE_CUDA
#include <thrust/device_vector.h>
#include <thrust/functional.h> // thrust::plus
#include <thrust/reduce.h>
#endif // USE_CUDA
namespace caffe {
#ifdef USE_CUDA
// Copy n bytes between two device-managed buffers. cudaMemcpyDefault lets
// the runtime infer the transfer direction from the pointers; the copy is
// skipped entirely when source and destination already alias.
void CudaDevice::memcpy(const uint_tp n, vptr<const void> x, vptr<void> y) {
if (x.get_cuda_ptr() != y.get_cuda_ptr()) {
CHECK(x.get_cuda_ptr());
CHECK(y.get_cuda_ptr());
CUDA_CHECK(cudaMemcpy(y.get_cuda_ptr(), x.get_cuda_ptr(),
n, cudaMemcpyDefault)); // NOLINT(caffe/alt_fn)
}
}
// Copy n bytes from a raw host pointer into a device-managed buffer.
void CudaDevice::memcpy(const uint_tp n, const void* x, vptr<void> y) {
if (x != y.get_cuda_ptr()) {
CHECK(x);
CHECK(y.get_cuda_ptr());
CUDA_CHECK(cudaMemcpy(y.get_cuda_ptr(), x,
n, cudaMemcpyDefault)); // NOLINT(caffe/alt_fn)
}
}
// Copy n bytes from a device-managed buffer into a raw host pointer.
void CudaDevice::memcpy(const uint_tp n, vptr<const void> x, void* y) {
if (x.get_cuda_ptr() != y) {
CHECK(x.get_cuda_ptr());
CHECK(y);
CUDA_CHECK(cudaMemcpy(y, x.get_cuda_ptr(),
n, cudaMemcpyDefault)); // NOLINT(caffe/alt_fn)
}
}
#endif // USE_CUDA
} // namespace caffe
|
4a606cdd9c2f83d2f6f065f6c34b4d03ab652d16.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
#ifndef THC_GENERIC_FILE
#define THC_GENERIC_FILE "generic/VolumetricAdaptiveAveragePooling.cu"
#else
#include "../common.h"
// 5d tensor B x D x T x H x W
// Adaptive average pooling forward pass: resizes `output` to spatial size
// osizeT x osizeH x osizeW (note the T, W, H parameter order) and launches
// the pooling kernel. Accepts 4D (D,T,H,W) or 5D (B,D,T,H,W) input; the 5D
// path first forces a contiguous copy. Because gridDim.x is capped at
// 65535, the work is issued in chunks, with offsetZ telling each launch
// where it starts.
void THNN_(VolumetricAdaptiveAveragePooling_updateOutput)(
THCState *state,
THCTensor *input,
THCTensor *output,
int osizeT,
int osizeW,
int osizeH)
{
THCUNN_assertSameGPU(state, 2, input, output);
THCUNN_argCheck(state, !input->is_empty() && (input->dim() == 4 || input->dim() == 5), 2, input,
"non-empty 4D or 5D (batch mode) tensor expected for input, but got: %s");
real *output_data;
real *input_data;
int64_t sizeD, isizeT, isizeH, isizeW;
int64_t istrideD, istrideT, istrideH, istrideW;
int64_t totalZ;
if (input->dim() == 4) {
// Unbatched input: strides are taken as-is (no contiguity requirement).
sizeD = input->size(0);
isizeT = input->size(1);
isizeH = input->size(2);
isizeW = input->size(3);
istrideD = input->stride(0);
istrideT = input->stride(1);
istrideH = input->stride(2);
istrideW = input->stride(3);
THCTensor_(resize4d)(state, output, sizeD, osizeT, osizeH, osizeW);
totalZ = sizeD * osizeT;
} else {
// Batched input: make it contiguous so batch and feature dims collapse.
input = THCTensor_(newContiguous)(state, input);
int64_t sizeB = input->size(0);
sizeD = input->size(1);
isizeT = input->size(2);
isizeH = input->size(3);
isizeW = input->size(4);
istrideD = input->stride(1);
istrideT = input->stride(2);
istrideH = input->stride(3);
istrideW = input->stride(4);
THCTensor_(resize5d)(state, output, sizeB, sizeD, osizeT, osizeH, osizeW);
totalZ = sizeB * sizeD * osizeT;
}
input_data = THCTensor_(data)(state, input);
output_data = THCTensor_(data)(state, output);
int64_t offsetZ = 0;
dim3 threads(32, 8);
// each H*W plane is processed by blocksH thread blocks
int blocksH = max((int)(16L / totalZ), 1);
// Chunk the launches so gridDim.x stays within the 65535 limit.
while (totalZ > 0) {
dim3 blocks(totalZ > 65535 ? 65535 : totalZ, blocksH);
hipLaunchKernelGGL(( cunn_VolumetricAdaptiveAveragePooling_updateOutput_kernel)
, dim3(blocks), dim3(threads), 0, THCState_getCurrentStream(state),
input_data, output_data, isizeT, isizeH, isizeW, osizeT, osizeH, osizeW,
istrideD, istrideT, istrideH, istrideW, offsetZ
);
totalZ -= 65535;
offsetZ += 65535;
THCudaCheck(hipGetLastError());
}
if (input->dim() == 5) {
// clean: release the contiguous copy made for the batched path
THCTensor_(free)(state, input);
}
}
// Adaptive average pooling backward pass: scatters gradOutput back into a
// zero-initialized gradInput shaped like input. gradOutput is made
// contiguous first and freed at the end. Launches are chunked so gridDim.x
// never exceeds 65535.
void THNN_(VolumetricAdaptiveAveragePooling_updateGradInput)(
THCState *state,
THCTensor *input,
THCTensor *gradOutput,
THCTensor *gradInput)
{
THCUNN_assertSameGPU(state, 3, input, gradOutput, gradInput);
gradOutput = THCTensor_(newContiguous)(state, gradOutput);
THCTensor_(resizeAs)(state, gradInput, input);
THCTensor_(zero)(state, gradInput);
real *gradInput_data;
real *gradOutput_data;
int64_t sizeD, isizeT, isizeH, isizeW;
int64_t osizeT, osizeH, osizeW;
int64_t totalZ;
if (input->dim() == 4) {
sizeD = input->size(0);
isizeT = input->size(1);
isizeH = input->size(2);
isizeW = input->size(3);
osizeT = gradOutput->size(1);
osizeH = gradOutput->size(2);
osizeW = gradOutput->size(3);
} else {
sizeD = input->size(1);
isizeT = input->size(2);
isizeH = input->size(3);
isizeW = input->size(4);
osizeT = gradOutput->size(2);
osizeH = gradOutput->size(3);
osizeW = gradOutput->size(4);
}
// somehow nonatomic is passing all test for volumetric case.
bool atomic = false; //(isizeW%osizeW != 0) || (isizeH%osizeH != 0) || (isizeT%osizeT != 0);
// The atomic variant would iterate over output planes; the nonatomic one
// iterates over input planes.
if (input->dim() == 4) {
totalZ = atomic ? sizeD * osizeT : sizeD * isizeT;
} else {
int sizeB = input->size(0);
totalZ = atomic ? sizeB * sizeD * osizeT : sizeB * sizeD * isizeT;
}
gradInput_data = THCTensor_(data)(state, gradInput);
gradOutput_data = THCTensor_(data)(state, gradOutput);
int64_t offsetZ = 0;
dim3 threads(32, 8);
// each H*W plane is processed by blocksH thread blocks
int blocksH = max((int)(16L / totalZ), 1);
// Chunk the launches so gridDim.x stays within the 65535 limit.
while (totalZ > 0) {
dim3 blocks(totalZ > 65535 ? 65535 : totalZ, blocksH);
if (atomic)
{
hipLaunchKernelGGL(( cunn_atomic_VolumetricAdaptiveAveragePooling_updateGradInput_kernel)
, dim3(blocks), dim3(threads), 0, THCState_getCurrentStream(state),
gradInput_data, gradOutput_data, isizeT, isizeH, isizeW,
osizeT, osizeH, osizeW, offsetZ
);
} else {
hipLaunchKernelGGL(( cunn_VolumetricAdaptiveAveragePooling_updateGradInput_kernel)
, dim3(blocks), dim3(threads), 0, THCState_getCurrentStream(state),
gradInput_data, gradOutput_data, isizeT, isizeH, isizeW,
osizeT, osizeH, osizeW, offsetZ
);
}
totalZ -= 65535;
offsetZ += 65535;
THCudaCheck(hipGetLastError());
}
// clean: release the contiguous copy of gradOutput
THCTensor_(free)(state, gradOutput);
}
#endif
| 4a606cdd9c2f83d2f6f065f6c34b4d03ab652d16.cu | #ifndef THC_GENERIC_FILE
#define THC_GENERIC_FILE "generic/VolumetricAdaptiveAveragePooling.cu"
#else
#include "../common.h"
// 5d tensor B x D x T x H x W
// Adaptive average pooling forward pass: resizes `output` to spatial size
// osizeT x osizeH x osizeW (note the T, W, H parameter order) and launches
// the pooling kernel. Accepts 4D (D,T,H,W) or 5D (B,D,T,H,W) input; the 5D
// path first forces a contiguous copy. Because gridDim.x is capped at
// 65535, the work is issued in chunks, with offsetZ telling each launch
// where it starts.
void THNN_(VolumetricAdaptiveAveragePooling_updateOutput)(
THCState *state,
THCTensor *input,
THCTensor *output,
int osizeT,
int osizeW,
int osizeH)
{
THCUNN_assertSameGPU(state, 2, input, output);
THCUNN_argCheck(state, !input->is_empty() && (input->dim() == 4 || input->dim() == 5), 2, input,
"non-empty 4D or 5D (batch mode) tensor expected for input, but got: %s");
real *output_data;
real *input_data;
int64_t sizeD, isizeT, isizeH, isizeW;
int64_t istrideD, istrideT, istrideH, istrideW;
int64_t totalZ;
if (input->dim() == 4) {
// Unbatched input: strides are taken as-is (no contiguity requirement).
sizeD = input->size(0);
isizeT = input->size(1);
isizeH = input->size(2);
isizeW = input->size(3);
istrideD = input->stride(0);
istrideT = input->stride(1);
istrideH = input->stride(2);
istrideW = input->stride(3);
THCTensor_(resize4d)(state, output, sizeD, osizeT, osizeH, osizeW);
totalZ = sizeD * osizeT;
} else {
// Batched input: make it contiguous so batch and feature dims collapse.
input = THCTensor_(newContiguous)(state, input);
int64_t sizeB = input->size(0);
sizeD = input->size(1);
isizeT = input->size(2);
isizeH = input->size(3);
isizeW = input->size(4);
istrideD = input->stride(1);
istrideT = input->stride(2);
istrideH = input->stride(3);
istrideW = input->stride(4);
THCTensor_(resize5d)(state, output, sizeB, sizeD, osizeT, osizeH, osizeW);
totalZ = sizeB * sizeD * osizeT;
}
input_data = THCTensor_(data)(state, input);
output_data = THCTensor_(data)(state, output);
int64_t offsetZ = 0;
dim3 threads(32, 8);
// each H*W plane is processed by blocksH thread blocks
int blocksH = max((int)(16L / totalZ), 1);
// Chunk the launches so gridDim.x stays within the 65535 limit.
while (totalZ > 0) {
dim3 blocks(totalZ > 65535 ? 65535 : totalZ, blocksH);
cunn_VolumetricAdaptiveAveragePooling_updateOutput_kernel
<<<blocks, threads, 0, THCState_getCurrentStream(state)>>>(
input_data, output_data, isizeT, isizeH, isizeW, osizeT, osizeH, osizeW,
istrideD, istrideT, istrideH, istrideW, offsetZ
);
totalZ -= 65535;
offsetZ += 65535;
THCudaCheck(cudaGetLastError());
}
if (input->dim() == 5) {
// clean: release the contiguous copy made for the batched path
THCTensor_(free)(state, input);
}
}
// Adaptive average pooling backward pass: scatters gradOutput back into a
// zero-initialized gradInput shaped like input. gradOutput is made
// contiguous first and freed at the end. Launches are chunked so gridDim.x
// never exceeds 65535.
void THNN_(VolumetricAdaptiveAveragePooling_updateGradInput)(
THCState *state,
THCTensor *input,
THCTensor *gradOutput,
THCTensor *gradInput)
{
THCUNN_assertSameGPU(state, 3, input, gradOutput, gradInput);
gradOutput = THCTensor_(newContiguous)(state, gradOutput);
THCTensor_(resizeAs)(state, gradInput, input);
THCTensor_(zero)(state, gradInput);
real *gradInput_data;
real *gradOutput_data;
int64_t sizeD, isizeT, isizeH, isizeW;
int64_t osizeT, osizeH, osizeW;
int64_t totalZ;
if (input->dim() == 4) {
sizeD = input->size(0);
isizeT = input->size(1);
isizeH = input->size(2);
isizeW = input->size(3);
osizeT = gradOutput->size(1);
osizeH = gradOutput->size(2);
osizeW = gradOutput->size(3);
} else {
sizeD = input->size(1);
isizeT = input->size(2);
isizeH = input->size(3);
isizeW = input->size(4);
osizeT = gradOutput->size(2);
osizeH = gradOutput->size(3);
osizeW = gradOutput->size(4);
}
// somehow nonatomic is passing all test for volumetric case.
bool atomic = false; //(isizeW%osizeW != 0) || (isizeH%osizeH != 0) || (isizeT%osizeT != 0);
// The atomic variant would iterate over output planes; the nonatomic one
// iterates over input planes.
if (input->dim() == 4) {
totalZ = atomic ? sizeD * osizeT : sizeD * isizeT;
} else {
int sizeB = input->size(0);
totalZ = atomic ? sizeB * sizeD * osizeT : sizeB * sizeD * isizeT;
}
gradInput_data = THCTensor_(data)(state, gradInput);
gradOutput_data = THCTensor_(data)(state, gradOutput);
int64_t offsetZ = 0;
dim3 threads(32, 8);
// each H*W plane is processed by blocksH thread blocks
int blocksH = max((int)(16L / totalZ), 1);
// Chunk the launches so gridDim.x stays within the 65535 limit.
while (totalZ > 0) {
dim3 blocks(totalZ > 65535 ? 65535 : totalZ, blocksH);
if (atomic)
{
cunn_atomic_VolumetricAdaptiveAveragePooling_updateGradInput_kernel
<<<blocks, threads, 0, THCState_getCurrentStream(state)>>>(
gradInput_data, gradOutput_data, isizeT, isizeH, isizeW,
osizeT, osizeH, osizeW, offsetZ
);
} else {
cunn_VolumetricAdaptiveAveragePooling_updateGradInput_kernel
<<<blocks, threads, 0, THCState_getCurrentStream(state)>>>(
gradInput_data, gradOutput_data, isizeT, isizeH, isizeW,
osizeT, osizeH, osizeW, offsetZ
);
}
totalZ -= 65535;
offsetZ += 65535;
THCudaCheck(cudaGetLastError());
}
// clean: release the contiguous copy of gradOutput
THCTensor_(free)(state, gradOutput);
}
#endif
|
647c3fcfc36605d43a1c44fe8786453dafa0769d.hip | // !!! This is a file automatically generated by hipify!!!
#include <math.h>
#include "hip/hip_runtime.h"
#include "npmmv_dense_kernel.h"
/*
 * For each matrix row r computes
 *   out_vector[r] = prod_{i < innerdim} (1 - matrix[r*innerdim + i] * in_vector[i]).
 * One thread per row; threads with row >= outerdim do nothing.
 */
__global__ void negative_prob_multiply_dense_matrix_vector_kernel(float* matrix, float* in_vector, float* out_vector,
                                                                  unsigned int outerdim, unsigned int innerdim) {
    // We parallelize at the level of matrix rows.
    unsigned int row = blockIdx.x*blockDim.x+threadIdx.x;
    // Float literals (1.0f) keep the running product in single precision;
    // the previous double literals forced a float->double->float round trip
    // on every iteration.
    float prob = 1.0f;
    if (row < outerdim) {
        // each thread computes one element of the output vector
        for (unsigned int i = 0; i < innerdim; i++) {
            prob *= 1.0f - (matrix[row * innerdim + i] * in_vector[i]);
        }
        out_vector[row] = prob;
    }
}
// Host launcher: one thread per matrix row, capped at 512 threads per block.
// Grid size is computed with exact integer ceiling division instead of the
// previous double-based ceil().
void internal_negative_prob_multiply_dense_matrix_vector_gpu(float* matrix, float* in_vector, float* out_vector,
                                                             unsigned int outerdim, unsigned int innerdim) {
    // A launch with zero threads per block is invalid; with no rows there is
    // nothing to compute anyway.
    if (outerdim == 0) {
        return;
    }
    // declare the number of blocks per grid and the number of threads per block
    // use 1 to 512 threads per block
    dim3 threadsPerBlock(outerdim);
    dim3 blocksPerGrid(1);
    if (outerdim > 512) {
        threadsPerBlock.x = 512;
        // ceil(outerdim / 512) without overflow or floating-point rounding
        // (outerdim > 512 here, so the subtraction cannot underflow).
        blocksPerGrid.x = 1 + (outerdim - 1) / threadsPerBlock.x;
    }
    hipLaunchKernelGGL(( negative_prob_multiply_dense_matrix_vector_kernel), dim3(blocksPerGrid),dim3(threadsPerBlock), 0, 0, matrix, in_vector, out_vector, outerdim, innerdim);
}
| 647c3fcfc36605d43a1c44fe8786453dafa0769d.cu | #include <math.h>
#include "cuda_runtime.h"
#include "npmmv_dense_kernel.h"
/*
 * For each matrix row r computes
 *   out_vector[r] = prod_{i < innerdim} (1 - matrix[r*innerdim + i] * in_vector[i]).
 * One thread per row; threads with row >= outerdim do nothing.
 */
__global__ void negative_prob_multiply_dense_matrix_vector_kernel(float* matrix, float* in_vector, float* out_vector,
                                                                  unsigned int outerdim, unsigned int innerdim) {
    // We parallelize at the level of matrix rows.
    unsigned int row = blockIdx.x*blockDim.x+threadIdx.x;
    // Float literals (1.0f) keep the running product in single precision;
    // the previous double literals forced a float->double->float round trip
    // on every iteration.
    float prob = 1.0f;
    if (row < outerdim) {
        // each thread computes one element of the output vector
        for (unsigned int i = 0; i < innerdim; i++) {
            prob *= 1.0f - (matrix[row * innerdim + i] * in_vector[i]);
        }
        out_vector[row] = prob;
    }
}
// Host launcher: one thread per matrix row, capped at 512 threads per block.
// Grid size is computed with exact integer ceiling division instead of the
// previous double-based ceil().
void internal_negative_prob_multiply_dense_matrix_vector_gpu(float* matrix, float* in_vector, float* out_vector,
                                                             unsigned int outerdim, unsigned int innerdim) {
    // A launch with zero threads per block is invalid; with no rows there is
    // nothing to compute anyway.
    if (outerdim == 0) {
        return;
    }
    // declare the number of blocks per grid and the number of threads per block
    // use 1 to 512 threads per block
    dim3 threadsPerBlock(outerdim);
    dim3 blocksPerGrid(1);
    if (outerdim > 512) {
        threadsPerBlock.x = 512;
        // ceil(outerdim / 512) without overflow or floating-point rounding
        // (outerdim > 512 here, so the subtraction cannot underflow).
        blocksPerGrid.x = 1 + (outerdim - 1) / threadsPerBlock.x;
    }
    negative_prob_multiply_dense_matrix_vector_kernel<<<blocksPerGrid,threadsPerBlock>>>(matrix, in_vector, out_vector, outerdim, innerdim);
}
|
c3eec7e635a599ec5ebd775c040e58cf85062a9a.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
/* - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - **\
* @authors Aaron Oziel, Sean Blackbourn
*
* Fumitaka Kawasaki (5/3/14):
* All functions were completed and working. Therefore, the followng comments
* were removed.
*
* Aaron Wrote (2/3/14):
* All comments are now tracking progress in conversion from old GpuSim_struct.cu
* file to the new one here. This is a quick key to keep track of their meanings.
*
* TODO = Needs work and/or is blank. Used to indicate possibly problematic
* functions.
* DONE = Likely complete functions. Will still need to be checked for
* variable continuity and proper arguments.
* REMOVED = Deleted, likely due to it becoming unnecessary or not necessary
* for GPU implementation. These functions will likely have to be
* removed from the Model super class.
* COPIED = These functions were in the original GpuSim_struct.cu file
* and were directly copy-pasted across to this file.
*
\** - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - **/
#include "GPUSpikingModel.h"
#ifdef PERFORMANCE_METRICS
float g_time;
hipEvent_t start, stop;
#endif // PERFORMANCE_METRICS
__constant__ int d_debug_mask[1];
// ----------------------------------------------------------------------------
// Construct the GPU model: delegates component ownership to the Model base
// class and NULLs all device-side pointers; actual device allocation is
// deferred to setupSim().
GPUSpikingModel::GPUSpikingModel(Connections *conns, IAllNeurons *neurons, IAllSynapses *synapses, Layout *layout) :
Model::Model(conns, neurons, synapses, layout),
synapseIndexMapDevice(NULL),
randNoise_d(NULL),
m_allNeuronsDevice(NULL),
m_allSynapsesDevice(NULL)
{
}
// Device memory is released in cleanupSim() (via deleteDeviceStruct), not
// here.
GPUSpikingModel::~GPUSpikingModel()
{
//Let Model base class handle de-allocation
}
/*
* Allocates and initializes memories on CUDA device.
*
* @param[out] allNeuronsDevice Memory loation of the pointer to the neurons list on device memory.
* @param[out] allSynapsesDevice Memory loation of the pointer to the synapses list on device memory.
* @param[in] sim_info Pointer to the simulation information.
*/
void GPUSpikingModel::allocDeviceStruct(void** allNeuronsDevice, void** allSynapsesDevice, SimulationInfo *sim_info)
{
// Allocate Neurons and Synapses strucs on GPU device memory
m_neurons->allocNeuronDeviceStruct( allNeuronsDevice, sim_info );
m_synapses->allocSynapseDeviceStruct( allSynapsesDevice, sim_info );
// Allocate memory for random noise array (one float per neuron, consumed
// by advance() via normalMTGPU)
int neuron_count = sim_info->totalNeurons;
BGSIZE randNoise_d_size = neuron_count * sizeof (float); // size of random noise array
HANDLE_ERROR( hipMalloc ( ( void ** ) &randNoise_d, randNoise_d_size ) );
// Copy host neuron and synapse arrays into GPU device
m_neurons->copyNeuronHostToDevice( *allNeuronsDevice, sim_info );
m_synapses->copySynapseHostToDevice( *allSynapsesDevice, sim_info );
// allocate synapse inverse map in device memory
allocSynapseImap( neuron_count );
}
/*
* Copies device memories to host memories and deallocaes them.
*
* @param[out] allNeuronsDevice Memory loation of the pointer to the neurons list on device memory.
* @param[out] allSynapsesDevice Memory loation of the pointer to the synapses list on device memory.
* @param[in] sim_info Pointer to the simulation information.
*/
// Mirror of allocDeviceStruct: copies neuron/synapse state back to the host
// before freeing every device allocation made during setup.
void GPUSpikingModel::deleteDeviceStruct(void** allNeuronsDevice, void** allSynapsesDevice, SimulationInfo *sim_info)
{
// copy device synapse and neuron structs to host memory
m_neurons->copyNeuronDeviceToHost( *allNeuronsDevice, sim_info );
// Deallocate device memory
m_neurons->deleteNeuronDeviceStruct( *allNeuronsDevice, sim_info );
// copy device synapse and neuron structs to host memory
m_synapses->copySynapseDeviceToHost( *allSynapsesDevice, sim_info );
// Deallocate device memory
m_synapses->deleteSynapseDeviceStruct( *allSynapsesDevice );
deleteSynapseImap();
HANDLE_ERROR( hipFree( randNoise_d ) );
}
/*
* Sets up the Simulation.
*
* @param sim_info SimulationInfo class to read information from.
*/
void GPUSpikingModel::setupSim(SimulationInfo *sim_info)
{
// Set device ID
HANDLE_ERROR( hipSetDevice( g_deviceId ) );
// Set DEBUG flag (copied into the __constant__ d_debug_mask symbol)
HANDLE_ERROR( hipMemcpyToSymbol (d_debug_mask, &g_debug_mask, sizeof(int) ) );
Model::setupSim(sim_info);
//initialize Mersenne Twister
//assuming neuron_count >= 100 and is a multiple of 100. Note rng_mt_rng_count must be <= MT_RNG_COUNT
int rng_blocks = 25; //# of blocks the kernel will use
int rng_nPerRng = 4; //# of iterations per thread (thread granularity, # of rands generated per thread)
int rng_mt_rng_count = sim_info->totalNeurons/rng_nPerRng; //# of threads to generate for neuron_count rand #s
int rng_threads = rng_mt_rng_count/rng_blocks; //# threads per block needed
initMTGPU(sim_info->seed, rng_blocks, rng_threads, rng_nPerRng, rng_mt_rng_count);
#ifdef PERFORMANCE_METRICS
// Create the timing events and zero the accumulated GPU phase timers.
hipEventCreate( &start );
hipEventCreate( &stop );
t_gpu_rndGeneration = 0.0;
t_gpu_advanceNeurons = 0.0;
t_gpu_advanceSynapses = 0.0;
t_gpu_calcSummation = 0.0;
#endif // PERFORMANCE_METRICS
// allocates memories on CUDA device
allocDeviceStruct((void **)&m_allNeuronsDevice, (void **)&m_allSynapsesDevice, sim_info);
// copy inverse map to the device memory
copySynapseIndexMapHostToDevice(*m_synapseIndexMap, sim_info->totalNeurons);
// set some parameters used for advanceNeuronsDevice
m_neurons->setAdvanceNeuronsDeviceParams(*m_synapses);
// set some parameters used for advanceSynapsesDevice
m_synapses->setAdvanceSynapsesDeviceParams();
}
/*
* Begin terminating the simulator.
*
* @param sim_info SimulationInfo to refer.
*/
void GPUSpikingModel::cleanupSim(SimulationInfo *sim_info)
{
// deallocates memories on CUDA device (also copies results back to host)
deleteDeviceStruct((void**)&m_allNeuronsDevice, (void**)&m_allSynapsesDevice, sim_info);
#ifdef PERFORMANCE_METRICS
// Destroy the timing events created in setupSim().
hipEventDestroy( start );
hipEventDestroy( stop );
#endif // PERFORMANCE_METRICS
}
/*
* Loads the simulation based on istream input.
*
* @param input istream to read from.
* @param sim_info used as a reference to set info for neurons and synapses.
*/
// Restores simulation state from the stream via the base class, then pushes
// the freshly-loaded host-side neuron/synapse data and inverse map back to
// the device so host and device stay consistent.
void GPUSpikingModel::deserialize(istream& input, const SimulationInfo *sim_info)
{
Model::deserialize(input, sim_info);
// copy inverse map to the device memory
copySynapseIndexMapHostToDevice(*m_synapseIndexMap, sim_info->totalNeurons);
// Reinitialize device struct - Copy host neuron and synapse arrays into GPU device
m_neurons->copyNeuronHostToDevice( m_allNeuronsDevice, sim_info );
m_synapses->copySynapseHostToDevice( m_allSynapsesDevice, sim_info );
}
/*
* Advance everything in the model one time step. In this case, that
* means calling all of the kernels that do the "micro step" updating
* (i.e., NOT the stuff associated with growth).
*
* @param sim_info SimulationInfo class to read information from.
*/
void GPUSpikingModel::advance(const SimulationInfo *sim_info)
{
#ifdef PERFORMANCE_METRICS
// Reset CUDA timer to start measurement of GPU operations
cudaStartTimer();
#endif // PERFORMANCE_METRICS
// Generate fresh per-neuron random noise for this step.
normalMTGPU(randNoise_d);
#ifdef PERFORMANCE_METRICS
cudaLapTime(t_gpu_rndGeneration);
cudaStartTimer();
#endif // PERFORMANCE_METRICS
// display running info to console
// Advance neurons ------------->
m_neurons->advanceNeurons(*m_synapses, m_allNeuronsDevice, m_allSynapsesDevice, sim_info, randNoise_d, synapseIndexMapDevice);
#ifdef PERFORMANCE_METRICS
cudaLapTime(t_gpu_advanceNeurons);
cudaStartTimer();
#endif // PERFORMANCE_METRICS
// Advance synapses ------------->
m_synapses->advanceSynapses(m_allSynapsesDevice, m_allNeuronsDevice, synapseIndexMapDevice, sim_info);
#ifdef PERFORMANCE_METRICS
cudaLapTime(t_gpu_advanceSynapses);
cudaStartTimer();
#endif // PERFORMANCE_METRICS
// calculate summation point (accumulate psr of incoming synapses)
calcSummationMap(sim_info);
#ifdef PERFORMANCE_METRICS
cudaLapTime(t_gpu_calcSummation);
#endif // PERFORMANCE_METRICS
}
/*
* Add psr of all incoming synapses to summation points.
*
* @param[in] sim_info Pointer to the simulation information.
*/
void GPUSpikingModel::calcSummationMap(const SimulationInfo *sim_info)
{
// CUDA parameters: one thread per neuron, 256 threads per block,
// grid sized by ceiling division.
const int threadsPerBlock = 256;
int blocksPerGrid = ( sim_info->totalNeurons + threadsPerBlock - 1 ) / threadsPerBlock;
hipLaunchKernelGGL(( calcSummationMapDevice) , dim3(blocksPerGrid), dim3(threadsPerBlock) , 0, 0, sim_info->totalNeurons, m_allNeuronsDevice, synapseIndexMapDevice, m_allSynapsesDevice );
}
/*
* Update the connection of all the Neurons and Synapses of the simulation.
*
* @param sim_info SimulationInfo class to read information from.
*/
// Pulls spike statistics from the device, lets the Connections object decide
// whether the network changed, and if so rebuilds the synapse weights and
// the inverse map on both host and device.
void GPUSpikingModel::updateConnections(const SimulationInfo *sim_info)
{
dynamic_cast<AllSpikingNeurons*>(m_neurons)->copyNeuronDeviceSpikeCountsToHost(m_allNeuronsDevice, sim_info);
dynamic_cast<AllSpikingNeurons*>(m_neurons)->copyNeuronDeviceSpikeHistoryToHost(m_allNeuronsDevice, sim_info);
// Update Connections data
if (m_conns->updateConnections(*m_neurons, sim_info, m_layout)) {
m_conns->updateSynapsesWeights(sim_info->totalNeurons, *m_neurons, *m_synapses, sim_info, m_allNeuronsDevice, m_allSynapsesDevice, m_layout);
// create synapse inverse map
m_synapses->createSynapseImap(m_synapseIndexMap, sim_info);
// copy inverse map to the device memory
copySynapseIndexMapHostToDevice(*m_synapseIndexMap, sim_info->totalNeurons);
}
}
/*
* Update the Neuron's history.
*
* @param sim_info SimulationInfo to refer from.
*/
void GPUSpikingModel::updateHistory(const SimulationInfo *sim_info)
{
Model::updateHistory(sim_info);
// clear spike count on the device so the next epoch starts from zero
dynamic_cast<AllSpikingNeurons*>(m_neurons)->clearNeuronSpikeCounts(m_allNeuronsDevice, sim_info);
}
/* ------------------*\
|* # Helper Functions
\* ------------------*/
/*
* Allocate device memory for synapse inverse map.
* @param count The number of neurons.
*/
void GPUSpikingModel::allocSynapseImap( int count )
{
    SynapseIndexMap synapseIndexMap;
    // The per-synapse index-map arrays are allocated lazily by
    // copySynapseIndexMapHostToDevice(), which only frees the previous
    // arrays when the stored pointers are non-NULL, and deleteSynapseImap()
    // frees them unconditionally. Initialize them to NULL here so neither
    // function can ever free an indeterminate stack value that was copied
    // into the device-side struct.
    synapseIndexMap.outgoingSynapseIndexMap = NULL;
    synapseIndexMap.incomingSynapseIndexMap = NULL;
    // Per-neuron begin offsets and counts for outgoing synapses, zeroed.
    HANDLE_ERROR( hipMalloc( ( void ** ) &synapseIndexMap.outgoingSynapseBegin, count * sizeof( BGSIZE ) ) );
    HANDLE_ERROR( hipMalloc( ( void ** ) &synapseIndexMap.outgoingSynapseCount, count * sizeof( BGSIZE ) ) );
    HANDLE_ERROR( hipMemset(synapseIndexMap.outgoingSynapseBegin, 0, count * sizeof( BGSIZE ) ) );
    HANDLE_ERROR( hipMemset(synapseIndexMap.outgoingSynapseCount, 0, count * sizeof( BGSIZE ) ) );
    // Per-neuron begin offsets and counts for incoming synapses, zeroed.
    HANDLE_ERROR( hipMalloc( ( void ** ) &synapseIndexMap.incomingSynapseBegin, count * sizeof( BGSIZE ) ) );
    HANDLE_ERROR( hipMalloc( ( void ** ) &synapseIndexMap.incomingSynapseCount, count * sizeof( BGSIZE ) ) );
    HANDLE_ERROR( hipMemset(synapseIndexMap.incomingSynapseBegin, 0, count * sizeof( BGSIZE ) ) );
    HANDLE_ERROR( hipMemset(synapseIndexMap.incomingSynapseCount, 0, count * sizeof( BGSIZE ) ) );
    // Publish the initialized struct to the device-side copy.
    HANDLE_ERROR( hipMalloc( ( void ** ) &synapseIndexMapDevice, sizeof( SynapseIndexMap ) ) );
    HANDLE_ERROR( hipMemcpy( synapseIndexMapDevice, &synapseIndexMap, sizeof( SynapseIndexMap ), hipMemcpyHostToDevice ) );
}
/*
* Deallocate device memory for synapse inverse map.
*/
// Fetches the device-side SynapseIndexMap struct and frees every array it
// points to, then the struct itself.
// NOTE(review): the IndexMap arrays are allocated lazily in
// copySynapseIndexMapHostToDevice(); hipFree(NULL) is a no-op, so this is
// only safe if those pointers were initialized to NULL before ever reaching
// the device — verify allocSynapseImap() guarantees that.
void GPUSpikingModel::deleteSynapseImap( )
{
SynapseIndexMap synapseIndexMap;
HANDLE_ERROR( hipMemcpy ( &synapseIndexMap, synapseIndexMapDevice, sizeof( SynapseIndexMap ), hipMemcpyDeviceToHost ) );
HANDLE_ERROR( hipFree( synapseIndexMap.outgoingSynapseBegin ) );
HANDLE_ERROR( hipFree( synapseIndexMap.outgoingSynapseCount ) );
HANDLE_ERROR( hipFree( synapseIndexMap.outgoingSynapseIndexMap ) );
HANDLE_ERROR( hipFree( synapseIndexMap.incomingSynapseBegin ) );
HANDLE_ERROR( hipFree( synapseIndexMap.incomingSynapseCount ) );
HANDLE_ERROR( hipFree( synapseIndexMap.incomingSynapseIndexMap ) );
HANDLE_ERROR( hipFree( synapseIndexMapDevice ) );
}
/*
* Copy SynapseIndexMap in host memory to SynapseIndexMap in device memory.
*
* @param synapseIndexMapHost Reference to the SynapseIndexMap in host memory.
* @param neuron_count The number of neurons.
*/
void GPUSpikingModel::copySynapseIndexMapHostToDevice(SynapseIndexMap &synapseIndexMapHost, int neuron_count)
{
        // Total number of synapses in the host map; nothing to copy when empty.
        int total_synapse_counts = dynamic_cast<AllSynapses*>(m_synapses)->total_synapse_counts;
        if (total_synapse_counts == 0)
                return;
        // Fetch the current device-side struct so the fixed-size begin/count
        // arrays (allocated once in allocSynapseImap) can be reused in place.
        SynapseIndexMap synapseIndexMap;
        HANDLE_ERROR( hipMemcpy ( &synapseIndexMap, synapseIndexMapDevice, sizeof( SynapseIndexMap ), hipMemcpyDeviceToHost ) );
        // forward map: per-neuron begin offsets and counts of outgoing synapses
        HANDLE_ERROR( hipMemcpy ( synapseIndexMap.outgoingSynapseBegin, synapseIndexMapHost.outgoingSynapseBegin, neuron_count * sizeof( BGSIZE ), hipMemcpyHostToDevice ) );
        HANDLE_ERROR( hipMemcpy ( synapseIndexMap.outgoingSynapseCount, synapseIndexMapHost.outgoingSynapseCount, neuron_count * sizeof( BGSIZE ), hipMemcpyHostToDevice ) );
        // the number of synapses may change, so we reallocate the memory
        if (synapseIndexMap.outgoingSynapseIndexMap != NULL) {
                HANDLE_ERROR( hipFree( synapseIndexMap.outgoingSynapseIndexMap ) );
        }
        HANDLE_ERROR( hipMalloc( ( void ** ) &synapseIndexMap.outgoingSynapseIndexMap, total_synapse_counts * sizeof( BGSIZE ) ) );
        HANDLE_ERROR( hipMemcpy ( synapseIndexMap.outgoingSynapseIndexMap, synapseIndexMapHost.outgoingSynapseIndexMap, total_synapse_counts * sizeof( BGSIZE ), hipMemcpyHostToDevice ) );
        // active synapse map: per-neuron begin offsets and counts of incoming synapses
        HANDLE_ERROR( hipMemcpy ( synapseIndexMap.incomingSynapseBegin, synapseIndexMapHost.incomingSynapseBegin, neuron_count * sizeof( BGSIZE ), hipMemcpyHostToDevice ) );
        HANDLE_ERROR( hipMemcpy ( synapseIndexMap.incomingSynapseCount, synapseIndexMapHost.incomingSynapseCount, neuron_count * sizeof( BGSIZE ), hipMemcpyHostToDevice ) );
        // the number of synapses may change, so we reallocate the memory
        if (synapseIndexMap.incomingSynapseIndexMap != NULL) {
                HANDLE_ERROR( hipFree( synapseIndexMap.incomingSynapseIndexMap ) );
        }
        HANDLE_ERROR( hipMalloc( ( void ** ) &synapseIndexMap.incomingSynapseIndexMap, total_synapse_counts * sizeof( BGSIZE ) ) );
        HANDLE_ERROR( hipMemcpy ( synapseIndexMap.incomingSynapseIndexMap, synapseIndexMapHost.incomingSynapseIndexMap, total_synapse_counts * sizeof( BGSIZE ), hipMemcpyHostToDevice ) );
        // Push the (possibly re-pointed) struct back to device memory.
        HANDLE_ERROR( hipMemcpy ( synapseIndexMapDevice, &synapseIndexMap, sizeof( SynapseIndexMap ), hipMemcpyHostToDevice ) );
}
/* ------------------*\
|* # Global Functions
\* ------------------*/
/**
* Calculate the sum of synaptic input to each neuron.
*
* Calculate the sum of synaptic input to each neuron. One thread
* corresponds to one neuron. Iterates sequentially through the
* forward synapse index map (synapseIndexMapDevice) to access only
* existing synapses. Using this structure eliminates the need to skip
* synapses that have undergone lazy deletion from the main
* (allSynapsesDevice) synapse structure. The forward map is
* re-computed during each network restructure (once per epoch) to
* ensure that all synapse pointers for a neuron are stored
* contiguously.
*
* @param[in] totalNeurons Number of neurons in the entire simulation.
* @param[in,out] allNeuronsDevice Pointer to Neuron structures in device memory.
* @param[in] synapseIndexMapDevice Pointer to forward map structures in device memory.
* @param[in] allSynapsesDevice Pointer to Synapse structures in device memory.
*/
__global__ void calcSummationMapDevice(int totalNeurons,
                    AllSpikingNeuronsDeviceProperties* __restrict__ allNeuronsDevice,
                    const SynapseIndexMap* __restrict__ synapseIndexMapDevice,
                    const AllSpikingSynapsesDeviceProperties* __restrict__ allSynapsesDevice)
{
        // One thread per neuron; drop threads beyond the neuron count.
        const int neuronIdx = blockIdx.x * blockDim.x + threadIdx.x;
        if ( neuronIdx >= totalNeurons )
                return;

        // Number of synapses feeding this neuron; nothing to do when none exist.
        const BGSIZE incomingCount = synapseIndexMapDevice->incomingSynapseCount[neuronIdx];
        if ( incomingCount == 0 )
                return;

        // Locate this neuron's contiguous slice of the incoming-synapse index map.
        const int sliceStart = synapseIndexMapDevice->incomingSynapseBegin[neuronIdx];
        const BGSIZE* sliceBegin =
                &(synapseIndexMapDevice->incomingSynapseIndexMap[sliceStart]);

        // Accumulate the postsynaptic response (PSR) of every incoming synapse.
        BGFLOAT total = 0.0;
        for (BGSIZE s = 0; s < incomingCount; s++) {
                total += allSynapsesDevice->psr[ sliceBegin[s] ];
        }

        // Publish the summed PSR to this neuron's summation point.
        allNeuronsDevice->summation_map[neuronIdx] = total;
}
| c3eec7e635a599ec5ebd775c040e58cf85062a9a.cu | /* - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - **\
* @authors Aaron Oziel, Sean Blackbourn
*
* Fumitaka Kawasaki (5/3/14):
* All functions were completed and working. Therefore, the followng comments
* were removed.
*
* Aaron Wrote (2/3/14):
* All comments are now tracking progress in conversion from old GpuSim_struct.cu
* file to the new one here. This is a quick key to keep track of their meanings.
*
* TODO = Needs work and/or is blank. Used to indicate possibly problematic
* functions.
* DONE = Likely complete functions. Will still need to be checked for
* variable continuity and proper arguments.
* REMOVED = Deleted, likely due to it becoming unnecessary or not necessary
* for GPU implementation. These functions will likely have to be
* removed from the Model super class.
* COPIED = These functions were in the original GpuSim_struct.cu file
* and were directly copy-pasted across to this file.
*
\** - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - **/
#include "GPUSpikingModel.h"
#ifdef PERFORMANCE_METRICS
float g_time;
cudaEvent_t start, stop;
#endif // PERFORMANCE_METRICS
__constant__ int d_debug_mask[1];
// ----------------------------------------------------------------------------
// Constructor: forwards the simulation components to the Model base class and
// NULLs all device-side pointers; device memory is allocated later in setupSim().
GPUSpikingModel::GPUSpikingModel(Connections *conns, IAllNeurons *neurons, IAllSynapses *synapses, Layout *layout) :
	Model::Model(conns, neurons, synapses, layout),
	synapseIndexMapDevice(NULL),
	randNoise_d(NULL),
	m_allNeuronsDevice(NULL),
	m_allSynapsesDevice(NULL)
{
}
// Destructor: device memory is released in cleanupSim(), not here.
GPUSpikingModel::~GPUSpikingModel()
{
	//Let Model base class handle de-allocation
}
/*
* Allocates and initializes memories on CUDA device.
*
* @param[out] allNeuronsDevice Memory loation of the pointer to the neurons list on device memory.
* @param[out] allSynapsesDevice Memory loation of the pointer to the synapses list on device memory.
* @param[in] sim_info Pointer to the simulation information.
*/
void GPUSpikingModel::allocDeviceStruct(void** allNeuronsDevice, void** allSynapsesDevice, SimulationInfo *sim_info)
{
	// Allocate Neurons and Synapses strucs on GPU device memory
	m_neurons->allocNeuronDeviceStruct( allNeuronsDevice, sim_info );
	m_synapses->allocSynapseDeviceStruct( allSynapsesDevice, sim_info );

	// Allocate memory for random noise array (one float per neuron)
	int neuron_count = sim_info->totalNeurons;
	BGSIZE randNoise_d_size = neuron_count * sizeof (float);	// size of random noise array
	HANDLE_ERROR( cudaMalloc ( ( void ** ) &randNoise_d, randNoise_d_size ) );

	// Copy host neuron and synapse arrays into GPU device
	m_neurons->copyNeuronHostToDevice( *allNeuronsDevice, sim_info );
	m_synapses->copySynapseHostToDevice( *allSynapsesDevice, sim_info );

	// allocate synapse inverse map in device memory
	allocSynapseImap( neuron_count );
}
/*
* Copies device memories to host memories and deallocaes them.
*
* @param[out] allNeuronsDevice Memory loation of the pointer to the neurons list on device memory.
* @param[out] allSynapsesDevice Memory loation of the pointer to the synapses list on device memory.
* @param[in] sim_info Pointer to the simulation information.
*/
void GPUSpikingModel::deleteDeviceStruct(void** allNeuronsDevice, void** allSynapsesDevice, SimulationInfo *sim_info)
{
	// copy device synapse and neuron structs to host memory
	m_neurons->copyNeuronDeviceToHost( *allNeuronsDevice, sim_info );
	// Deallocate device memory
	m_neurons->deleteNeuronDeviceStruct( *allNeuronsDevice, sim_info );
	// copy device synapse and neuron structs to host memory
	m_synapses->copySynapseDeviceToHost( *allSynapsesDevice, sim_info );
	// Deallocate device memory
	m_synapses->deleteSynapseDeviceStruct( *allSynapsesDevice );
	// Release the inverse map and the random-noise buffer allocated in
	// allocDeviceStruct().
	deleteSynapseImap();
	HANDLE_ERROR( cudaFree( randNoise_d ) );
}
/*
* Sets up the Simulation.
*
* @param sim_info SimulationInfo class to read information from.
*/
void GPUSpikingModel::setupSim(SimulationInfo *sim_info)
{
	// Set device ID
	HANDLE_ERROR( cudaSetDevice( g_deviceId ) );
	// Set DEBUG flag (copied into __constant__ memory d_debug_mask)
	HANDLE_ERROR( cudaMemcpyToSymbol (d_debug_mask, &g_debug_mask, sizeof(int) ) );
	Model::setupSim(sim_info);
	//initialize Mersenne Twister
	//assuming neuron_count >= 100 and is a multiple of 100. Note rng_mt_rng_count must be <= MT_RNG_COUNT
	int rng_blocks = 25; //# of blocks the kernel will use
	int rng_nPerRng = 4; //# of iterations per thread (thread granularity, # of rands generated per thread)
	int rng_mt_rng_count = sim_info->totalNeurons/rng_nPerRng; //# of threads to generate for neuron_count rand #s
	int rng_threads = rng_mt_rng_count/rng_blocks; //# threads per block needed
	initMTGPU(sim_info->seed, rng_blocks, rng_threads, rng_nPerRng, rng_mt_rng_count);
#ifdef PERFORMANCE_METRICS
	// Create the CUDA events used for GPU timing and zero the accumulators.
	cudaEventCreate( &start );
	cudaEventCreate( &stop );
	t_gpu_rndGeneration = 0.0;
	t_gpu_advanceNeurons = 0.0;
	t_gpu_advanceSynapses = 0.0;
	t_gpu_calcSummation = 0.0;
#endif // PERFORMANCE_METRICS
	// allocates memories on CUDA device
	allocDeviceStruct((void **)&m_allNeuronsDevice, (void **)&m_allSynapsesDevice, sim_info);
	// copy inverse map to the device memory
	copySynapseIndexMapHostToDevice(*m_synapseIndexMap, sim_info->totalNeurons);
	// set some parameters used for advanceNeuronsDevice
	m_neurons->setAdvanceNeuronsDeviceParams(*m_synapses);
	// set some parameters used for advanceSynapsesDevice
	m_synapses->setAdvanceSynapsesDeviceParams();
}
/*
* Begin terminating the simulator.
*
* @param sim_info SimulationInfo to refer.
*/
void GPUSpikingModel::cleanupSim(SimulationInfo *sim_info)
{
	// deallocates memories on CUDA device (also copies results back to host)
	deleteDeviceStruct((void**)&m_allNeuronsDevice, (void**)&m_allSynapsesDevice, sim_info);
#ifdef PERFORMANCE_METRICS
	// Release the timing events created in setupSim().
	cudaEventDestroy( start );
	cudaEventDestroy( stop );
#endif // PERFORMANCE_METRICS
}
/*
* Loads the simulation based on istream input.
*
* @param input istream to read from.
* @param sim_info used as a reference to set info for neurons and synapses.
*/
void GPUSpikingModel::deserialize(istream& input, const SimulationInfo *sim_info)
{
	// Base class reads the serialized state into host-side structures.
	Model::deserialize(input, sim_info);

	// copy inverse map to the device memory
	copySynapseIndexMapHostToDevice(*m_synapseIndexMap, sim_info->totalNeurons);

	// Reinitialize device struct - Copy host neuron and synapse arrays into GPU device
	m_neurons->copyNeuronHostToDevice( m_allNeuronsDevice, sim_info );
	m_synapses->copySynapseHostToDevice( m_allSynapsesDevice, sim_info );
}
/*
* Advance everything in the model one time step. In this case, that
* means calling all of the kernels that do the "micro step" updating
* (i.e., NOT the stuff associated with growth).
*
* @param sim_info SimulationInfo class to read information from.
*/
void GPUSpikingModel::advance(const SimulationInfo *sim_info)
{
#ifdef PERFORMANCE_METRICS
	// Reset CUDA timer to start measurement of GPU operations
	cudaStartTimer();
#endif // PERFORMANCE_METRICS

	// Generate this step's random noise values on the device.
	normalMTGPU(randNoise_d);

#ifdef PERFORMANCE_METRICS
	cudaLapTime(t_gpu_rndGeneration);
	cudaStartTimer();
#endif // PERFORMANCE_METRICS

	// display running info to console
	// Advance neurons ------------->
	m_neurons->advanceNeurons(*m_synapses, m_allNeuronsDevice, m_allSynapsesDevice, sim_info, randNoise_d, synapseIndexMapDevice);

#ifdef PERFORMANCE_METRICS
	cudaLapTime(t_gpu_advanceNeurons);
	cudaStartTimer();
#endif // PERFORMANCE_METRICS

	// Advance synapses ------------->
	m_synapses->advanceSynapses(m_allSynapsesDevice, m_allNeuronsDevice, synapseIndexMapDevice, sim_info);

#ifdef PERFORMANCE_METRICS
	cudaLapTime(t_gpu_advanceSynapses);
	cudaStartTimer();
#endif // PERFORMANCE_METRICS

	// calculate summation point
	calcSummationMap(sim_info);

#ifdef PERFORMANCE_METRICS
	cudaLapTime(t_gpu_calcSummation);
#endif // PERFORMANCE_METRICS
}
/*
* Add psr of all incoming synapses to summation points.
*
* @param[in] sim_info Pointer to the simulation information.
*/
void GPUSpikingModel::calcSummationMap(const SimulationInfo *sim_info)
{
    // CUDA parameters: one thread per neuron, ceiling-divided into blocks.
    const int threadsPerBlock = 256;
    int blocksPerGrid = ( sim_info->totalNeurons + threadsPerBlock - 1 ) / threadsPerBlock;

    calcSummationMapDevice <<< blocksPerGrid, threadsPerBlock >>> ( sim_info->totalNeurons, m_allNeuronsDevice, synapseIndexMapDevice, m_allSynapsesDevice );
}
/*
* Update the connection of all the Neurons and Synapses of the simulation.
*
* @param sim_info SimulationInfo class to read information from.
*/
void GPUSpikingModel::updateConnections(const SimulationInfo *sim_info)
{
	// Bring the latest spike counts/history back to the host so the
	// Connections object can decide whether to restructure.
	dynamic_cast<AllSpikingNeurons*>(m_neurons)->copyNeuronDeviceSpikeCountsToHost(m_allNeuronsDevice, sim_info);
	dynamic_cast<AllSpikingNeurons*>(m_neurons)->copyNeuronDeviceSpikeHistoryToHost(m_allNeuronsDevice, sim_info);

	// Update Connections data; only rebuild the maps when something changed.
	if (m_conns->updateConnections(*m_neurons, sim_info, m_layout)) {
		m_conns->updateSynapsesWeights(sim_info->totalNeurons, *m_neurons, *m_synapses, sim_info, m_allNeuronsDevice, m_allSynapsesDevice, m_layout);
		// create synapse inverse map
		m_synapses->createSynapseImap(m_synapseIndexMap, sim_info);
		// copy inverse map to the device memory
		copySynapseIndexMapHostToDevice(*m_synapseIndexMap, sim_info->totalNeurons);
	}
}
/*
* Update the Neuron's history.
*
* @param sim_info SimulationInfo to refer from.
*/
void GPUSpikingModel::updateHistory(const SimulationInfo *sim_info)
{
	Model::updateHistory(sim_info);
	// clear spike count on the device for the next epoch
	dynamic_cast<AllSpikingNeurons*>(m_neurons)->clearNeuronSpikeCounts(m_allNeuronsDevice, sim_info);
}
/* ------------------*\
|* # Helper Functions
\* ------------------*/
/*
* Allocate device memory for synapse inverse map.
* @param count The number of neurons.
*/
/*
 * Allocate device memory for the synapse inverse map and publish the
 * struct of device pointers to synapseIndexMapDevice.
 *
 * @param count  The number of neurons (length of the begin/count arrays).
 */
void GPUSpikingModel::allocSynapseImap( int count )
{
        SynapseIndexMap synapseIndexMap;

        // Fixed-size per-neuron arrays: start offset and synapse count, zeroed.
        HANDLE_ERROR( cudaMalloc( ( void ** ) &synapseIndexMap.outgoingSynapseBegin, count * sizeof( BGSIZE ) ) );
        HANDLE_ERROR( cudaMalloc( ( void ** ) &synapseIndexMap.outgoingSynapseCount, count * sizeof( BGSIZE ) ) );
        HANDLE_ERROR( cudaMemset(synapseIndexMap.outgoingSynapseBegin, 0, count * sizeof( BGSIZE ) ) );
        HANDLE_ERROR( cudaMemset(synapseIndexMap.outgoingSynapseCount, 0, count * sizeof( BGSIZE ) ) );
        HANDLE_ERROR( cudaMalloc( ( void ** ) &synapseIndexMap.incomingSynapseBegin, count * sizeof( BGSIZE ) ) );
        HANDLE_ERROR( cudaMalloc( ( void ** ) &synapseIndexMap.incomingSynapseCount, count * sizeof( BGSIZE ) ) );
        HANDLE_ERROR( cudaMemset(synapseIndexMap.incomingSynapseBegin, 0, count * sizeof( BGSIZE ) ) );
        HANDLE_ERROR( cudaMemset(synapseIndexMap.incomingSynapseCount, 0, count * sizeof( BGSIZE ) ) );

        // The variable-length index-map arrays are allocated later, in
        // copySynapseIndexMapHostToDevice(), once the synapse count is known.
        // Explicitly NULL them here so the "!= NULL" reallocation checks there
        // and the cudaFree() calls in deleteSynapseImap() never operate on an
        // indeterminate pointer (harmless even if the struct's constructor
        // already zeroes these members).
        synapseIndexMap.outgoingSynapseIndexMap = NULL;
        synapseIndexMap.incomingSynapseIndexMap = NULL;

        // Copy the filled-in struct of device pointers to device memory.
        HANDLE_ERROR( cudaMalloc( ( void ** ) &synapseIndexMapDevice, sizeof( SynapseIndexMap ) ) );
        HANDLE_ERROR( cudaMemcpy( synapseIndexMapDevice, &synapseIndexMap, sizeof( SynapseIndexMap ), cudaMemcpyHostToDevice ) );
}
/*
* Deallocate device memory for synapse inverse map.
*/
void GPUSpikingModel::deleteSynapseImap( )
{
        // Pull the device-side struct back to the host so we can free the
        // member buffers it points at.
        SynapseIndexMap synapseIndexMap;
        HANDLE_ERROR( cudaMemcpy ( &synapseIndexMap, synapseIndexMapDevice, sizeof( SynapseIndexMap ), cudaMemcpyDeviceToHost ) );
        // Free each member array, then the struct itself.
        // NOTE(review): outgoingSynapseIndexMap / incomingSynapseIndexMap are
        // not allocated by allocSynapseImap(); they are only set by
        // copySynapseIndexMapHostToDevice(). Freeing them here is safe only if
        // they start out NULL (cudaFree(NULL) is a no-op) - confirm they are
        // zero-initialized before the first map update.
        HANDLE_ERROR( cudaFree( synapseIndexMap.outgoingSynapseBegin ) );
        HANDLE_ERROR( cudaFree( synapseIndexMap.outgoingSynapseCount ) );
        HANDLE_ERROR( cudaFree( synapseIndexMap.outgoingSynapseIndexMap ) );
        HANDLE_ERROR( cudaFree( synapseIndexMap.incomingSynapseBegin ) );
        HANDLE_ERROR( cudaFree( synapseIndexMap.incomingSynapseCount ) );
        HANDLE_ERROR( cudaFree( synapseIndexMap.incomingSynapseIndexMap ) );
        HANDLE_ERROR( cudaFree( synapseIndexMapDevice ) );
}
/*
* Copy SynapseIndexMap in host memory to SynapseIndexMap in device memory.
*
* @param synapseIndexMapHost Reference to the SynapseIndexMap in host memory.
* @param neuron_count The number of neurons.
*/
void GPUSpikingModel::copySynapseIndexMapHostToDevice(SynapseIndexMap &synapseIndexMapHost, int neuron_count)
{
        // Total number of synapses in the host map; nothing to copy when empty.
        int total_synapse_counts = dynamic_cast<AllSynapses*>(m_synapses)->total_synapse_counts;
        if (total_synapse_counts == 0)
                return;
        // Fetch the current device-side struct so the fixed-size begin/count
        // arrays (allocated once in allocSynapseImap) can be reused in place.
        SynapseIndexMap synapseIndexMap;
        HANDLE_ERROR( cudaMemcpy ( &synapseIndexMap, synapseIndexMapDevice, sizeof( SynapseIndexMap ), cudaMemcpyDeviceToHost ) );
        // forward map: per-neuron begin offsets and counts of outgoing synapses
        HANDLE_ERROR( cudaMemcpy ( synapseIndexMap.outgoingSynapseBegin, synapseIndexMapHost.outgoingSynapseBegin, neuron_count * sizeof( BGSIZE ), cudaMemcpyHostToDevice ) );
        HANDLE_ERROR( cudaMemcpy ( synapseIndexMap.outgoingSynapseCount, synapseIndexMapHost.outgoingSynapseCount, neuron_count * sizeof( BGSIZE ), cudaMemcpyHostToDevice ) );
        // the number of synapses may change, so we reallocate the memory
        if (synapseIndexMap.outgoingSynapseIndexMap != NULL) {
                HANDLE_ERROR( cudaFree( synapseIndexMap.outgoingSynapseIndexMap ) );
        }
        HANDLE_ERROR( cudaMalloc( ( void ** ) &synapseIndexMap.outgoingSynapseIndexMap, total_synapse_counts * sizeof( BGSIZE ) ) );
        HANDLE_ERROR( cudaMemcpy ( synapseIndexMap.outgoingSynapseIndexMap, synapseIndexMapHost.outgoingSynapseIndexMap, total_synapse_counts * sizeof( BGSIZE ), cudaMemcpyHostToDevice ) );
        // active synapse map: per-neuron begin offsets and counts of incoming synapses
        HANDLE_ERROR( cudaMemcpy ( synapseIndexMap.incomingSynapseBegin, synapseIndexMapHost.incomingSynapseBegin, neuron_count * sizeof( BGSIZE ), cudaMemcpyHostToDevice ) );
        HANDLE_ERROR( cudaMemcpy ( synapseIndexMap.incomingSynapseCount, synapseIndexMapHost.incomingSynapseCount, neuron_count * sizeof( BGSIZE ), cudaMemcpyHostToDevice ) );
        // the number of synapses may change, so we reallocate the memory
        if (synapseIndexMap.incomingSynapseIndexMap != NULL) {
                HANDLE_ERROR( cudaFree( synapseIndexMap.incomingSynapseIndexMap ) );
        }
        HANDLE_ERROR( cudaMalloc( ( void ** ) &synapseIndexMap.incomingSynapseIndexMap, total_synapse_counts * sizeof( BGSIZE ) ) );
        HANDLE_ERROR( cudaMemcpy ( synapseIndexMap.incomingSynapseIndexMap, synapseIndexMapHost.incomingSynapseIndexMap, total_synapse_counts * sizeof( BGSIZE ), cudaMemcpyHostToDevice ) );
        // Push the (possibly re-pointed) struct back to device memory.
        HANDLE_ERROR( cudaMemcpy ( synapseIndexMapDevice, &synapseIndexMap, sizeof( SynapseIndexMap ), cudaMemcpyHostToDevice ) );
}
/* ------------------*\
|* # Global Functions
\* ------------------*/
/**
* Calculate the sum of synaptic input to each neuron.
*
* Calculate the sum of synaptic input to each neuron. One thread
* corresponds to one neuron. Iterates sequentially through the
* forward synapse index map (synapseIndexMapDevice) to access only
* existing synapses. Using this structure eliminates the need to skip
* synapses that have undergone lazy deletion from the main
* (allSynapsesDevice) synapse structure. The forward map is
* re-computed during each network restructure (once per epoch) to
* ensure that all synapse pointers for a neuron are stored
* contiguously.
*
* @param[in] totalNeurons Number of neurons in the entire simulation.
* @param[in,out] allNeuronsDevice Pointer to Neuron structures in device memory.
* @param[in] synapseIndexMapDevice Pointer to forward map structures in device memory.
* @param[in] allSynapsesDevice Pointer to Synapse structures in device memory.
*/
__global__ void calcSummationMapDevice(int totalNeurons,
                    AllSpikingNeuronsDeviceProperties* __restrict__ allNeuronsDevice,
                    const SynapseIndexMap* __restrict__ synapseIndexMapDevice,
                    const AllSpikingSynapsesDeviceProperties* __restrict__ allSynapsesDevice)
{
        // One thread per neuron; drop threads beyond the neuron count.
        const int neuronIdx = blockIdx.x * blockDim.x + threadIdx.x;
        if ( neuronIdx >= totalNeurons )
                return;

        // Number of synapses feeding this neuron; nothing to do when none exist.
        const BGSIZE incomingCount = synapseIndexMapDevice->incomingSynapseCount[neuronIdx];
        if ( incomingCount == 0 )
                return;

        // Locate this neuron's contiguous slice of the incoming-synapse index map.
        const int sliceStart = synapseIndexMapDevice->incomingSynapseBegin[neuronIdx];
        const BGSIZE* sliceBegin =
                &(synapseIndexMapDevice->incomingSynapseIndexMap[sliceStart]);

        // Accumulate the postsynaptic response (PSR) of every incoming synapse.
        BGFLOAT total = 0.0;
        for (BGSIZE s = 0; s < incomingCount; s++) {
                total += allSynapsesDevice->psr[ sliceBegin[s] ];
        }

        // Publish the summed PSR to this neuron's summation point.
        allNeuronsDevice->summation_map[neuronIdx] = total;
}
|
e31381b56d2a71af8e9773d070a156435acd0df6.hip | // !!! This is a file automatically generated by hipify!!!
///
/// Copyright (c) 2013, Intel Corporation
///
/// Redistribution and use in source and binary forms, with or without
/// modification, are permitted provided that the following conditions
/// are met:
///
/// * Redistributions of source code must retain the above copyright
/// notice, this list of conditions and the following disclaimer.
/// * Redistributions in binary form must reproduce the above
/// copyright notice, this list of conditions and the following
/// disclaimer in the documentation and/or other materials provided
/// with the distribution.
/// * Neither the name of Intel Corporation nor the names of its
/// contributors may be used to endorse or promote products
/// derived from this software without specific prior written
/// permission.
///
/// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
/// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
/// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS
/// FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE
/// COPYRIGHT OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT,
/// INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING,
/// BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
/// LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
/// CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
/// LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN
/// ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
/// POSSIBILITY OF SUCH DAMAGE.
//////////////////////////////////////////////////////////////////////
///
/// NAME: transpose
///
/// PURPOSE: This program measures the time for the transpose of a
/// column-major stored matrix into a row-major stored matrix.
///
/// USAGE: Program input is the matrix order and the number of times to
/// repeat the operation:
///
/// transpose <matrix_size> <# iterations>
///
/// The output consists of diagnostics to make sure the
/// transpose worked and timing statistics.
///
/// HISTORY: Written by Rob Van der Wijngaart, February 2009.
/// Converted to C++11 by Jeff Hammond, February 2016 and May 2017.
///
//////////////////////////////////////////////////////////////////////
#include "prk_util.h"
// Functor applied per column index j: accumulates A^T into row i of B and
// increments the corresponding element of A.
// FIX: thrust::unary_function is parameterized as <Argument, Result>; this
// functor consumes an int and returns void, so the correct base is
// unary_function<int,void> (the original had the arguments swapped). The base
// only supplies the argument_type/result_type typedefs, so behavior is
// unchanged.
struct x : public thrust::unary_function<int,void>
{
    int i;                                // row of B being produced
    int order;                            // matrix order (matrices are order x order)
    thrust::device_vector<double> & A;    // input matrix, also incremented in place
    thrust::device_vector<double> & B;    // transpose accumulator
    x(int i, int order, thrust::device_vector<double> & A, thrust::device_vector<double> & B) :
      i(i), order(order), A(A), B(B) {}
    __host__ __device__
    void operator()(int j)
    {
        B[i*order+j] += A[j*order+i];
        A[j*order+i] += 1.0;
        return;
    }
};
//__device__
// Accumulating transpose: for every (i,j), B[i][j] += A[j][i] and A[j][i] += 1.
// Outer for_each walks rows i; the inner for_each applies functor x per column.
void transpose(const int order, thrust::device_vector<double> & A, thrust::device_vector<double> & B)
{
    thrust::counting_iterator<int> start(0);
    thrust::counting_iterator<int> end = start + order;
    thrust::for_each( thrust::device, start, end, [=,&A,&B] (int i) {
      thrust::for_each( thrust::device, start, end, x(i,order,A,B) );
    });
}
int main(int argc, char * argv[])
{
  std::cout << "Parallel Research Kernels version " << PRKVERSION << std::endl;
  std::cout << "C++11/Thrust Matrix transpose: B = A^T" << std::endl;

  //////////////////////////////////////////////////////////////////////
  /// Read and test input parameters
  //////////////////////////////////////////////////////////////////////

  int iterations;
  int order;
  try {
      if (argc < 3) {
        throw "Usage: <# iterations> <matrix order>";
      }

      // number of times to do the transpose
      iterations = std::atoi(argv[1]);
      if (iterations < 1) {
        throw "ERROR: iterations must be >= 1";
      }

      // order of a the matrix
      order = std::atoi(argv[2]);
      if (order <= 0) {
        throw "ERROR: Matrix Order must be greater than 0";
      } else if (order > ::floor(std::sqrt(INT_MAX))) {
        // order*order must fit in an int
        throw "ERROR: matrix dimension too large - overflow risk";
      }
  }
  catch (const char * e) {
    std::cout << e << std::endl;
    return 1;
  }

  std::cout << "Number of iterations  = " << iterations << std::endl;
  std::cout << "Matrix order          = " << order << std::endl;

  //////////////////////////////////////////////////////////////////////
  /// Allocate space for the input and transpose matrix
  //////////////////////////////////////////////////////////////////////

  thrust::device_vector<double> A(order*order);
  thrust::device_vector<double> B(order*order);
  // fill A with the sequence 0 to order^2-1 as doubles
  thrust::sequence(thrust::device, A.begin(), A.end() );
  thrust::fill(thrust::device, B.begin(), B.end(), 0.0);

  auto range = boost::irange(0,order);

  auto trans_time = 0.0;

  // iteration 0 is a warm-up; timing starts at iteration 1
  for (auto iter = 0; iter<=iterations; iter++) {
    if (iter==1) trans_time = prk::wtime();
#if 1
    transpose(order, A, B);
#else
    thrust::for_each( std::begin(range), std::end(range), [=,&A,&B] (int i) {
      thrust::for_each( std::begin(range), std::end(range), [=,&A,&B] (int j) {
        B[i*order+j] += A[j*order+i];
        A[j*order+i] += 1.0;
      });
    });
#endif
  }
  trans_time = prk::wtime() - trans_time;

  //////////////////////////////////////////////////////////////////////
  /// Analyze and output results
  //////////////////////////////////////////////////////////////////////

  // TODO: replace with std::generate, std::accumulate, or similar
  const auto addit = (iterations+1.) * (iterations/2.);
  auto abserr = 0.0;
  for (auto i : range) {
    for (auto j : range) {
      const int ij = i*order+j;
      const int ji = j*order+i;
      const double reference = static_cast<double>(ij)*(1.+iterations)+addit;
      abserr += ::fabs(B[ji] - reference);
    }
  }

#ifdef VERBOSE
  std::cout << "Sum of absolute differences: " << abserr << std::endl;
#endif

  const auto epsilon = 1.0e-8;
  if (abserr < epsilon) {
    std::cout << "Solution validates" << std::endl;
    auto avgtime = trans_time/iterations;
    auto bytes = (size_t)order * (size_t)order * sizeof(double);
    std::cout << "Rate (MB/s): " << 1.0e-6 * (2L*bytes)/avgtime
              << " Avg time (s): " << avgtime << std::endl;
  } else {
    // NOTE(review): the message says "squared error" but abserr is a sum of
    // absolute differences (see the loop above).
    std::cout << "ERROR: Aggregate squared error " << abserr
              << " exceeds threshold " << epsilon << std::endl;
    return 1;
  }

  return 0;
}
| e31381b56d2a71af8e9773d070a156435acd0df6.cu | ///
/// Copyright (c) 2013, Intel Corporation
///
/// Redistribution and use in source and binary forms, with or without
/// modification, are permitted provided that the following conditions
/// are met:
///
/// * Redistributions of source code must retain the above copyright
/// notice, this list of conditions and the following disclaimer.
/// * Redistributions in binary form must reproduce the above
/// copyright notice, this list of conditions and the following
/// disclaimer in the documentation and/or other materials provided
/// with the distribution.
/// * Neither the name of Intel Corporation nor the names of its
/// contributors may be used to endorse or promote products
/// derived from this software without specific prior written
/// permission.
///
/// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
/// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
/// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS
/// FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE
/// COPYRIGHT OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT,
/// INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING,
/// BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
/// LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
/// CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
/// LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN
/// ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
/// POSSIBILITY OF SUCH DAMAGE.
//////////////////////////////////////////////////////////////////////
///
/// NAME: transpose
///
/// PURPOSE: This program measures the time for the transpose of a
/// column-major stored matrix into a row-major stored matrix.
///
/// USAGE: Program input is the matrix order and the number of times to
/// repeat the operation:
///
/// transpose <matrix_size> <# iterations>
///
/// The output consists of diagnostics to make sure the
/// transpose worked and timing statistics.
///
/// HISTORY: Written by Rob Van der Wijngaart, February 2009.
/// Converted to C++11 by Jeff Hammond, February 2016 and May 2017.
///
//////////////////////////////////////////////////////////////////////
#include "prk_util.h"
// Functor applied per column index j: accumulates A^T into row i of B and
// increments the corresponding element of A.
// FIX: thrust::unary_function is parameterized as <Argument, Result>; this
// functor consumes an int and returns void, so the correct base is
// unary_function<int,void> (the original had the arguments swapped). The base
// only supplies the argument_type/result_type typedefs, so behavior is
// unchanged.
struct x : public thrust::unary_function<int,void>
{
    int i;                                // row of B being produced
    int order;                            // matrix order (matrices are order x order)
    thrust::device_vector<double> & A;    // input matrix, also incremented in place
    thrust::device_vector<double> & B;    // transpose accumulator
    x(int i, int order, thrust::device_vector<double> & A, thrust::device_vector<double> & B) :
      i(i), order(order), A(A), B(B) {}
    __host__ __device__
    void operator()(int j)
    {
        B[i*order+j] += A[j*order+i];
        A[j*order+i] += 1.0;
        return;
    }
};
//__device__
// Accumulating transpose: for every (i,j), B[i][j] += A[j][i] and A[j][i] += 1.
// Outer for_each walks rows i; the inner for_each applies functor x per column.
void transpose(const int order, thrust::device_vector<double> & A, thrust::device_vector<double> & B)
{
    thrust::counting_iterator<int> start(0);
    thrust::counting_iterator<int> end = start + order;
    thrust::for_each( thrust::device, start, end, [=,&A,&B] (int i) {
      thrust::for_each( thrust::device, start, end, x(i,order,A,B) );
    });
}
int main(int argc, char * argv[])
{
  std::cout << "Parallel Research Kernels version " << PRKVERSION << std::endl;
  std::cout << "C++11/Thrust Matrix transpose: B = A^T" << std::endl;

  //////////////////////////////////////////////////////////////////////
  /// Read and test input parameters
  //////////////////////////////////////////////////////////////////////

  int iterations;
  int order;
  try {
      if (argc < 3) {
        throw "Usage: <# iterations> <matrix order>";
      }

      // number of times to do the transpose
      iterations = std::atoi(argv[1]);
      if (iterations < 1) {
        throw "ERROR: iterations must be >= 1";
      }

      // order of a the matrix
      order = std::atoi(argv[2]);
      if (order <= 0) {
        throw "ERROR: Matrix Order must be greater than 0";
      } else if (order > std::floor(std::sqrt(INT_MAX))) {
        // order*order must fit in an int
        throw "ERROR: matrix dimension too large - overflow risk";
      }
  }
  catch (const char * e) {
    std::cout << e << std::endl;
    return 1;
  }

  std::cout << "Number of iterations  = " << iterations << std::endl;
  std::cout << "Matrix order          = " << order << std::endl;

  //////////////////////////////////////////////////////////////////////
  /// Allocate space for the input and transpose matrix
  //////////////////////////////////////////////////////////////////////

  thrust::device_vector<double> A(order*order);
  thrust::device_vector<double> B(order*order);
  // fill A with the sequence 0 to order^2-1 as doubles
  thrust::sequence(thrust::device, A.begin(), A.end() );
  thrust::fill(thrust::device, B.begin(), B.end(), 0.0);

  auto range = boost::irange(0,order);

  auto trans_time = 0.0;

  // iteration 0 is a warm-up; timing starts at iteration 1
  for (auto iter = 0; iter<=iterations; iter++) {
    if (iter==1) trans_time = prk::wtime();
#if 1
    transpose(order, A, B);
#else
    thrust::for_each( std::begin(range), std::end(range), [=,&A,&B] (int i) {
      thrust::for_each( std::begin(range), std::end(range), [=,&A,&B] (int j) {
        B[i*order+j] += A[j*order+i];
        A[j*order+i] += 1.0;
      });
    });
#endif
  }
  trans_time = prk::wtime() - trans_time;

  //////////////////////////////////////////////////////////////////////
  /// Analyze and output results
  //////////////////////////////////////////////////////////////////////

  // TODO: replace with std::generate, std::accumulate, or similar
  const auto addit = (iterations+1.) * (iterations/2.);
  auto abserr = 0.0;
  for (auto i : range) {
    for (auto j : range) {
      const int ij = i*order+j;
      const int ji = j*order+i;
      const double reference = static_cast<double>(ij)*(1.+iterations)+addit;
      abserr += std::fabs(B[ji] - reference);
    }
  }

#ifdef VERBOSE
  std::cout << "Sum of absolute differences: " << abserr << std::endl;
#endif

  const auto epsilon = 1.0e-8;
  if (abserr < epsilon) {
    std::cout << "Solution validates" << std::endl;
    auto avgtime = trans_time/iterations;
    auto bytes = (size_t)order * (size_t)order * sizeof(double);
    std::cout << "Rate (MB/s): " << 1.0e-6 * (2L*bytes)/avgtime
              << " Avg time (s): " << avgtime << std::endl;
  } else {
    // NOTE(review): the message says "squared error" but abserr is a sum of
    // absolute differences (see the loop above).
    std::cout << "ERROR: Aggregate squared error " << abserr
              << " exceeds threshold " << epsilon << std::endl;
    return 1;
  }

  return 0;
}
|
9e1256138f8e1927a079e3a739f369525b2e6a82.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
#include "kernel_hip.cuh"
uint64_t lastInteractionUpdate; /**< Date of the last interraction update */
struct vector3_t * cu_position; // Float array of entities positions [x0,y0,z0][x1,y1,z1]...
struct vector3_t * cu_currVelocity; // Float array of current entities velocities [vx0,vy0,vz0][vx1,vy1,vz1]...
struct vector3_t * cu_newVelocity; // Float array of new entities velocities [vx0,vy0,vz0][vx1,vy1,vz1]...
float * cu_mass; // Float array
/**
 * Each thread of each block receives one entity and computes its new
 * velocity by comparing it to all other entities (O(entityCount) work per
 * thread, O(entityCount^2) overall).
 * @param cu_position: vector of all the positions of the entities of the universe
 * @param cu_newVelocity: new velocity computed by each thread (overwritten here)
 * @param cu_mass: vector of all masses of the entities of the universe
 * @param entityCount: number of entities in the universe (and size of the above vectors)
 */
__global__ void kernel_compute_new_velocity(struct vector3_t * cu_position, struct vector3_t * cu_newVelocity, float * cu_mass, uint32_t entityCount)
{
    /* Fix: derive the entity index with integer arithmetic. The previous
     * fmaf(blockIdx.x, blockDim.x, threadIdx.x) routed the index through a
     * float, which silently corrupts indices >= 2^24 (and wastes an FP op). */
    uint32_t curEntity = blockIdx.x * blockDim.x + threadIdx.x;
    uint32_t entityIndex;
    struct vector3_t delta; /** Vector from the current entity to the neighbor */
    float distance;         /** Distance between the current entity and the neighbor */
    float coeff;            /** Coefficient applied for velocity calculation */
    float curX, curY, curZ;

    /* Guard: the grid rarely divides entityCount evenly. */
    if (curEntity >= entityCount) {
        return;
    }
    /** Reset the velocity accumulator for this entity */
    (cu_newVelocity[curEntity]).x = 0;
    (cu_newVelocity[curEntity]).y = 0;
    (cu_newVelocity[curEntity]).z = 0;
    /* Cache this entity's position in registers so the loop does not
     * re-read it from global memory on every iteration. */
    curX = (cu_position[curEntity]).x;
    curY = (cu_position[curEntity]).y;
    curZ = (cu_position[curEntity]).z;
    for (entityIndex = 0; entityIndex < entityCount; entityIndex++) {
        /** Ignore the current entity (no self-interaction) */
        if (entityIndex == curEntity) {
            continue;
        }
        /** Compute the slope vector delta */
        delta.x = (cu_position[entityIndex]).x - curX;
        delta.y = (cu_position[entityIndex]).y - curY;
        delta.z = (cu_position[entityIndex]).z - curZ;
        /** Compute the distance as the Euclidean norm of delta */
        distance = sqrtf(POW_2(delta.x) + POW_2(delta.y) + POW_2(delta.z));
        if (distance == 0) {
            // TODO Entity collisions
            continue;
        }
        /** Pre-computed coeff (mass / |delta|^3) to avoid repetition:
         *  delta * coeff is the unit direction scaled by mass / distance^2 */
        coeff = cu_mass[entityIndex] / (POW_3(distance));
        /** Add the neighbor contribution to the new velocity of the current entity */
        (cu_newVelocity[curEntity]).x = fmaf(delta.x, coeff, (cu_newVelocity[curEntity]).x);
        (cu_newVelocity[curEntity]).y = fmaf(delta.y, coeff, (cu_newVelocity[curEntity]).y);
        (cu_newVelocity[curEntity]).z = fmaf(delta.z, coeff, (cu_newVelocity[curEntity]).z);
    }
}
/**
 * Each thread integrates one entity: fold the freshly computed velocity
 * delta into the current velocity, then advance the position by one
 * explicit-Euler step using the velocity saved from BEFORE this update.
 * (Using the old velocity for the position step looks deliberate —
 * explicit Euler — but confirm if semi-implicit integration was intended.)
 * @param cu_position: entity positions, updated in place
 * @param cu_currVelocity: entity velocities, updated in place
 * @param cu_newVelocity: per-entity velocity deltas from kernel_compute_new_velocity
 * @param constFactor: scale applied to the velocity delta
 * @param si_timeFactor: time scale applied to the position step
 * @param entityCount: number of entities (size of the above vectors)
 */
__global__ void kernel_apply_new_velocity(struct vector3_t * cu_position,
struct vector3_t * cu_currVelocity, struct vector3_t * cu_newVelocity,
float constFactor, float si_timeFactor, int entityCount)
{
    /* Fix: integer index math. The previous fmaf() float round-trip
     * corrupts thread indices >= 2^24. */
    uint32_t curEntity = blockIdx.x * blockDim.x + threadIdx.x;
    float curVelX, curVelY, curVelZ;
    // Avoid memory overrun when the grid overshoots entityCount
    if (curEntity >= entityCount) {
        return;
    }
    // Save the pre-update velocity in registers; the position update below
    // deliberately uses these old values.
    curVelX = (cu_currVelocity[curEntity]).x;
    curVelY = (cu_currVelocity[curEntity]).y;
    curVelZ = (cu_currVelocity[curEntity]).z;
    // Update current velocity: v += newV * constFactor
    cu_currVelocity[curEntity].x = fmaf(cu_newVelocity[curEntity].x, constFactor, curVelX);
    cu_currVelocity[curEntity].y = fmaf(cu_newVelocity[curEntity].y, constFactor, curVelY);
    cu_currVelocity[curEntity].z = fmaf(cu_newVelocity[curEntity].z, constFactor, curVelZ);
    // Update position: p += oldV * si_timeFactor
    cu_position[curEntity].x = fmaf(si_timeFactor, curVelX, cu_position[curEntity].x);
    cu_position[curEntity].y = fmaf(si_timeFactor, curVelY, cu_position[curEntity].y);
    cu_position[curEntity].z = fmaf(si_timeFactor, curVelZ, cu_position[curEntity].z);
}
/**
 * Allocate the device-side buffers and upload the initial universe state
 * (positions, current velocities and masses). The new-velocity buffer is
 * left uninitialized: the compute kernel overwrites it before reading it.
 * Always returns 0; allocation failures are handled inside cuda_lib_malloc.
 */
uint32_t cuda_allocate(struct universe_t *universe)
{
    const size_t vectorBytes = universe->entityCount * sizeof(struct vector3_t);
    const size_t massBytes   = universe->entityCount * sizeof(float);

    cuda_lib_malloc((void**) &cu_position,     vectorBytes);
    cuda_lib_malloc((void**) &cu_currVelocity, vectorBytes);
    cuda_lib_malloc((void**) &cu_newVelocity,  vectorBytes);
    cuda_lib_malloc((void**) &cu_mass,         massBytes);

    cuda_lib_memcpy(cu_position,     universe->position,     vectorBytes, hipMemcpyHostToDevice);
    cuda_lib_memcpy(cu_currVelocity, universe->currVelocity, vectorBytes, hipMemcpyHostToDevice);
    cuda_lib_memcpy(cu_mass,         universe->mass,         massBytes,   hipMemcpyHostToDevice);

    return 0;
}
/**
 * Init the lastInteractionUpdate variable to avoid jump on start
 */
// Stamp the wall clock just before the simulation loop starts, so the first
// cuda_make_a_move() measures a small, realistic elapsed time instead of the
// full time since program launch.
void cuda_start_make_a_move(void)
{
lastInteractionUpdate = getTimeOfDayInUs();
}
/**
 * Compute the next move of all entities: run the O(N^2) velocity kernel,
 * scale the factors by the wall-clock time elapsed since the previous call,
 * apply the velocities, and copy the refreshed positions back to the host.
 *
 * Fix: the launch was hard-coded to 1 block of 1023 threads, so only the
 * first 1023 entities were ever simulated (the kernels bounds-check, which
 * hid the bug) and 1023 is not a multiple of the 32-thread warp. The grid
 * is now sized from universe->entityCount.
 */
void cuda_make_a_move(struct universe_t *universe, float constFactor, float si_timeFactor)
{
    const int nbThreadPerBlock = 256; /* multiple of the warp size */
    const int nbBlock = (universe->entityCount + nbThreadPerBlock - 1) / nbThreadPerBlock; /* ceil-div */
    float deltaInSec;

    if (nbBlock == 0) {
        return; /* empty universe: avoid a zero-sized grid launch */
    }
    /** Compute new velocities on all threads of all blocks */
    hipLaunchKernelGGL(( kernel_compute_new_velocity), dim3(nbBlock), dim3(nbThreadPerBlock), 0, 0, cu_position, cu_newVelocity, cu_mass, universe->entityCount);
    /** Compute the time between two apply calls and update the factors */
    deltaInSec = (getTimeOfDayInUs() - lastInteractionUpdate) / 1000000.0f;
    lastInteractionUpdate = getTimeOfDayInUs();
    constFactor *= deltaInSec;
    si_timeFactor *= deltaInSec;
    /** Apply new velocity */
    hipLaunchKernelGGL(( kernel_apply_new_velocity), dim3(nbBlock), dim3(nbThreadPerBlock), 0, 0, cu_position, cu_currVelocity, cu_newVelocity, constFactor, si_timeFactor, universe->entityCount);
    /** Copy results from device to host (presumably a blocking hipMemcpy
     * inside cuda_lib_memcpy, which also synchronizes with the kernels
     * above — TODO confirm) */
    cuda_lib_memcpy(universe->position, cu_position, universe->entityCount * sizeof(struct vector3_t), hipMemcpyDeviceToHost);
}
/**
 * Release every device-side buffer obtained in cuda_allocate().
 * Buffers are independent, so the release order (reverse of allocation
 * here) is irrelevant to correctness.
 */
void cuda_free(void)
{
    cuda_lib_free((void *)cu_mass);
    cuda_lib_free((void *)cu_newVelocity);
    cuda_lib_free((void *)cu_currVelocity);
    cuda_lib_free((void *)cu_position);
}
| 9e1256138f8e1927a079e3a739f369525b2e6a82.cu | #include "kernel.cuh"
uint64_t lastInteractionUpdate; /**< Date of the last interraction update */
struct vector3_t * cu_position; // Float array of entities positions [x0,y0,z0][x1,y1,z1]...
struct vector3_t * cu_currVelocity; // Float array of current entities velocities [vx0,vy0,vz0][vx1,vy1,vz1]...
struct vector3_t * cu_newVelocity; // Float array of new entities velocities [vx0,vy0,vz0][vx1,vy1,vz1]...
float * cu_mass; // Float array
/**
 * Each thread of each block receives one entity and computes its new
 * velocity by comparing it to all other entities (O(entityCount) work per
 * thread, O(entityCount^2) overall).
 * @param cu_position: vector of all the positions of the entities of the universe
 * @param cu_newVelocity: new velocity computed by each thread (overwritten here)
 * @param cu_mass: vector of all masses of the entities of the universe
 * @param entityCount: number of entities in the universe (and size of the above vectors)
 */
__global__ void kernel_compute_new_velocity(struct vector3_t * cu_position, struct vector3_t * cu_newVelocity, float * cu_mass, uint32_t entityCount)
{
    /* Fix: derive the entity index with integer arithmetic. The previous
     * fmaf(blockIdx.x, blockDim.x, threadIdx.x) routed the index through a
     * float, which silently corrupts indices >= 2^24 (and wastes an FP op). */
    uint32_t curEntity = blockIdx.x * blockDim.x + threadIdx.x;
    uint32_t entityIndex;
    struct vector3_t delta; /** Vector from the current entity to the neighbor */
    float distance;         /** Distance between the current entity and the neighbor */
    float coeff;            /** Coefficient applied for velocity calculation */
    float curX, curY, curZ;

    /* Guard: the grid rarely divides entityCount evenly. */
    if (curEntity >= entityCount) {
        return;
    }
    /** Reset the velocity accumulator for this entity */
    (cu_newVelocity[curEntity]).x = 0;
    (cu_newVelocity[curEntity]).y = 0;
    (cu_newVelocity[curEntity]).z = 0;
    /* Cache this entity's position in registers so the loop does not
     * re-read it from global memory on every iteration. */
    curX = (cu_position[curEntity]).x;
    curY = (cu_position[curEntity]).y;
    curZ = (cu_position[curEntity]).z;
    for (entityIndex = 0; entityIndex < entityCount; entityIndex++) {
        /** Ignore the current entity (no self-interaction) */
        if (entityIndex == curEntity) {
            continue;
        }
        /** Compute the slope vector delta */
        delta.x = (cu_position[entityIndex]).x - curX;
        delta.y = (cu_position[entityIndex]).y - curY;
        delta.z = (cu_position[entityIndex]).z - curZ;
        /** Compute the distance as the Euclidean norm of delta */
        distance = sqrtf(POW_2(delta.x) + POW_2(delta.y) + POW_2(delta.z));
        if (distance == 0) {
            // TODO Entity collisions
            continue;
        }
        /** Pre-computed coeff (mass / |delta|^3) to avoid repetition:
         *  delta * coeff is the unit direction scaled by mass / distance^2 */
        coeff = cu_mass[entityIndex] / (POW_3(distance));
        /** Add the neighbor contribution to the new velocity of the current entity */
        (cu_newVelocity[curEntity]).x = fmaf(delta.x, coeff, (cu_newVelocity[curEntity]).x);
        (cu_newVelocity[curEntity]).y = fmaf(delta.y, coeff, (cu_newVelocity[curEntity]).y);
        (cu_newVelocity[curEntity]).z = fmaf(delta.z, coeff, (cu_newVelocity[curEntity]).z);
    }
}
/**
 * Each thread integrates one entity: fold the freshly computed velocity
 * delta into the current velocity, then advance the position by one
 * explicit-Euler step using the velocity saved from BEFORE this update.
 * (Using the old velocity for the position step looks deliberate —
 * explicit Euler — but confirm if semi-implicit integration was intended.)
 * @param cu_position: entity positions, updated in place
 * @param cu_currVelocity: entity velocities, updated in place
 * @param cu_newVelocity: per-entity velocity deltas from kernel_compute_new_velocity
 * @param constFactor: scale applied to the velocity delta
 * @param si_timeFactor: time scale applied to the position step
 * @param entityCount: number of entities (size of the above vectors)
 */
__global__ void kernel_apply_new_velocity(struct vector3_t * cu_position,
struct vector3_t * cu_currVelocity, struct vector3_t * cu_newVelocity,
float constFactor, float si_timeFactor, int entityCount)
{
    /* Fix: integer index math. The previous fmaf() float round-trip
     * corrupts thread indices >= 2^24. */
    uint32_t curEntity = blockIdx.x * blockDim.x + threadIdx.x;
    float curVelX, curVelY, curVelZ;
    // Avoid memory overrun when the grid overshoots entityCount
    if (curEntity >= entityCount) {
        return;
    }
    // Save the pre-update velocity in registers; the position update below
    // deliberately uses these old values.
    curVelX = (cu_currVelocity[curEntity]).x;
    curVelY = (cu_currVelocity[curEntity]).y;
    curVelZ = (cu_currVelocity[curEntity]).z;
    // Update current velocity: v += newV * constFactor
    cu_currVelocity[curEntity].x = fmaf(cu_newVelocity[curEntity].x, constFactor, curVelX);
    cu_currVelocity[curEntity].y = fmaf(cu_newVelocity[curEntity].y, constFactor, curVelY);
    cu_currVelocity[curEntity].z = fmaf(cu_newVelocity[curEntity].z, constFactor, curVelZ);
    // Update position: p += oldV * si_timeFactor
    cu_position[curEntity].x = fmaf(si_timeFactor, curVelX, cu_position[curEntity].x);
    cu_position[curEntity].y = fmaf(si_timeFactor, curVelY, cu_position[curEntity].y);
    cu_position[curEntity].z = fmaf(si_timeFactor, curVelZ, cu_position[curEntity].z);
}
/**
 * Allocate the device-side buffers and upload the initial universe state
 * (positions, current velocities and masses). The new-velocity buffer is
 * left uninitialized: the compute kernel overwrites it before reading it.
 * Always returns 0; allocation failures are handled inside cuda_lib_malloc.
 */
uint32_t cuda_allocate(struct universe_t *universe)
{
    const size_t vectorBytes = universe->entityCount * sizeof(struct vector3_t);
    const size_t massBytes   = universe->entityCount * sizeof(float);

    cuda_lib_malloc((void**) &cu_position,     vectorBytes);
    cuda_lib_malloc((void**) &cu_currVelocity, vectorBytes);
    cuda_lib_malloc((void**) &cu_newVelocity,  vectorBytes);
    cuda_lib_malloc((void**) &cu_mass,         massBytes);

    cuda_lib_memcpy(cu_position,     universe->position,     vectorBytes, cudaMemcpyHostToDevice);
    cuda_lib_memcpy(cu_currVelocity, universe->currVelocity, vectorBytes, cudaMemcpyHostToDevice);
    cuda_lib_memcpy(cu_mass,         universe->mass,         massBytes,   cudaMemcpyHostToDevice);

    return 0;
}
/**
 * Init the lastInteractionUpdate variable to avoid jump on start
 */
// Stamp the wall clock just before the simulation loop starts, so the first
// cuda_make_a_move() measures a small, realistic elapsed time instead of the
// full time since program launch.
void cuda_start_make_a_move(void)
{
lastInteractionUpdate = getTimeOfDayInUs();
}
/**
 * Compute the next move of all entities: run the O(N^2) velocity kernel,
 * scale the factors by the wall-clock time elapsed since the previous call,
 * apply the velocities, and copy the refreshed positions back to the host.
 *
 * Fix: the launch was hard-coded to <<<1, 1023>>>, so only the first 1023
 * entities were ever simulated (the kernels bounds-check, which hid the
 * bug) and 1023 is not a multiple of the 32-thread warp. The grid is now
 * sized from universe->entityCount.
 */
void cuda_make_a_move(struct universe_t *universe, float constFactor, float si_timeFactor)
{
    const int nbThreadPerBlock = 256; /* multiple of the warp size */
    const int nbBlock = (universe->entityCount + nbThreadPerBlock - 1) / nbThreadPerBlock; /* ceil-div */
    float deltaInSec;

    if (nbBlock == 0) {
        return; /* empty universe: avoid a zero-sized grid launch */
    }
    /** Compute new velocities on all threads of all blocks */
    kernel_compute_new_velocity<<<nbBlock, nbThreadPerBlock>>>(cu_position, cu_newVelocity, cu_mass, universe->entityCount);
    /** Compute the time between two apply calls and update the factors */
    deltaInSec = (getTimeOfDayInUs() - lastInteractionUpdate) / 1000000.0f;
    lastInteractionUpdate = getTimeOfDayInUs();
    constFactor *= deltaInSec;
    si_timeFactor *= deltaInSec;
    /** Apply new velocity */
    kernel_apply_new_velocity<<<nbBlock, nbThreadPerBlock>>>(cu_position, cu_currVelocity, cu_newVelocity, constFactor, si_timeFactor, universe->entityCount);
    /** Copy results from device to host (presumably a blocking cudaMemcpy
     * inside cuda_lib_memcpy, which also synchronizes with the kernels
     * above — TODO confirm) */
    cuda_lib_memcpy(universe->position, cu_position, universe->entityCount * sizeof(struct vector3_t), cudaMemcpyDeviceToHost);
}
/**
 * Release every device-side buffer obtained in cuda_allocate().
 * Buffers are independent, so the release order (reverse of allocation
 * here) is irrelevant to correctness.
 */
void cuda_free(void)
{
    cuda_lib_free((void *)cu_mass);
    cuda_lib_free((void *)cu_newVelocity);
    cuda_lib_free((void *)cu_currVelocity);
    cuda_lib_free((void *)cu_position);
}
|
14f1784780ff49cd87305fea0468afdb6f805051.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
// ------------------------------------------------------------------
// Fast R-CNN
// Copyright (c) 2015 Microsoft
// Licensed under The MIT License [see fast-rcnn/LICENSE for details]
// Written by Ross Girshick
// ------------------------------------------------------------------
#include <hipcub/hipcub.hpp>
#include <iomanip>
#include "caffe/FRCNN/frcnn_proposal_layer.hpp"
#include "caffe/FRCNN/util/frcnn_utils.hpp"
#include "caffe/FRCNN/util/frcnn_helper.hpp"
#include "caffe/FRCNN/util/frcnn_param.hpp"
#include "caffe/FRCNN/util/frcnn_gpu_nms.hpp"
#include "caffe/util/device_alternate.hpp"
namespace caffe {
namespace Frcnn {
using std::vector;
// Write the identity permutation into indices: indices[i] = i for i in [0, n).
// Plain grid-stride loop (the explicit form of caffe's CUDA_KERNEL_LOOP), so
// any launch configuration covers all n elements.
__global__ void GetIndex(const int n,int *indices){
  for (int i = blockIdx.x * blockDim.x + threadIdx.x; i < n; i += blockDim.x * gridDim.x) {
    indices[i] = i;
  }
}
// Decode RPN bbox-regression deltas into absolute, image-clipped proposals.
// One loop iteration per retained anchor (in sorted-score order):
//   1. locate the anchor cell (i, j) and anchor id k on the feature map,
//   2. shift the base anchor to that cell,
//   3. apply the predicted (dx, dy, dw, dh) transform,
//   4. clamp the box to the image extent.
// transform_bbox receives 4 floats [x1, y1, x2, y2] per index.
// NOTE(review): im_height/im_width are ints here although image sizes are
// floats elsewhere in this layer — the caller's values get truncated;
// confirm that is intended.
template <typename Dtype>
__global__ void BBoxTransformInv(const int nthreads, const Dtype* const bottom_rpn_bbox,
const int height, const int width, const int feat_stride,
const int im_height, const int im_width,
const int* sorted_indices, const float* anchors,
float* const transform_bbox) {
  CUDA_KERNEL_LOOP(index , nthreads) {
    const int score_idx = sorted_indices[index];
    const int i = score_idx % width;                      // feature-map column
    const int j = (score_idx % (width * height)) / width; // feature-map row
    const int k = score_idx / (width * height);           // anchor channel
    float *box = transform_bbox + index * 4;
    box[0] = anchors[k * 4 + 0] + i * feat_stride;
    box[1] = anchors[k * 4 + 1] + j * feat_stride;
    box[2] = anchors[k * 4 + 2] + i * feat_stride;
    box[3] = anchors[k * 4 + 3] + j * feat_stride;
    const Dtype det[4] = { bottom_rpn_bbox[(k * 4 + 0) * height * width + j * width + i],
                           bottom_rpn_bbox[(k * 4 + 1) * height * width + j * width + i],
                           bottom_rpn_bbox[(k * 4 + 2) * height * width + j * width + i],
                           bottom_rpn_bbox[(k * 4 + 3) * height * width + j * width + i] };
    // Fix: use float literals throughout the box math. The previous double
    // literals (0.5, 1.0) forced float->double->float round trips and made
    // the min()/max() calls below mix float and double operands.
    float src_w = box[2] - box[0] + 1;
    float src_h = box[3] - box[1] + 1;
    float src_ctr_x = box[0] + 0.5f * src_w;
    float src_ctr_y = box[1] + 0.5f * src_h;
    float pred_ctr_x = det[0] * src_w + src_ctr_x;
    float pred_ctr_y = det[1] * src_h + src_ctr_y;
    float pred_w = exp(det[2]) * src_w;
    float pred_h = exp(det[3]) * src_h;
    box[0] = pred_ctr_x - 0.5f * pred_w;
    box[1] = pred_ctr_y - 0.5f * pred_h;
    box[2] = pred_ctr_x + 0.5f * pred_w;
    box[3] = pred_ctr_y + 0.5f * pred_h;
    // Clamp to [0, im_dim - 1] so proposals never leave the image.
    box[0] = max(0.0f, min(box[0], im_width - 1.0f));
    box[1] = max(0.0f, min(box[1], im_height - 1.0f));
    box[2] = max(0.0f, min(box[2], im_width - 1.0f));
    box[3] = max(0.0f, min(box[3], im_height - 1.0f));
  }
}
// Flag boxes whose width or height falls below min_size: flags[i] = 0 for
// too-small boxes, 1 otherwise. Boxes are packed as [x1, y1, x2, y2].
// The comparison direction matches the original so NaN coordinates still
// yield flag 1.
__global__ void SelectBox(const int nthreads, const float *box, float min_size,
int *flags) {
  for (int idx = blockIdx.x * blockDim.x + threadIdx.x; idx < nthreads;
       idx += blockDim.x * gridDim.x) {
    const float w = box[idx * 4 + 2] - box[idx * 4 + 0];
    const float h = box[idx * 4 + 3] - box[idx * 4 + 1];
    flags[idx] = ((w < min_size) || (h < min_size)) ? 0 : 1;
  }
}
// Stream-compaction scatter for boxes that passed the min-size filter.
// selected_indices is the *inclusive* prefix sum of SelectBox's 0/1 flags,
// so position `index` was kept exactly when its running count increments
// there (index 0: sum == 1; index > 0: sum[index] == sum[index-1] + 1).
// Kept boxes (and optionally their scores) are written densely at output
// slot selected_indices[index] - 1.
// NOTE(review): the caller passes the same buffer as in_box and out_box
// (in-place compaction). A kept element only ever moves to a lower index,
// but another thread may still be reading that lower slot concurrently —
// looks racy; confirm before relying on this path.
template <typename Dtype>
__global__ void SelectBoxByIndices(const int nthreads, const float *in_box, int *selected_indices,
float *out_box, const Dtype *in_score, Dtype *out_score) {
CUDA_KERNEL_LOOP(index , nthreads) {
if ((index == 0 && selected_indices[index] == 1) ||
(index > 0 && selected_indices[index] == selected_indices[index - 1] + 1)) {
out_box[(selected_indices[index] - 1) * 4 + 0] = in_box[index * 4 + 0];
out_box[(selected_indices[index] - 1) * 4 + 1] = in_box[index * 4 + 1];
out_box[(selected_indices[index] - 1) * 4 + 2] = in_box[index * 4 + 2];
out_box[(selected_indices[index] - 1) * 4 + 3] = in_box[index * 4 + 3];
// Scores are gathered only when both score buffers are provided.
if (in_score!=NULL && out_score!=NULL) {
out_score[selected_indices[index] - 1] = in_score[index];
}
}
}
}
// Gather the NMS survivors into the output ROI blob. Each output row is
// 5 wide: slot 0 is zeroed (the layer only supports single-item batches,
// per the caller's check), slots 1..4 hold the box coordinates. Matching
// scores are gathered too when both score buffers are provided.
template <typename Dtype>
__global__ void SelectBoxAftNMS(const int nthreads, const float *in_box, int *keep_indices,
Dtype *top_data, const Dtype *in_score, Dtype* top_score) {
  for (int out = blockIdx.x * blockDim.x + threadIdx.x; out < nthreads;
       out += blockDim.x * gridDim.x) {
    const int src = keep_indices[out];
    Dtype *row = top_data + out * 5;
    row[0] = 0;
    row[1] = in_box[src * 4 + 0];
    row[2] = in_box[src * 4 + 1];
    row[3] = in_box[src * 4 + 2];
    row[4] = in_box[src * 4 + 3];
    if (top_score != NULL && in_score != NULL) {
      top_score[out] = in_score[src];
    }
  }
}
// Forward pass. The GPU proposal path below is compiled out (#if 0); the
// layer currently delegates to the CPU implementation and returns.
template <typename Dtype>
void FrcnnProposalLayer<Dtype>::Forward_gpu(const vector<Blob<Dtype> *> &bottom,
const vector<Blob<Dtype> *> &top) {
Forward_cpu(bottom, top);
return ;
// ---- Disabled GPU implementation, kept for reference --------------------
// Pipeline: sort anchors by foreground score -> decode the top-N boxes ->
// drop too-small boxes via prefix-sum compaction -> NMS -> copy the
// survivors into top[0] (and their scores into top[1] when present).
#if 0
DLOG(ERROR) << "========== enter proposal layer";
const Dtype *bottom_rpn_score = bottom[0]->gpu_data();
const Dtype *bottom_rpn_bbox = bottom[1]->gpu_data();
// bottom data comes from host memory
Dtype bottom_im_info[3];
CHECK_EQ(bottom[2]->count(), 3);
CUDA_CHECK(hipMemcpy(bottom_im_info, bottom[2]->gpu_data(), sizeof(Dtype) * 3, hipMemcpyDeviceToHost));
const int num = bottom[1]->num();
const int channes = bottom[1]->channels();
const int height = bottom[1]->height();
const int width = bottom[1]->width();
CHECK(num == 1) << "only single item batches are supported";
CHECK(channes % 4 == 0) << "rpn bbox pred channels should be divided by 4";
const float im_height = bottom_im_info[0];
const float im_width = bottom_im_info[1];
// NMS hyper-parameters differ between training and testing.
int rpn_pre_nms_top_n;
int rpn_post_nms_top_n;
float rpn_nms_thresh;
int rpn_min_size;
if (this->phase_ == TRAIN) {
rpn_pre_nms_top_n = FrcnnParam::rpn_pre_nms_top_n;
rpn_post_nms_top_n = FrcnnParam::rpn_post_nms_top_n;
rpn_nms_thresh = FrcnnParam::rpn_nms_thresh;
rpn_min_size = FrcnnParam::rpn_min_size;
} else {
rpn_pre_nms_top_n = FrcnnParam::test_rpn_pre_nms_top_n;
rpn_post_nms_top_n = FrcnnParam::test_rpn_post_nms_top_n;
rpn_nms_thresh = FrcnnParam::test_rpn_nms_thresh;
rpn_min_size = FrcnnParam::test_rpn_min_size;
}
LOG_IF(ERROR, rpn_pre_nms_top_n <= 0 ) << "rpn_pre_nms_top_n : " << rpn_pre_nms_top_n;
LOG_IF(ERROR, rpn_post_nms_top_n <= 0 ) << "rpn_post_nms_top_n : " << rpn_post_nms_top_n;
if (rpn_pre_nms_top_n <= 0 || rpn_post_nms_top_n <= 0 ) return;
const int config_n_anchors = FrcnnParam::anchors.size() / 4;
const int total_anchor_num = config_n_anchors * height * width;
//Step 1. -------------------------------Sort the rpn result----------------------
// the first half of rpn_score is the bg score
// Note that the sorting operator will change the order fg_scores (bottom_rpn_score)
// NOTE(review): this const-casts into bottom[0]'s device data, so the score
// blob is clobbered in place by the radix sort.
Dtype *fg_scores = (Dtype*)(&bottom_rpn_score[total_anchor_num]);
Dtype *sorted_scores = NULL;
CUDA_CHECK(hipMalloc((void**)&sorted_scores, sizeof(Dtype) * total_anchor_num));
cub::DoubleBuffer<Dtype> d_keys(fg_scores, sorted_scores);
int *indices = NULL;
CUDA_CHECK(hipMalloc((void**)&indices, sizeof(int) * total_anchor_num));
hipLaunchKernelGGL(( GetIndex), dim3(caffe::CAFFE_GET_BLOCKS(total_anchor_num)), dim3(caffe::CAFFE_CUDA_NUM_THREADS), 0, 0,
total_anchor_num, indices);
hipDeviceSynchronize();
int *sorted_indices = NULL;
CUDA_CHECK(hipMalloc((void**)&sorted_indices, sizeof(int) * total_anchor_num));
cub::DoubleBuffer<int> d_values(indices, sorted_indices);
void *sort_temp_storage_ = NULL;
size_t sort_temp_storage_bytes_ = 0;
// calculate the temp_storage_bytes
// (standard CUB two-phase call: first call with NULL storage sizes the
// scratch buffer, second call does the actual descending key/value sort)
hipcub::DeviceRadixSort::SortPairsDescending(sort_temp_storage_, sort_temp_storage_bytes_,
d_keys, d_values, total_anchor_num);
DLOG(ERROR) << "sort_temp_storage_bytes_ : " << sort_temp_storage_bytes_;
CUDA_CHECK(hipMalloc(&sort_temp_storage_, sort_temp_storage_bytes_));
// sorting
hipcub::DeviceRadixSort::SortPairsDescending(sort_temp_storage_, sort_temp_storage_bytes_,
d_keys, d_values, total_anchor_num);
hipDeviceSynchronize();
//Step 2. ---------------------------bbox transform----------------------------
const int retained_anchor_num = ::min(total_anchor_num, rpn_pre_nms_top_n);
// float *transform_bbox = NULL;
// CUDA_CHECK(hipMalloc(&transform_bbox, sizeof(float) * retained_anchor_num * 4));
hipLaunchKernelGGL(( BBoxTransformInv<Dtype>), dim3(caffe::CAFFE_GET_BLOCKS(retained_anchor_num)), dim3(caffe::CAFFE_CUDA_NUM_THREADS), 0, 0,
retained_anchor_num, bottom_rpn_bbox, height, width, FrcnnParam::feat_stride,
im_height, im_width, sorted_indices, anchors_, transform_bbox_);
hipDeviceSynchronize();
//Step 3. -------------------------filter out small box-----------------------
// select the box larger than min size
// int *selected_flags = NULL;
// CUDA_CHECK(hipMalloc(&selected_flags, sizeof(int) * retained_anchor_num));
hipLaunchKernelGGL(( SelectBox), dim3(caffe::CAFFE_GET_BLOCKS(retained_anchor_num)), dim3(caffe::CAFFE_CUDA_NUM_THREADS), 0, 0,
retained_anchor_num, transform_bbox_, bottom_im_info[2] * rpn_min_size, selected_flags_);
hipDeviceSynchronize();
// cumulative sum up the flags to get the copy index
int *selected_indices_ = NULL;
CUDA_CHECK(hipMalloc((void**)&selected_indices_, sizeof(int) * retained_anchor_num));
void *cumsum_temp_storage_ = NULL;
size_t cumsum_temp_storage_bytes_ = 0;
hipcub::DeviceScan::InclusiveSum(cumsum_temp_storage_, cumsum_temp_storage_bytes_,
selected_flags_, selected_indices_, retained_anchor_num);
DLOG(ERROR) << "cumsum_temp_storage_bytes : " << cumsum_temp_storage_bytes_;
CUDA_CHECK(hipMalloc(&cumsum_temp_storage_, cumsum_temp_storage_bytes_));
// NOTE(review): this call passes sort_temp_storage_ with
// cumsum_temp_storage_bytes_ — looks like a copy/paste slip (the freshly
// allocated cumsum_temp_storage_ goes unused); harmless only if the sort
// scratch buffer happens to be large enough. Fix before re-enabling.
hipcub::DeviceScan::InclusiveSum(sort_temp_storage_, cumsum_temp_storage_bytes_,
selected_flags_, selected_indices_, retained_anchor_num);
// CUDA_CHECK(hipFree(cumsum_temp_storage));
// The last inclusive-sum entry equals the number of kept boxes.
int selected_num = -1;
hipMemcpy(&selected_num, &selected_indices_[retained_anchor_num - 1], sizeof(int), hipMemcpyDeviceToHost);
CHECK_GT(selected_num, 0);
Dtype *bbox_score_ = NULL;
if (top.size() == 2) CUDA_CHECK(hipMalloc(&bbox_score_, sizeof(Dtype) * retained_anchor_num));
hipLaunchKernelGGL(( SelectBoxByIndices), dim3(caffe::CAFFE_GET_BLOCKS(selected_num)), dim3(caffe::CAFFE_CUDA_NUM_THREADS), 0, 0,
selected_num, transform_bbox_, selected_indices_, transform_bbox_, sorted_scores, bbox_score_);
hipDeviceSynchronize();
//Step 4. -----------------------------apply nms-------------------------------
DLOG(ERROR) << "========== apply nms with rpn_nms_thresh : " << rpn_nms_thresh;
vector<int> keep_indices(selected_num);
int keep_num = -1;
gpu_nms(&keep_indices[0], &keep_num, transform_bbox_, selected_num, 4, rpn_nms_thresh);
DLOG(ERROR) << "rpn num after gpu nms: " << keep_num;
keep_num = ::min(keep_num, rpn_post_nms_top_n);
DLOG(ERROR) << "========== copy to top";
hipMemcpy(gpu_keep_indices_, &keep_indices[0], sizeof(int) * keep_num, hipMemcpyHostToDevice);
top[0]->Reshape(keep_num, 5, 1, 1);
Dtype *top_data = top[0]->mutable_gpu_data();
Dtype *top_score = NULL;
if (top.size() == 2) {
top[1]->Reshape(keep_num, 1, 1, 1);
top_score = top[1]->mutable_gpu_data();
}
hipLaunchKernelGGL(( SelectBoxAftNMS), dim3(caffe::CAFFE_GET_BLOCKS(keep_num)), dim3(caffe::CAFFE_CUDA_NUM_THREADS), 0, 0,
keep_num, transform_bbox_, gpu_keep_indices_, top_data, bbox_score_, top_score);
DLOG(ERROR) << "========== exit proposal layer";
////////////////////////////////////
// do not forget to free the malloc memory
CUDA_CHECK(hipFree(sorted_scores));
CUDA_CHECK(hipFree(indices));
CUDA_CHECK(hipFree(sorted_indices));
CUDA_CHECK(hipFree(sort_temp_storage_));
CUDA_CHECK(hipFree(cumsum_temp_storage_));
CUDA_CHECK(hipFree(selected_indices_));
if (bbox_score_!=NULL) CUDA_CHECK(hipFree(bbox_score_));
#endif
}
// Backward pass: proposal generation is not differentiable, so any request
// for gradients w.r.t. a bottom blob is rejected outright.
template <typename Dtype>
void FrcnnProposalLayer<Dtype>::Backward_gpu(const vector<Blob<Dtype> *> &top,
const vector<bool> &propagate_down, const vector<Blob<Dtype> *> &bottom) {
  for (const bool needs_gradient : propagate_down) {
    if (needs_gradient) {
      NOT_IMPLEMENTED;
    }
  }
}
INSTANTIATE_LAYER_GPU_FUNCS(FrcnnProposalLayer);
} // namespace frcnn
} // namespace caffe
| 14f1784780ff49cd87305fea0468afdb6f805051.cu | // ------------------------------------------------------------------
// Fast R-CNN
// Copyright (c) 2015 Microsoft
// Licensed under The MIT License [see fast-rcnn/LICENSE for details]
// Written by Ross Girshick
// ------------------------------------------------------------------
#include <cub/cub.cuh>
#include <iomanip>
#include "caffe/FRCNN/frcnn_proposal_layer.hpp"
#include "caffe/FRCNN/util/frcnn_utils.hpp"
#include "caffe/FRCNN/util/frcnn_helper.hpp"
#include "caffe/FRCNN/util/frcnn_param.hpp"
#include "caffe/FRCNN/util/frcnn_gpu_nms.hpp"
#include "caffe/util/device_alternate.hpp"
namespace caffe {
namespace Frcnn {
using std::vector;
// Write the identity permutation into indices: indices[i] = i for i in [0, n).
// Plain grid-stride loop (the explicit form of caffe's CUDA_KERNEL_LOOP), so
// any launch configuration covers all n elements.
__global__ void GetIndex(const int n,int *indices){
  for (int i = blockIdx.x * blockDim.x + threadIdx.x; i < n; i += blockDim.x * gridDim.x) {
    indices[i] = i;
  }
}
// Decode RPN bbox-regression deltas into absolute, image-clipped proposals.
// One loop iteration per retained anchor (in sorted-score order):
//   1. locate the anchor cell (i, j) and anchor id k on the feature map,
//   2. shift the base anchor to that cell,
//   3. apply the predicted (dx, dy, dw, dh) transform,
//   4. clamp the box to the image extent.
// transform_bbox receives 4 floats [x1, y1, x2, y2] per index.
// NOTE(review): im_height/im_width are ints here although image sizes are
// floats elsewhere in this layer — the caller's values get truncated;
// confirm that is intended.
template <typename Dtype>
__global__ void BBoxTransformInv(const int nthreads, const Dtype* const bottom_rpn_bbox,
const int height, const int width, const int feat_stride,
const int im_height, const int im_width,
const int* sorted_indices, const float* anchors,
float* const transform_bbox) {
  CUDA_KERNEL_LOOP(index , nthreads) {
    const int score_idx = sorted_indices[index];
    const int i = score_idx % width;                      // feature-map column
    const int j = (score_idx % (width * height)) / width; // feature-map row
    const int k = score_idx / (width * height);           // anchor channel
    float *box = transform_bbox + index * 4;
    box[0] = anchors[k * 4 + 0] + i * feat_stride;
    box[1] = anchors[k * 4 + 1] + j * feat_stride;
    box[2] = anchors[k * 4 + 2] + i * feat_stride;
    box[3] = anchors[k * 4 + 3] + j * feat_stride;
    const Dtype det[4] = { bottom_rpn_bbox[(k * 4 + 0) * height * width + j * width + i],
                           bottom_rpn_bbox[(k * 4 + 1) * height * width + j * width + i],
                           bottom_rpn_bbox[(k * 4 + 2) * height * width + j * width + i],
                           bottom_rpn_bbox[(k * 4 + 3) * height * width + j * width + i] };
    // Fix: use float literals throughout the box math. The previous double
    // literals (0.5, 1.0) forced float->double->float round trips and made
    // the min()/max() calls below mix float and double operands.
    float src_w = box[2] - box[0] + 1;
    float src_h = box[3] - box[1] + 1;
    float src_ctr_x = box[0] + 0.5f * src_w;
    float src_ctr_y = box[1] + 0.5f * src_h;
    float pred_ctr_x = det[0] * src_w + src_ctr_x;
    float pred_ctr_y = det[1] * src_h + src_ctr_y;
    float pred_w = exp(det[2]) * src_w;
    float pred_h = exp(det[3]) * src_h;
    box[0] = pred_ctr_x - 0.5f * pred_w;
    box[1] = pred_ctr_y - 0.5f * pred_h;
    box[2] = pred_ctr_x + 0.5f * pred_w;
    box[3] = pred_ctr_y + 0.5f * pred_h;
    // Clamp to [0, im_dim - 1] so proposals never leave the image.
    box[0] = max(0.0f, min(box[0], im_width - 1.0f));
    box[1] = max(0.0f, min(box[1], im_height - 1.0f));
    box[2] = max(0.0f, min(box[2], im_width - 1.0f));
    box[3] = max(0.0f, min(box[3], im_height - 1.0f));
  }
}
// Flag boxes whose width or height falls below min_size: flags[i] = 0 for
// too-small boxes, 1 otherwise. Boxes are packed as [x1, y1, x2, y2].
// The comparison direction matches the original so NaN coordinates still
// yield flag 1.
__global__ void SelectBox(const int nthreads, const float *box, float min_size,
int *flags) {
  for (int idx = blockIdx.x * blockDim.x + threadIdx.x; idx < nthreads;
       idx += blockDim.x * gridDim.x) {
    const float w = box[idx * 4 + 2] - box[idx * 4 + 0];
    const float h = box[idx * 4 + 3] - box[idx * 4 + 1];
    flags[idx] = ((w < min_size) || (h < min_size)) ? 0 : 1;
  }
}
// Stream-compaction scatter for boxes that passed the min-size filter.
// selected_indices is the *inclusive* prefix sum of SelectBox's 0/1 flags,
// so position `index` was kept exactly when its running count increments
// there (index 0: sum == 1; index > 0: sum[index] == sum[index-1] + 1).
// Kept boxes (and optionally their scores) are written densely at output
// slot selected_indices[index] - 1.
// NOTE(review): the caller passes the same buffer as in_box and out_box
// (in-place compaction). A kept element only ever moves to a lower index,
// but another thread may still be reading that lower slot concurrently —
// looks racy; confirm before relying on this path.
template <typename Dtype>
__global__ void SelectBoxByIndices(const int nthreads, const float *in_box, int *selected_indices,
float *out_box, const Dtype *in_score, Dtype *out_score) {
CUDA_KERNEL_LOOP(index , nthreads) {
if ((index == 0 && selected_indices[index] == 1) ||
(index > 0 && selected_indices[index] == selected_indices[index - 1] + 1)) {
out_box[(selected_indices[index] - 1) * 4 + 0] = in_box[index * 4 + 0];
out_box[(selected_indices[index] - 1) * 4 + 1] = in_box[index * 4 + 1];
out_box[(selected_indices[index] - 1) * 4 + 2] = in_box[index * 4 + 2];
out_box[(selected_indices[index] - 1) * 4 + 3] = in_box[index * 4 + 3];
// Scores are gathered only when both score buffers are provided.
if (in_score!=NULL && out_score!=NULL) {
out_score[selected_indices[index] - 1] = in_score[index];
}
}
}
}
// Gather the NMS survivors into the output ROI blob. Each output row is
// 5 wide: slot 0 is zeroed (the layer only supports single-item batches,
// per the caller's check), slots 1..4 hold the box coordinates. Matching
// scores are gathered too when both score buffers are provided.
template <typename Dtype>
__global__ void SelectBoxAftNMS(const int nthreads, const float *in_box, int *keep_indices,
Dtype *top_data, const Dtype *in_score, Dtype* top_score) {
  for (int out = blockIdx.x * blockDim.x + threadIdx.x; out < nthreads;
       out += blockDim.x * gridDim.x) {
    const int src = keep_indices[out];
    Dtype *row = top_data + out * 5;
    row[0] = 0;
    row[1] = in_box[src * 4 + 0];
    row[2] = in_box[src * 4 + 1];
    row[3] = in_box[src * 4 + 2];
    row[4] = in_box[src * 4 + 3];
    if (top_score != NULL && in_score != NULL) {
      top_score[out] = in_score[src];
    }
  }
}
template <typename Dtype>
void FrcnnProposalLayer<Dtype>::Forward_gpu(const vector<Blob<Dtype> *> &bottom,
const vector<Blob<Dtype> *> &top) {
Forward_cpu(bottom, top);
return ;
#if 0
DLOG(ERROR) << "========== enter proposal layer";
const Dtype *bottom_rpn_score = bottom[0]->gpu_data();
const Dtype *bottom_rpn_bbox = bottom[1]->gpu_data();
// bottom data comes from host memory
Dtype bottom_im_info[3];
CHECK_EQ(bottom[2]->count(), 3);
CUDA_CHECK(cudaMemcpy(bottom_im_info, bottom[2]->gpu_data(), sizeof(Dtype) * 3, cudaMemcpyDeviceToHost));
const int num = bottom[1]->num();
const int channes = bottom[1]->channels();
const int height = bottom[1]->height();
const int width = bottom[1]->width();
CHECK(num == 1) << "only single item batches are supported";
CHECK(channes % 4 == 0) << "rpn bbox pred channels should be divided by 4";
const float im_height = bottom_im_info[0];
const float im_width = bottom_im_info[1];
int rpn_pre_nms_top_n;
int rpn_post_nms_top_n;
float rpn_nms_thresh;
int rpn_min_size;
if (this->phase_ == TRAIN) {
rpn_pre_nms_top_n = FrcnnParam::rpn_pre_nms_top_n;
rpn_post_nms_top_n = FrcnnParam::rpn_post_nms_top_n;
rpn_nms_thresh = FrcnnParam::rpn_nms_thresh;
rpn_min_size = FrcnnParam::rpn_min_size;
} else {
rpn_pre_nms_top_n = FrcnnParam::test_rpn_pre_nms_top_n;
rpn_post_nms_top_n = FrcnnParam::test_rpn_post_nms_top_n;
rpn_nms_thresh = FrcnnParam::test_rpn_nms_thresh;
rpn_min_size = FrcnnParam::test_rpn_min_size;
}
LOG_IF(ERROR, rpn_pre_nms_top_n <= 0 ) << "rpn_pre_nms_top_n : " << rpn_pre_nms_top_n;
LOG_IF(ERROR, rpn_post_nms_top_n <= 0 ) << "rpn_post_nms_top_n : " << rpn_post_nms_top_n;
if (rpn_pre_nms_top_n <= 0 || rpn_post_nms_top_n <= 0 ) return;
const int config_n_anchors = FrcnnParam::anchors.size() / 4;
const int total_anchor_num = config_n_anchors * height * width;
//Step 1. -------------------------------Sort the rpn result----------------------
// the first half of rpn_score is the bg score
// Note that the sorting operator will change the order fg_scores (bottom_rpn_score)
Dtype *fg_scores = (Dtype*)(&bottom_rpn_score[total_anchor_num]);
Dtype *sorted_scores = NULL;
CUDA_CHECK(cudaMalloc((void**)&sorted_scores, sizeof(Dtype) * total_anchor_num));
cub::DoubleBuffer<Dtype> d_keys(fg_scores, sorted_scores);
int *indices = NULL;
CUDA_CHECK(cudaMalloc((void**)&indices, sizeof(int) * total_anchor_num));
// Fill indices with 0..total_anchor_num-1 so the sort yields an index permutation.
GetIndex<<<caffe::CAFFE_GET_BLOCKS(total_anchor_num), caffe::CAFFE_CUDA_NUM_THREADS>>>(
total_anchor_num, indices);
CUDA_CHECK(cudaDeviceSynchronize());
int *sorted_indices = NULL;
CUDA_CHECK(cudaMalloc((void**)&sorted_indices, sizeof(int) * total_anchor_num));
cub::DoubleBuffer<int> d_values(indices, sorted_indices);
void *sort_temp_storage_ = NULL;
size_t sort_temp_storage_bytes_ = 0;
// First call with NULL storage only queries the required temp-storage size.
cub::DeviceRadixSort::SortPairsDescending(sort_temp_storage_, sort_temp_storage_bytes_,
d_keys, d_values, total_anchor_num);
DLOG(ERROR) << "sort_temp_storage_bytes_ : " << sort_temp_storage_bytes_;
CUDA_CHECK(cudaMalloc(&sort_temp_storage_, sort_temp_storage_bytes_));
// sorting (descending by foreground score, carrying anchor indices along)
cub::DeviceRadixSort::SortPairsDescending(sort_temp_storage_, sort_temp_storage_bytes_,
d_keys, d_values, total_anchor_num);
CUDA_CHECK(cudaDeviceSynchronize());
// CUB's DoubleBuffer may leave the sorted output in either of its two buffers;
// per the CUB documentation the result must be read through Current(), not by
// assuming it landed in the "alternate" buffer.
Dtype *sorted_scores_cur = d_keys.Current();
int *sorted_indices_cur = d_values.Current();
//Step 2. ---------------------------bbox transform----------------------------
const int retained_anchor_num = std::min(total_anchor_num, rpn_pre_nms_top_n);
// Decode the top-scoring anchors with the predicted deltas into image-space
// boxes, written to the member buffer transform_bbox_.
BBoxTransformInv<Dtype><<<caffe::CAFFE_GET_BLOCKS(retained_anchor_num), caffe::CAFFE_CUDA_NUM_THREADS>>>(
retained_anchor_num, bottom_rpn_bbox, height, width, FrcnnParam::feat_stride,
im_height, im_width, sorted_indices_cur, anchors_, transform_bbox_);
CUDA_CHECK(cudaDeviceSynchronize());
//Step 3. -------------------------filter out small box-----------------------
// select the box larger than min size (flag per box into selected_flags_)
SelectBox<<<caffe::CAFFE_GET_BLOCKS(retained_anchor_num), caffe::CAFFE_CUDA_NUM_THREADS>>>(
retained_anchor_num, transform_bbox_, bottom_im_info[2] * rpn_min_size, selected_flags_);
CUDA_CHECK(cudaDeviceSynchronize());
// cumulative sum up the flags to get the copy index
int *selected_indices_ = NULL;
CUDA_CHECK(cudaMalloc((void**)&selected_indices_, sizeof(int) * retained_anchor_num));
void *cumsum_temp_storage_ = NULL;
size_t cumsum_temp_storage_bytes_ = 0;
cub::DeviceScan::InclusiveSum(cumsum_temp_storage_, cumsum_temp_storage_bytes_,
selected_flags_, selected_indices_, retained_anchor_num);
DLOG(ERROR) << "cumsum_temp_storage_bytes : " << cumsum_temp_storage_bytes_;
CUDA_CHECK(cudaMalloc(&cumsum_temp_storage_, cumsum_temp_storage_bytes_));
// BUGFIX: the scan must run with its own temp storage (cumsum_temp_storage_);
// the previous code passed sort_temp_storage_, whose allocated size is
// unrelated to cumsum_temp_storage_bytes_, and left cumsum_temp_storage_ unused.
cub::DeviceScan::InclusiveSum(cumsum_temp_storage_, cumsum_temp_storage_bytes_,
selected_flags_, selected_indices_, retained_anchor_num);
// Blocking copy: the last inclusive-sum entry is the count of surviving boxes.
int selected_num = -1;
CUDA_CHECK(cudaMemcpy(&selected_num, &selected_indices_[retained_anchor_num - 1], sizeof(int), cudaMemcpyDeviceToHost));
CHECK_GT(selected_num, 0);
Dtype *bbox_score_ = NULL;
if (top.size() == 2) CUDA_CHECK(cudaMalloc(&bbox_score_, sizeof(Dtype) * retained_anchor_num));
// Compact the surviving boxes (and, if requested, their scores) to the front.
SelectBoxByIndices<<<caffe::CAFFE_GET_BLOCKS(selected_num), caffe::CAFFE_CUDA_NUM_THREADS>>>(
selected_num, transform_bbox_, selected_indices_, transform_bbox_, sorted_scores_cur, bbox_score_);
CUDA_CHECK(cudaDeviceSynchronize());
//Step 4. -----------------------------apply nms-------------------------------
DLOG(ERROR) << "========== apply nms with rpn_nms_thresh : " << rpn_nms_thresh;
vector<int> keep_indices(selected_num);
int keep_num = -1;
gpu_nms(&keep_indices[0], &keep_num, transform_bbox_, selected_num, 4, rpn_nms_thresh);
DLOG(ERROR) << "rpn num after gpu nms: " << keep_num;
keep_num = std::min(keep_num, rpn_post_nms_top_n);
DLOG(ERROR) << "========== copy to top";
CUDA_CHECK(cudaMemcpy(gpu_keep_indices_, &keep_indices[0], sizeof(int) * keep_num, cudaMemcpyHostToDevice));
top[0]->Reshape(keep_num, 5, 1, 1);
Dtype *top_data = top[0]->mutable_gpu_data();
Dtype *top_score = NULL;
if (top.size() == 2) {
top[1]->Reshape(keep_num, 1, 1, 1);
top_score = top[1]->mutable_gpu_data();
}
// Gather the NMS survivors into the top blob(s); runs on the default stream,
// so downstream layers see the result without an explicit sync here.
SelectBoxAftNMS<<<caffe::CAFFE_GET_BLOCKS(keep_num), caffe::CAFFE_CUDA_NUM_THREADS>>>(
keep_num, transform_bbox_, gpu_keep_indices_, top_data, bbox_score_, top_score);
DLOG(ERROR) << "========== exit proposal layer";
////////////////////////////////////
// do not forget to free the malloc memory
CUDA_CHECK(cudaFree(sorted_scores));
CUDA_CHECK(cudaFree(indices));
CUDA_CHECK(cudaFree(sorted_indices));
CUDA_CHECK(cudaFree(sort_temp_storage_));
CUDA_CHECK(cudaFree(cumsum_temp_storage_));
CUDA_CHECK(cudaFree(selected_indices_));
if (bbox_score_!=NULL) CUDA_CHECK(cudaFree(bbox_score_));
#endif
}
template <typename Dtype>
void FrcnnProposalLayer<Dtype>::Backward_gpu(const vector<Blob<Dtype> *> &top,
    const vector<bool> &propagate_down, const vector<Blob<Dtype> *> &bottom) {
  // Proposal generation is not differentiable: no gradient ever flows back to
  // the score/bbox inputs, so any request to backpropagate is a usage error.
  for (size_t i = 0; i < propagate_down.size(); ++i) {
    if (!propagate_down[i]) continue;
    NOT_IMPLEMENTED;
  }
}
INSTANTIATE_LAYER_GPU_FUNCS(FrcnnProposalLayer);
} // namespace frcnn
} // namespace caffe